blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9ff75359aa512f0fb77afae060b0368b59cd6e54 | c0a3c38e0bc84b439b7716b47a12b78ecc5772f7 | /PlayInternetRadio.py | e8ff2a35e3733be00a7857d134905d71bc6c9689 | [] | no_license | sidneycadot/LedDisplay | bd0bcbea26f2943610e5d5699155cf74c5778505 | 3413f680de5d24b762cf4feb43b5b34e2e905ea8 | refs/heads/master | 2020-12-29T02:24:58.527009 | 2017-06-08T11:39:29 | 2017-06-08T11:39:29 | 14,494,601 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,322 | py | #! /usr/bin/env python3
import os, select, sys, time, socket, re, subprocess, logging, math, sqlite3
from setup_logging import setup_logging
try:
from LedDisplay import LedDisplay
except:
pass # Error while importing
class Signal:
    """Minimal Signal/Slot mechanism.

    Callables registered through connect() are invoked, in registration
    order, each time emit() is called.
    """
    def __init__(self):
        # Receivers are kept in the order they were connected.
        self._receivers = []
        self._logger = logging.getLogger("signal")

    def connect(self, receiver):
        """Register *receiver* to be called on every emit()."""
        self._receivers.append(receiver)

    def emit(self, *args, **kwargs):
        """Call every connected receiver with the given arguments.

        Note: if one receiver raises an exception, the receivers
        connected after it will not be notified.
        """
        for slot in self._receivers:
            slot(*args, **kwargs)
class AudioStreamPlayer:
    """ A basic audioplayer class that pushes data to a subprocess audio player.

    The raw audio bytes handed to play() are written to the stdin of an
    external player executable (e.g. mpg123); throughput statistics are
    logged periodically.  Use as a context manager, or call close()
    explicitly when done.
    """
    def __init__(self, executable, executable_options):
        # executable: name/path of the player program; executable_options:
        # iterable of command-line arguments passed to it.
        self._logger = logging.getLogger(executable)
        self._executable = executable
        self._logger.debug("Starting {!r} sub-process ...".format(self._executable))
        args = [executable] + [option for option in executable_options]
        # The player's own output is discarded; we only feed its stdin.
        self._process = subprocess.Popen(args = args, stdin = subprocess.PIPE, stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL)
        # -inf forces a throughput report on the very first play() call.
        self._last_report_time = float("-inf")
        self._bytecount = 0
        self._start_time = time.time()
    def __del__(self):
        # Safety net only: close() should already have been called.
        if self._process is not None:
            self._logger.error("The __del__ method of class AudioStreamPlayer was called while the subprocess was still active. Please use explicit close() method.")
            self.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        if self._process is not None:
            self.close()
    def close(self):
        """Terminate the player sub-process; must be called exactly once."""
        assert self._process is not None
        self._logger.debug("Stopping {!r} sub-process.".format(self._executable))
        self._process.terminate() # ask nicely
        self._process.wait()
        self._process = None
    def play(self, data):
        """Write *data* (raw audio bytes) to the player's stdin.

        Logs cumulative throughput at most once per REPORT_INTERVAL_SECONDS.
        """
        current_time = time.time()
        self._process.stdin.write(data)
        self._bytecount += len(data)
        REPORT_INTERVAL_SECONDS = 60.0
        if current_time - self._last_report_time >= REPORT_INTERVAL_SECONDS:
            megabytes = self._bytecount / 1048576.0
            kilobits = self._bytecount * 8.0 / 1000.0 # These 'kilobits' are 1000 bits.
            seconds = current_time - self._start_time
            hours = seconds / 3600.0
            megabytes_per_hour = megabytes / hours
            kilobits_per_second = kilobits / seconds
            # Break the elapsed time into hours / minutes / seconds for the log line.
            hms_s = seconds
            hms_h = math.floor(hms_s / 3600.0)
            hms_s -= hms_h * 3600.0
            hms_m = math.floor(hms_s / 60.0)
            hms_s -= hms_m * 60.0
            self._logger.info("Streamed {:.3f} MB in {}h{:02}m{:06.3f}s ({:.3f} MB/h, {:.6f} kbits/sec)".format(megabytes, hms_h, hms_m, hms_s, megabytes_per_hour, kilobits_per_second))
            self._last_report_time = current_time
class MetadataLedDisplayDriver:
    """Shows the current stream title on an external LED display.

    When led_device is None the driver only parses and logs the title;
    no LED-specific code is executed (useful when the LedDisplay
    package is unavailable).
    """
    def __init__(self, led_device):
        self._logger = logging.getLogger("MetadataLedDisplayDriver")
        self._led_device = led_device
        # ICY metadata format: StreamTitle='...';StreamUrl='...';
        self._regexp = re.compile("StreamTitle='(.*)';StreamUrl='(.*)';")
        if self._led_device is not None:
            with LedDisplay(self._led_device) as led_display:
                led_display.setBrightnessLevel("A")
                led_display.setSchedule("A", "A")
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # No-op
        pass
    def process(self, current_time, metadata):
        """Parse an ICY metadata string and push the title to the LED display.

        Empty and unrecognized metadata strings are ignored.
        """
        if len(metadata) == 0:
            return
        match = self._regexp.match(metadata)
        if match is None:
            # BUGFIX: a non-matching metadata string previously caused an
            # AttributeError on match.group(); log it and skip instead.
            self._logger.warning("Unrecognized metadata format: {!r}".format(metadata))
            return
        title = match.group(1)
        title = title.rstrip()
        # NOTE(review): this replace is a no-op as written — it probably was
        # meant to collapse doubled spaces; confirm against the original intent.
        title = title.replace(' ', ' ')
        self._logger.info("Stream title: {!r}".format(title))
        if self._led_device is not None:
            # The "reggae" easter egg...
            reggae_triggers = "Bob Marley, Peter Tosh, Gregory Isaacs, Tenor Saw, reggae"
            if any(r.strip().lower() in title.lower() for r in reggae_triggers.split(",")):
                color_directive = "<CR>" # reggae colors
            else:
                color_directive = "<CG>" # regular colors
            # Make led display command.
            led_message = "<L1><PA><FE><MA><WB><FE><AC>{}{} <CD><KD> <KT>".format(color_directive, title.encode("ASCII", errors = "replace").decode())
            with LedDisplay(self._led_device) as led_display:
                led_display.send(led_message)
class MetadataFileWriter:
    """Appends timestamped stream metadata lines to a plain-text log file.

    A "restart" marker is written when the writer is created, so separate
    sessions can be told apart in the log.
    """
    def __init__(self, filename):
        self._logger = logging.getLogger("MetadataFileWriter")
        self._filename = filename
        self._append("restart")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Nothing to release: the file is opened per write.
        pass

    def _append(self, line):
        # Open in append mode for every write so the file is never held open.
        with open(self._filename, "a", encoding = "utf-8") as f:
            print(line, file = f)

    def process(self, current_time, metadata):
        """Write one timestamped metadata line; empty metadata is ignored."""
        if not metadata:
            return
        self._append("{:20.9f} {}".format(current_time, metadata))
class MetadataDatabaseWriter:
    """Records stream metadata events in an SQLite database.

    Each process() call inserts one row with the event timestamp; the
    duration of the *previous* row is back-filled once the next event
    arrives.  Use as a context manager, or call close() explicitly.
    """
    def __init__(self, filename):
        self._logger = logging.getLogger("MetadataDatabaseWriter")
        self._logger.debug("Opening database ...")
        self._conn = sqlite3.connect(filename)
        cursor = self._conn.cursor()
        # Plain string literal; the previous version embedded source
        # indentation and stripped it with a fragile fixed-width (16-char)
        # slice that broke whenever the surrounding indentation changed.
        query = ("CREATE TABLE IF NOT EXISTS metadata(\n"
                 "    id        INTEGER NOT NULL PRIMARY KEY,\n"
                 "    timestamp REAL    NOT NULL,\n"
                 "    duration  REAL        NULL,\n"
                 "    metadata  BLOB    NOT NULL\n"
                 ");")
        cursor.execute(query)
        self._counter = 0        # number of metadata events seen so far
        self._last_time = None   # timestamp of the previous event
        self._last_rowid = None  # rowid of the previous event
    def __del__(self):
        # Safety net only: close() should already have been called.
        if self._conn is not None:
            self._logger.error("The __del__ method of class MetadataDatabaseWriter was called while the writer was still active. Please use explicit close() method.")
            self.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        if self._conn is not None:
            self.close()
    def close(self):
        """Close the database connection; must be called exactly once."""
        assert self._conn is not None
        self._logger.debug("Closing database ...")
        self._conn.close()
        self._conn = None
    def process(self, current_time, metadata):
        """Insert a metadata row and back-fill the previous row's duration."""
        if len(metadata) == 0:
            return
        self._counter += 1
        cursor = self._conn.cursor()
        # The duration of the first song cannot be determined.
        # The duration of the second song cannot be reliably calculated
        # (because the timestamp of the first song is unreliable).
        if self._counter > 2:
            duration = current_time - self._last_time
            query = "UPDATE metadata SET duration = ? WHERE rowid = ?;"
            cursor.execute(query, (duration, self._last_rowid))
        query = "INSERT INTO metadata(timestamp, metadata) VALUES (?, ?);"
        cursor.execute(query, (current_time, metadata))
        self._conn.commit()
        self._last_time = current_time
        self._last_rowid = cursor.lastrowid
class StreamStalledError(RuntimeError):
    """Raised when the radio stream delivers no data for too long."""
    pass
class InternetRadioPlayer:
    """Plays an Icecast/SHOUTcast-style internet radio stream.

    A raw HTTP/ICY request is sent over TCP with "Icy-MetaData: 1", which
    makes the server interleave metadata chunks with the audio every
    'icy-metaint' bytes.  Audio bytes are forwarded via audiodata_signal,
    decoded metadata strings via metadata_signal.
    """
    def __init__(self, host, port, path):
        self._logger = logging.getLogger("{}:{}".format(host, port))
        self._host = host
        self._port = port
        self._path = path
        self.audiodata_signal = Signal()  # emitted with chunks of raw audio bytes
        self.metadata_signal = Signal()   # emitted with (timestamp, metadata string)
    def play(self):
        """Stream from the server until an error occurs (blocking).

        Raises StreamStalledError when no data arrives for longer than
        NO_DATA_THRESHOLD_TIME seconds, or when the server closes the
        connection.
        """
        SOCKET_RECV_TIMEOUT = 1.0 # seconds
        SOCKET_RECV_SIZE = 4096 # larger than 1 regular IP packet.
        NO_DATA_THRESHOLD_TIME = 10.0
        address = (self._host, self._port)
        stream_socket = socket.create_connection(address)
        try:
            request = "GET {} HTTP/1.0\r\nHost:{}\r\nIcy-MetaData: 1\r\nAccept: */*\r\n\r\n".format(self._path, self._host).encode()
            self._logger.info("Sending HTTP request: {!r} ...".format(request))
            # BUGFIX: send() may transmit only part of the buffer; sendall()
            # guarantees the complete request goes out.
            stream_socket.sendall(request)
            # Read the response header (HTTP-like).
            stream_buffer = bytearray()
            last_data_time = time.time()
            in_header = True
            icy_metadata_interval = None
            icy_content_type = None
            while True: # Loop to read more data.
                while True: # loop to process available bytes.
                    if in_header:
                        idx = stream_buffer.find(b"\r\n")
                        if idx < 0:
                            break # CR/LF not found. We need to read more data to proceed.
                        # Found CR/LF ; extract and process the line.
                        # Extract header line from stream buffer and convert it to a string.
                        headerline = stream_buffer[:idx].decode(errors = "replace")
                        # Discard header line and CR/LF characters from the stream buffer.
                        del stream_buffer[:idx + 2]
                        self._logger.info("Received response header: {!r}".format(headerline))
                        if headerline.startswith("icy-metaint:"):
                            assert icy_metadata_interval is None
                            icy_metadata_interval = int(headerline[12:])
                        elif headerline.startswith("content-type:"):
                            assert icy_content_type is None
                            icy_content_type = headerline[13:]
                        elif len(headerline) == 0:
                            # Empty line terminates the header!
                            assert icy_metadata_interval is not None
                            assert icy_content_type is not None
                            stream_audio_bytes_until_metadata = icy_metadata_interval
                            in_header = False
                    elif stream_audio_bytes_until_metadata > 0:
                        # We should stream audio data.
                        if len(stream_buffer) == 0:
                            break # No data available for streaming. Need to read more data.
                        # Determine how much of the buffer is audio data.
                        stream_audio_bytes = min(stream_audio_bytes_until_metadata, len(stream_buffer))
                        # Emit the audio stream data using a signal.
                        self.audiodata_signal.emit(stream_buffer[:stream_audio_bytes])
                        # Remove the audio stream data from the stream buffer.
                        del stream_buffer[:stream_audio_bytes]
                        # Decrease the number of bytes until the next metadata chunk.
                        stream_audio_bytes_until_metadata -= stream_audio_bytes
                    else:
                        # We are expecting a metadata chunk.
                        if len(stream_buffer) == 0:
                            break # More data needed; at least the metadata chunk length byte.
                        # First byte encodes the chunk length in 16-byte units.
                        metadata_size = 16 * stream_buffer[0]
                        if len(stream_buffer) < 1 + metadata_size:
                            break # More data needed; metadata chunk incomplete.
                        # We have a complete metadata chunk.
                        metadata = stream_buffer[1:1 + metadata_size]
                        metadata = metadata.rstrip(b"\0").decode(errors = "replace")
                        self.metadata_signal.emit(last_data_time, metadata)
                        # Discard the metadata size byte as well as the metadata itself.
                        del stream_buffer[:1 + metadata_size]
                        # Next traversal of data processing loop will process audio data.
                        stream_audio_bytes_until_metadata = icy_metadata_interval
                # Read more data.
                while True:
                    (rlist, wlist, xlist) = select.select([stream_socket], [], [stream_socket], SOCKET_RECV_TIMEOUT)
                    select_time = time.time()
                    assert len(wlist) == 0
                    assert len(xlist) == 0
                    if stream_socket in rlist:
                        break # Data available.
                    self._logger.warning("No data received for {:.1f} seconds.".format(select_time - last_data_time))
                    if select_time - last_data_time > NO_DATA_THRESHOLD_TIME:
                        raise StreamStalledError()
                last_data_time = select_time
                packet = stream_socket.recv(SOCKET_RECV_SIZE)
                if len(packet) == 0:
                    # BUGFIX: an empty recv() on a readable socket means the
                    # server closed the connection.  Without this check the
                    # outer loop would spin forever, because select() keeps
                    # reporting the closed socket as readable and
                    # last_data_time keeps being refreshed.
                    raise StreamStalledError()
                stream_buffer.extend(packet)
        finally:
            self._logger.info("Closing stream socket ...")
            stream_socket.close()
def main():
    """Entry point: play the radio stream forever, reconnecting on errors.

    Streams from a hard-coded Pinguin Radio host, feeds audio to an
    mpg123 sub-process, and fans metadata out to the LED display, a
    text log, and an SQLite database.  Pass --debug for verbose logging.
    """
    host = "pc192.pinguinradio.com"
    port = 80
    path = "/"
    # Optional: serial device of the LED display; None disables LED output.
    led_device = os.getenv("LED_DEVICE")
    if "--debug" in sys.argv[1:]:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    log_filename = "PlayInternetRadio_%Y%m%d_%H%M%S.log"
    log_format = "%(asctime)s | %(levelname)-10s | %(name)-25s | %(message)s"
    with setup_logging(logfile_name = log_filename, fmt = log_format, level = log_level):
        logger = logging.getLogger("main")
        with AudioStreamPlayer("mpg123", ["-"]) as audiostream_player, \
            MetadataLedDisplayDriver(led_device) as metadata_led_driver, \
            MetadataDatabaseWriter("metadata.sqlite3") as metadata_database_writer, \
            MetadataFileWriter("metadata.log") as metadata_file_writer:
            RETRY_INTERVAL = 5.0
            while True:
                try:
                    radioPlayer = InternetRadioPlayer(host, port, path)
                    # Fan the stream out to the player and all metadata consumers.
                    radioPlayer.audiodata_signal.connect(audiostream_player.play)
                    radioPlayer.metadata_signal.connect(metadata_led_driver.process)
                    radioPlayer.metadata_signal.connect(metadata_file_writer.process)
                    radioPlayer.metadata_signal.connect(metadata_database_writer.process)
                    radioPlayer.play() # blocking call
                except KeyboardInterrupt:
                    logger.info("Quitting by user request.")
                    break
                except socket.gaierror as exception:
                    logger.error("getaddrinfo() error: {}".format(exception))
                except StreamStalledError:
                    logger.error("Stream has stalled; unable to play.")
                except BaseException as exception:
                    # Deliberate catch-all: the player must survive any error
                    # and retry, rather than die unattended.
                    logger.exception("Unknown exception: {!r}".format(exception))
                logger.info("Sleeping for {} seconds before retry ...".format(RETRY_INTERVAL))
                time.sleep(RETRY_INTERVAL)
                # Next traversal of loop attempts new connection.
if __name__ == "__main__":
main()
| [
"sidney@jigsaw.nl"
] | sidney@jigsaw.nl |
4644ab0b675a97687d04eb2b5e216bb376972288 | 2e0c35b02706fb5fb4fa489812deb5135345fe74 | /Mundo 2/aula 9.py | abdfe6d9ba4a260467a8cd7d696f62019b9b92c9 | [] | no_license | damiati-a/CURSO-DE-PYTHON | 86a6d063d34ac7c4835fc03cd96c3e708141556f | cc8973611c0175ad0a2cd9b302287eb49f213c47 | refs/heads/main | 2023-04-21T09:39:12.806428 | 2021-05-29T13:01:13 | 2021-05-29T13:01:13 | 371,973,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | # Estruturas de repetição (for)
'''
faço 'c' no intervalo(1,10)
for 'c' in range(1,10)
'''
'''
laço c no intervalo(0,3)
passo
pula
passo
pega
#################################
for c in range(0,3)
passo
pula
passo
pega
'''
'''
for c in range(0,3)
if moeda:
pega
passo
pula
passo
pega
'''
# Parte Parática
'''
for c in range(0, 30, 3):
print(c)
print('FIM')
'''
'''
n = int(input('Digite um número: '))
for c in range(0, n+1):
print(c)
print('FIM')
'''
'''
i = int(input('Inicio: ')) # qual numero começar
f = int(input('Fim: ')) # qual numero terminar
p = int(input('Passo: ')) # e de quanto em quanto pular
for c in range(i, f + 1, p):
print(c)
print('fim')
'''
# Read three numbers and report their sum.
s = 0  # running total; the original reassigned s = n + n each pass, losing the sum
for c in range(0, 3):
    n = int(input('DIgite um numero: '))  # asks the user to type a number, 3 times
    s += n
print('O somatorio de tudo foi: {}'.format(s))
| [
"noreply@github.com"
] | noreply@github.com |
6bd41a058a205f913ee719533a4694bb10aac100 | 98b203d8ecf2f51ab0f7707eeee09ee07d109577 | /python/problems/longest_increasing_subseq_effi.py | 4d403b06cc0d168ddb53ec4b98265e6702b48cd8 | [] | no_license | razerboot/DataStrcutures-and-Algorithms | 9967727a730fa59daa00a91c021042d885584b10 | b47560efe4fa59ae255dc83c791e18acd9813c22 | refs/heads/master | 2021-07-16T19:08:16.770941 | 2021-01-21T11:19:19 | 2021-01-21T11:19:19 | 95,435,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,837 | py | #for strictly increasing sequence in a array with distinct elements
#ref : www.geeksforgeeks.org/longest-monotonically-increasing-subsequence-size-n-log-n/
def floor(arr, val, l, r):
    """Binary search: return the largest index i in (l, r) with arr[i] <= val.

    arr must be sorted ascending; l is returned unchanged (typically -1)
    when every element in the open interval is greater than val.
    """
    while r - l > 1:
        # Floor division keeps mid an int on both Python 2 and 3; the
        # original used '/', which yields a float (and breaks indexing)
        # on Python 3.
        mid = (l + r) // 2
        if arr[mid] <= val:
            l = mid
        else:
            r = mid
    return l
# Python 2 script: reads n, then n distinct integers, and prints the length
# of the longest strictly increasing subsequence followed by one such
# subsequence (uses print statements, raw_input, xrange, list-returning map).
n = input()
arr = map(int, raw_input().split())
# aux array stores last elements of active lists (active lists are of different
# sizes which can potential candidate for LIS)
aux_arr = [0] * n
# P maps each value to its predecessor in the LIS ending at that value
# (a value maps to itself when it starts a list).
P = {}
# l = number of active lists = length of the LIS found so far.
l = 0
for i in xrange(n):
    #case 1 - no element in aux array
    if l == 0:
        aux_arr[0] = arr[0]
        P[arr[i]] = arr[i]
        l += 1
    else:
        # Index of the largest active-list tail <= arr[i] (-1 if none).
        p = floor(aux_arr, arr[i], -1, l)
        # case 2
        # current element is less than all last elements then create a new list of size 1 with current ele
        # this can simulated using aux array by replacing first element with curr element bcoz first element is bigger
        # than curr element if LIS is there from the first element then same LIS can be modified for curr element so it
        # is safe
        if p == -1:
            P[arr[i]] = arr[i]
            aux_arr[0] = arr[i]
        # case 3
        # extend the aux array which simulates (we clone largest list and add curr element)
        elif p == l - 1:
            P[arr[i]] = aux_arr[l - 1]
            l += 1
            aux_arr[l - 1] = arr[i]
        # case 2
        # clone list corresponding to floor value and add curr element and remove all lists of same size of new list
        # this can simulated by just replacing value in aux arr at p + 1 index
        else:
            P[arr[i]] = aux_arr[p]
            aux_arr[p + 1] = arr[i]
print l
# Walk the predecessor chain back from the tail of the longest list,
# then reverse to print the subsequence in increasing order.
val = aux_arr[l - 1]
lis = [val]
while val != P[val]:
    val = P[val]
    lis.append(val)
print lis[::-1]
"akshaykumar@akshaymac.local"
] | akshaykumar@akshaymac.local |
7d0eac6bc6769a63f609d726e612586ed47b6af8 | e1ae535d8613aae44e8f9eaa4daf50c1e63665b7 | /multimedia/south_migrations/0026_auto__chg_field_remotestorage_media.py | e4f8b05c6dae4836b6317150e40ea7eda035d2ed | [] | no_license | teury/django-multimedia | 48b8fba9abc101286990b1306d85967bd197f08e | 4ddd5e6d9f4f680e2f4f68cc3616ced8f0fc2a43 | refs/heads/master | 2021-01-16T20:50:24.573686 | 2015-04-23T21:22:38 | 2015-04-23T21:22:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,388 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration (auto-generated): makes RemoteStorage.media
    nullable with on_delete=SET_NULL.  The 'models' dict below is South's
    frozen snapshot of the app models at generation time -- do not edit.
    """
    def forwards(self, orm):
        """Apply: allow NULL in RemoteStorage.media, SET_NULL on delete."""
        # Changing field 'RemoteStorage.media'
        db.alter_column(u'multimedia_remotestorage', 'media_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['multimedia.Media'], null=True, on_delete=models.SET_NULL))
    def backwards(self, orm):
        """Irreversible: NULLed media references cannot be restored."""
        # User chose to not deal with backwards NULL issues for 'RemoteStorage.media'
        raise RuntimeError("Cannot reverse this migration. 'RemoteStorage.media' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Changing field 'RemoteStorage.media'
        db.alter_column(u'multimedia_remotestorage', 'media_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['multimedia.Media']))
    # Frozen model definitions (South ORM freezer output).
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'multimedia.encodeprofile': {
            'Meta': {'object_name': 'EncodeProfile'},
            'command': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'container': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'file_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'multimedia.media': {
            'Meta': {'ordering': "(u'-created',)", 'object_name': 'Media'},
            'created': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'profiles': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['multimedia.EncodeProfile']", 'symmetrical': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'multimedia.remotestorage': {
            'Meta': {'object_name': 'RemoteStorage'},
            'content_hash': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'created': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'media': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['multimedia.Media']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {}),
            'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['multimedia.EncodeProfile']", 'on_delete': 'models.PROTECT'})
        }
    }
    complete_apps = ['multimedia']
"jason.bittel@gmail.com"
] | jason.bittel@gmail.com |
9a6be77d3f1ab6b5515bb83d0b6a6eee5e09b43b | eda7fbf7bbc0614e6fc448d2f6e3fd1918dadcbe | /new-api-tests/applications/create-surface-caps-from-centerlines/create_surface_caps.py | e61f1afbfba81befc17a2e58529183112bb6877e | [] | no_license | SimVascular/SimVascular-Tests | e97c136ad3bf3a7275d40c0323abca7817eb2eca | 55018e1edcd070bce77ae5af4caf2105353d3697 | refs/heads/master | 2023-02-11T02:19:06.755815 | 2023-02-02T18:26:31 | 2023-02-02T18:26:31 | 42,211,398 | 2 | 10 | null | 2023-02-02T18:26:32 | 2015-09-10T00:06:14 | Python | UTF-8 | Python | false | false | 3,496 | py | #!/usr/bin/env python
"""This script is used to create an SV model from a closed segmentation surface.
The
"""
import argparse
import os
import sys
from centerlines import Centerlines
from surface import Surface
sys.path.insert(1, '../../graphics/')
import graphics as gr
def parse_args():
'''Parse command-line arguments.
'''
parser = argparse.ArgumentParser()
parser.add_argument("--clip-distance", type=float, default=0.0,
help="The distance from the end of a centerline branch to clip a surface.")
parser.add_argument("--clip-width-scale", type=float, default=1.0,
help="The width multiplied by the centerline branch end radius to define the width of the box used to clip a surface.")
parser.add_argument("--surface-file", required=True, help="Input surface (.vtp or .vtk) file.")
parser.add_argument("--mesh-scale", type=float, default=1.0,
help="The factor used to scale the fe volume meshing edge size. A larger scale creates a coarser mesh. The initial edge size is determined from the largest surface triangle.")
parser.add_argument("--remesh-scale", type=float, default=1.0,
help="The factor used to scale the surface remeshing edge size. A larger scale creates a coarser suface mesh. The initial edge size is determined from the largest surface triangle.")
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return args
def main():
# Get command-line arguments.
args = parse_args()
## Create renderer and graphics window.
win_width = 500
win_height = 500
renderer, renderer_window = gr.init_graphics(win_width, win_height)
## Read in the segmentation surface.
surface_file_name = args.surface_file
surface = Surface(gr, renderer_window, renderer)
surface.read(surface_file_name)
gr_geom = gr.add_geometry(renderer, surface.geometry, color=[0.8, 0.8, 1.0])
surface.vtk_actor = gr_geom
#gr_geom.GetProperty().SetOpacity(0.5)
## Create a Centerlines object used to clip the surface.
centerlines = Centerlines()
centerlines.graphics = gr
centerlines.surface = surface
centerlines.window = renderer_window
centerlines.renderer = renderer
centerlines.clip_distance = args.clip_distance
centerlines.clip_width_scale = args.clip_width_scale
centerlines.remesh_scale = args.remesh_scale
centerlines.mesh_scale = args.mesh_scale
print("---------- Alphanumeric Keys ----------")
print("a - Compute model automatically for a three vessel surface with flat ends.")
print("c - Compute centerlines.")
print("m - Create a model from the surface and centerlines.")
print("q - Quit")
print("s - Select a centerline source point.")
print("t - Select a centerline target point.")
print("u - Undo the selection of a centerline source or target point.")
## Create a mouse interactor for selecting centerline points.
picking_keys = ['s', 't']
event_table = {
'a': (surface.create_model_automatically, centerlines),
'c': (surface.compute_centerlines, surface),
'm': (centerlines.create_model, surface),
's': surface.add_centerlines_source_node,
't': surface.add_centerlines_target_node
}
interactor = gr.init_picking(renderer_window, renderer, surface.geometry, picking_keys, event_table)
## Display window.
interactor.Start()
if __name__ == '__main__':
main()
| [
"davep@stanford.edu"
] | davep@stanford.edu |
fb6e690787905e1481dff64dee25695f22bf3eee | 70058f6db76e6adfbb06ed6245a63637b83fe9ef | /opendart/config/__init__.py | e64e9265798874238fe8da1d312e841fe0ab8876 | [
"MIT"
] | permissive | JehunYoo/opendart | 9c3235d3904e4c43bc5702a409ea65bb97d35ee3 | c88105baf85af57d006cc2404d192aaf9baf73cc | refs/heads/main | 2023-06-16T01:53:05.654129 | 2021-07-07T07:17:20 | 2021-07-07T07:17:20 | 380,931,447 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | from opendart.config.config import * | [
"percyu98@gmail.com"
] | percyu98@gmail.com |
b70442fbe888bee246a8aac51eb35d72971386bc | 82dc886733bd7999d8189e03f1f7a6201541abaa | /_operators.py | 5ceb1071dc7421d674d12265e9b33bfdd80f4ed5 | [] | no_license | lucaskaimthelen/python_training_3 | b78f4da101627ad2974538bc0926e65f82749e18 | 4339dd125b7b67d153f8b1f2eac966f4dde5d9bd | refs/heads/master | 2020-07-30T12:09:06.007258 | 2019-09-22T23:37:20 | 2019-09-22T23:37:20 | 210,228,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | # http://www.tutorialspoint.com/python/python_basic_operators.htm
# Python operators.
a = 2
b = 2
# Equality
print(a == b)
# Identity tests if the two objects in memory are the same. is and is not.
a = [1, 2, 3]
b = [1, 2, 3]
print(id(a))
print(id(b))
print(a is b)
# Set b object equal to a object.
a = b
print(id(a))
print(id(b))
print(a is b)
a = 10
b = 2
# Not equal.
print(a!= b)
# Less than.
print(a < b)
# Less then or equal to.
print(a <= b)
a = 10
# In operator.
a in [10, 11, 13]
# Not in operator.
a not in [10, 11, 13]
# Add and.
b = 2
b += 1
print(b)
# Subtract AND
b = 2
b -= 1
print(b)
| [
"lucaskaimthelen@gmail.com"
] | lucaskaimthelen@gmail.com |
f64e6334a50348abd20c1e2b1141f25c1a15d653 | 38bd99c72ca2521489ce1eb02b7604095b02b585 | /src/1680-ConcatenationOfConsecutiveBinaryNumbers.py | 67fc18efbe6b891b864fd59abb68a2db2a44bdad | [
"MIT"
] | permissive | Jiezhi/myleetcode | eadbd7d9f1f0ea6a0ee15c2da9040dcfbd28b522 | 4dd1e54d8d08f7e6590bc76abd08ecaacaf775e5 | refs/heads/master | 2023-03-16T15:52:21.833622 | 2023-03-09T14:33:03 | 2023-03-09T14:33:03 | 139,965,948 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | #!/usr/bin/env python3
"""
CREATED AT: 2022-09-23
URL: https://leetcode.com/problems/concatenation-of-consecutive-binary-numbers/
GITHUB: https://github.com/Jiezhi/myleetcode
FileName: 1680-ConcatenationOfConsecutiveBinaryNumbers
Difficulty: Medium
Desc:
Tag:
See: https://leetcode.cn/problems/concatenation-of-consecutive-binary-numbers/solution/lian-jie-lian-xu-er-jin-zhi-shu-zi-by-ze-t40j/
"""
class Solution:
    def concatenatedBinary(self, n: int) -> int:
        """Concatenate the binary representations of 1..n into one big
        binary number and return it modulo 10**9 + 7.

        Constraint: 1 <= n <= 10^5.
        """
        MOD = 10 ** 9 + 7
        result = 0
        for value in range(1, n + 1):
            # Shift left by the bit length of *value* to make room for its
            # binary digits, then OR them in (the shifted-in bits are zero).
            result = ((result << value.bit_length()) | value) % MOD
        return result
def test():
    """Sanity-check concatenatedBinary against the known LeetCode answers."""
    assert Solution().concatenatedBinary(n=1) == 1
    assert Solution().concatenatedBinary(n=3) == 27
    assert Solution().concatenatedBinary(n=12) == 505379714
if __name__ == '__main__':
test()
| [
"Jiezhi@users.noreply.github.com"
] | Jiezhi@users.noreply.github.com |
e181ba65208da4c102ad9870abd378ebd3ed1ebf | 25b16b0604e71edc3a695ea9254c2214a8388a3d | /mainwindow.py | 23e3df87bbe4001ce4a92a894795808b791c9cc4 | [] | no_license | peetpan/Facial-Recognition-Attendance-System | 38db4fff53a6322a7b3d529681e273d80c8b2e51 | a0b5946bd3a3805c112212ca2e5d3bd42ece3e9b | refs/heads/main | 2023-04-28T13:04:17.324366 | 2021-05-23T07:07:29 | 2021-05-23T07:07:29 | 369,984,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,350 | py |
import sys
from PyQt5.uic import loadUi
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QApplication, QDialog
import resource
# from model import Model
from out_window import Ui_OutputDialog
class Ui_Dialog(QDialog):
    """Main application window.

    Loads the Qt Designer layout from mainwindow.ui; clicking the Run
    button hides this window and opens the video output window.
    """
    def __init__(self):
        super(Ui_Dialog, self).__init__()
        loadUi("mainwindow.ui", self)
        self.runButton.clicked.connect(self.runSlot)
        # Output window (Ui_OutputDialog), created lazily in outputWindow_().
        self._new_window = None
        # Video capture source identifier (string); set by refreshAll().
        self.Videocapture_ = None
    def refreshAll(self):
        """
        Set the video capture source to the default device ("0").
        """
        self.Videocapture_ = "0"
    @pyqtSlot()
    def runSlot(self):
        """
        Called when the user presses the Run button
        """
        print("Clicked Run")
        self.refreshAll()
        print(self.Videocapture_)
        self.hide()  # hide the main window
        self.outputWindow_()  # Create and open new output window
    def outputWindow_(self):
        """
        Create and show the output window and start streaming video into it.
        """
        self._new_window = Ui_OutputDialog()
        self._new_window.show()
        self._new_window.startVideo(self.Videocapture_)
        print("Video Played")
# if __name__ == "__main__":
# app = QApplication(sys.argv)
# ui = Ui_Dialog()
# ui.show()
# sys.exit(app.exec_())
| [
"peetamber.p@somaiya.edu"
] | peetamber.p@somaiya.edu |
89fce0cb8074ad2a3c7b7435827857705b5155b5 | b81d0c338e0287e37f89c9bab03a48a58e58f689 | /My_Blog/My_Blog/urls.py | 93a07cd46fb988743eb9610d79fc6669809ea9ff | [] | no_license | syedfahimabrar/Django_Blog_Project | 5773ffa7409ff039515c64da3abd4ef0168db298 | 0e1210781d697f6fef11cfce2898618448446a40 | refs/heads/master | 2022-10-25T11:17:36.783557 | 2020-06-17T11:39:38 | 2020-06-17T11:39:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | from django.contrib import admin
from django.urls import path, include
from . import views
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns, static
# URL routing for the My_Blog project: admin site, auth app, blog app, index page.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('accounts/', include('app_login.urls')),
    path('blogs/', include('app_blog.urls')),
    path('', views.Index, name='index'),
]
# Serve static and uploaded media files (development setup only).
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"mahdi15-1078@diu.edu.bd"
] | mahdi15-1078@diu.edu.bd |
f2fa43b4315009568f7b49f8d37a9aa0e6d9d2e1 | cfcc212a8f9da735122167a8e8abca41dd56022a | /rqt_battery/rqt_battery_py/src/rqt_batterypkg/my_module.py | 46d82b8247a3c98de26d3b3f79ee72d605b579b3 | [] | no_license | zhexxian/SUTD-S03-DSO-Indoor-Drone | 3d5b8a126fedbf0eb24f626fb76786736fb634b0 | 6a56f357de24e9948b02f091fb63ad7481336cad | refs/heads/master | 2021-03-16T10:17:26.103007 | 2017-07-28T13:31:26 | 2017-07-28T13:31:26 | 77,600,900 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,258 | py | import os
import rospkg
import rospy
import tf
from std_msgs.msg import Float32
from crazyflie_driver.msg import Command, int_array, Pose2D, equation, equation_array
from geometry_msgs.msg import Pose, PoseArray
from qt_gui.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding.QtWidgets import QWidget
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import QtGui, QtCore
class battery(Plugin):
    """rqt plugin (Python 2) showing one progress bar per connected Crazyflie battery.

    Discovers Crazyflies from the ROS parameter server, subscribes to each
    /crazyflieN/battery topic, and refreshes the bars on a timer.
    """
    def __init__(self, context):
        super(battery, self).__init__(context)
        # Give QObjects reasonable names
        self.setObjectName('Battery')
        rp = rospkg.RosPack()
        UPDATE_RATE = 30  # GUI refresh rate in Hz
        # Process standalone plugin command-line arguments
        from argparse import ArgumentParser
        parser = ArgumentParser()
        # Add argument(s) to the parser.
        parser.add_argument("-q", "--quiet", action="store_true",
                      dest="quiet",
                      help="Put plugin in silent mode")
        args, unknowns = parser.parse_known_args(context.argv())
        if not args.quiet:
            print 'arguments: ', args
            print 'unknowns: ', unknowns
        self._container = QWidget()
        self._container.setObjectName('BatteryUI')
        self._layout = QVBoxLayout()
        self._container.setLayout(self._layout)
        context.add_widget(self._container)
        if context.serial_number() > 1:
            self._container.setWindowTitle(self._container.windowTitle() + (' (%d)' % context.serial_number()))
        # find the number of crazyflies connected
        # topic[0:11] is the "/crazyflieN" prefix (assumes single-digit N).
        self.crazyflieNum = 0
        self.checked = []
        allParams = rospy.get_param_names()
        for topic in allParams:
            # print topic
            if "crazyflie" in topic and topic[0:11] not in self.checked:
                self.checked.append(topic[0:11])
                self.crazyflieNum += 1
        print self.checked
        print self.crazyflieNum
        # keep track of battery levels
        self.battArr = []
        # set name of topic to publish to
        batteryStr = "/battery"
        crazyflieStr = "/crazyflie"
        for i in range(self.crazyflieNum):
            self.battArr.append(0.0)
            topicName = crazyflieStr+str(i)+batteryStr
            print topicName
            # The callback receives i as its callback_args, identifying the flie.
            rospy.Subscriber(topicName, Float32, self.callback, i)
            self.battLabel=QLabel("Battery for crazyflie" + str(i))
            setattr(self, "batteryBar"+str(i), QProgressBar()) # equivalent to self.batteryBari = QProgressBar()
            progressBar = getattr(self, "batteryBar%d" % i) # equivalent to progressBar = self.batteryBari
            self._layout.addWidget(self.battLabel)
            self._layout.addWidget(progressBar)
        rospy.Timer(rospy.Duration(1.0/UPDATE_RATE), self.updateDisplay)
    def callback(self, data, flieNum):
        """Store the latest battery voltage reported for crazyflie number flieNum."""
        self.battArr[flieNum] = float(data.data)
        # print "battery voltage for crazyflie" + str(flieNum) + ": " + str(data.data)
    def updateDisplay(self, event):
        """Timer callback: push each stored voltage (as a percentage) to its bar."""
        # print self.battArr
        for k in range(len(self.battArr)):
            currentBatt = self.battArr[k]
            updatedVal = self.normalizeBattery(currentBatt)
            progressBar = getattr(self, "batteryBar%d" % k)
            progressBar.setValue(updatedVal)
            # self._container.progressBar.setValue(updatedVal)
    def normalizeBattery(self, curr):
        """Map a voltage to percent, treating 2.164 V as empty and 4.0 V as full."""
        fullLevel = 4.0 - 2.164
        currLevel = curr - 2.164
        return (currLevel / fullLevel) * 100
    def shutdown_plugin(self):
        # TODO unregister all publishers here
        pass
    def save_settings(self, plugin_settings, instance_settings):
        # TODO save intrinsic configuration, usually using:
        # instance_settings.set_value(k, v)
        pass
    def restore_settings(self, plugin_settings, instance_settings):
        # TODO restore intrinsic configuration, usually using:
        # v = instance_settings.value(k)
        pass
    #def trigger_configuration(self):
        # Comment in to signal that the plugin has a way to configure
        # This will enable a setting button (gear icon) in each dock widget title bar
        # Usually used to open a modal configuration dialog
| [
"shunyutan@hotmail.com"
] | shunyutan@hotmail.com |
5d59ef3a58fea973713638a67bc342b2ad0a9f27 | d3f948c200c334341d710880f92c25fc01f82272 | /venv/Scripts/pip3-script.py | f2532a22e72df406bc4317b41d1e391db437b309 | [] | no_license | reyvergara/project3 | 3d91e53e9d7b771075002ba80e745afc9b90e10f | b62f9c6b3ffd5c0f83ee9d9714b42647edd5d74a | refs/heads/master | 2020-08-10T12:47:05.597752 | 2019-10-11T05:01:49 | 2019-10-11T05:01:49 | 214,345,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | #!C:\Users\rayve\PycharmProjects\Project3\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
# Auto-generated setuptools console-script launcher for pip3; do not edit by hand.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so pip sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| [
"reyvergara@csu.fullerton.edu"
] | reyvergara@csu.fullerton.edu |
9cddd0d77233442e3a96b4c9c680d6900a6ac27f | 462548ed980e90a878445df59785e2654a6681ed | /services/TutorialCardService.py | 573fe74ac79a6f1ce9d9946e7b26f1071c903063 | [] | no_license | Orpheusp/ohm-chatbot-backend | 5828a5463626c5875d21623c82839183003cb0f4 | bf8c0fb13c8bd05b5e3564f6af5677563aa72d36 | refs/heads/main | 2023-01-11T10:55:35.697073 | 2020-11-16T09:42:50 | 2020-11-16T09:42:50 | 312,749,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,790 | py | from models.TutorialCard import TutorialCardDocument
from models.typedefs import TUTORIAL_CARD_TYPE
from typing import Optional, List, Union
class TutorialCardResource:
    """Plain value object describing one resource attached to a tutorial card."""

    # Class-level annotations documenting the instance attributes set in __init__.
    resource: str
    resource_text: Optional[str]
    resource_type: str
    forward_condition: Optional[str]
    forward_condition_type: Optional[str]

    def __init__(self,
                 resource: str,
                 resource_type: str,
                 resource_text: Optional[str] = None,
                 forward_condition: Optional[str] = None,
                 forward_condition_type: Optional[str] = None):
        """Store the supplied resource fields verbatim on the instance."""
        self.resource = resource
        self.resource_text = resource_text
        self.resource_type = resource_type
        self.forward_condition = forward_condition
        self.forward_condition_type = forward_condition_type
def get_tutorial_cards_from_db() -> List[TutorialCardDocument]:
    """Return the queryset of all stored tutorial card documents."""
    return TutorialCardDocument.objects
def _get_tutorial_card_resource_code(index: int) -> str:
    """Build a card resource code: the card-type prefix plus the index, zero-padded below 10."""
    if index < 10:
        suffix = f'0{index}'
    else:
        suffix = f'{index}'
    return f'{TUTORIAL_CARD_TYPE}{suffix}'
def populate_tutorial_card_db_entry(
    resources: List[TutorialCardResource],
    title: str,
    supporting_text: Optional[str]=None
) -> TutorialCardDocument:
    """Create, save and return a new tutorial card document.

    The resource code is derived from the current document count, so codes
    are sequential (TUTORIAL_CARD_TYPE + zero-padded index).
    """
    index = TutorialCardDocument.objects.count()
    entry = TutorialCardDocument(
        auth_required=False,
        resource_code=_get_tutorial_card_resource_code(index),
        resources=resources,
        supporting_text=supporting_text,
        title=title,
        type=TUTORIAL_CARD_TYPE,
    )
    entry.save()
    return entry
def get_tutorial_card_entry(
        card_id: str) -> Optional[TutorialCardDocument]:
    """Return the tutorial card whose resource_code equals card_id, or None.

    The annotation uses the idiomatic Optional[...] (already imported) instead
    of the original non-idiomatic Union[type(None), ...].
    """
    return TutorialCardDocument.objects(resource_code=card_id).first()
| [
"stannolite@gmail.com"
] | stannolite@gmail.com |
7f360a2cd4082b6bab85f1e0827faf0b71b4a0d5 | 45413941c9a17483850b83bf621b68f2639eeb74 | /emotionEmoji.py | 366a93a305e489bd0a1183b38cb058d0aa134c05 | [] | no_license | anvi-mittal31/EROS-Your-Own-Design-Store | 6f6f4688c98914f571681b3945bd3ac26a59e6fd | f99ec6c3706dcb36a4de3789183412a17e5d9a6b | refs/heads/master | 2023-07-04T21:20:47.681096 | 2021-08-07T18:20:21 | 2021-08-07T18:20:21 | 378,417,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,369 | py | from flask.templating import render_template
from tensorflow.keras.models import model_from_json
import numpy as np
import cv2
import os
import random
class FacialExpressionModel(object):
    """Keras facial-expression classifier loaded from a JSON architecture + weights file."""
    # Index order must match the model's output units — do not reorder.
    EMOTIONS_LIST = ["ANGRY", "DISGUST", "FEAR", "HAPPY", "SAD", "SURPRISE", "NEUTRAL"]; ## dont change the order
    def __init__(self, model_json_file, model_weights_file):
        """Rebuild the model from its JSON description and load trained weights."""
        # load model from JSON file
        with open(model_json_file, "r") as json_file:
            loaded_model_json = json_file.read()
            self.loaded_model = model_from_json(loaded_model_json)
        # load weights into the new model
        self.loaded_model.load_weights(model_weights_file)
        print("Model loaded from disk")
        self.loaded_model.summary()
    def predict_emotion(self, img):
        """Return (label_string, label_index) for a single preprocessed face image."""
        self.preds = self.loaded_model.predict(img)
        # NOTE(review): predict() likely returns shape (1, 7); these slices index
        # axis 0, so on a single-sample batch they are probably no-ops rather than
        # the intended per-class bias on SAD/SURPRISE and DISGUST/FEAR — confirm.
        self.preds[4:6] += 0.1
        self.preds[1:3] += 0.2
        lbl = np.argmax(self.preds)
        return FacialExpressionModel.EMOTIONS_LIST[lbl], lbl
'''rgb = cv2.VideoCapture(0)
emo_happy = cv2.imread('happy.png',1)
emo_sad = cv2.imread('sad.png',1)
emo_fear = cv2.imread('fear.png',1)
emo_disgust = cv2.imread('disgust.png',1)
emo_surprise = cv2.imread('surprise.png',1)
emo_angry = cv2.imread('angry.png',1)
emo_neutral = cv2.imread('neutral.png',1)
emoji = [emo_angry,emo_disgust,emo_fear,emo_happy,emo_sad,emo_surprise,emo_neutral] #fix order'''
'''def __get_data__(rgb):
_, fr = rgb.read()
gray = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
faces = facec.detectMultiScale(gray, 1.25, 5)
return faces, fr, gray'''
def start_app(frame):
    """Detect faces in a BGR frame, classify each face's emotion and annotate the frame.

    Returns (annotated_frame, last_prediction). last_prediction is None when no
    face was detected; the original code left `pred` unbound in that case and
    raised UnboundLocalError at the return statement.

    NOTE(review): the cascade and the Keras model are reloaded on every call,
    which is expensive for per-frame use — consider caching at module level.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    facec = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
    cnn = FacialExpressionModel("model1.json", "chkPt1.hdf5")
    pred = None  # fix: defined even when detectMultiScale finds no faces
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = facec.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        # Normalize the grayscale face crop and resize to the model's 48x48 input.
        fc = gray[y:y+h, x:x+w]
        fc = cv2.normalize(fc, None, 0, 255, cv2.NORM_MINMAX)
        roi = cv2.resize(fc, (48, 48))
        pred, lbl = cnn.predict_emotion(roi[np.newaxis, :, :, np.newaxis])
        # Draw the predicted label and a bounding box on the original frame.
        cv2.putText(frame, pred, (x, y), font, 1, (255, 255, 0), 2)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    return frame, pred
# def wallpaperHandler():
# if (FINAL_PRED == 'happy'):
# return render_template()
'''try:
emo = cv2.resize(emo,(h,w))
frame[y:y+h,x:x+w] = cv2.addWeighted(frame[y:y+h,x:x+w],0.5,emo,0.5,0)
except Exception as e:
print(str(e))'''
'''if cv2.waitKey(1) == 27:
cv2.destroyAllWindows()
#break
# cv2.imshow("img",emo)
cv2.imshow('Filter', frame)'''
'''def detect():
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
if ret:
frame = cv2.resize(frame, (1366, 800))
start_app(frame)
return frame
cv2.imshow('Filter',frame)
cv2.waitKey(1)
else:
break
cap.release()
cv2.destroyAllWindows()
#start_app()'''
| [
"mittalanvi31@gmail.com"
] | mittalanvi31@gmail.com |
d9af383d21bace3659d9e389f6fd471dde4ed386 | fd2a7b889809ed302d2b3013cefb2c441dfb642a | /venv/bin/pylint | d519fa039ead9994ac2d7332d516f5a5eb8c57bd | [] | no_license | AnotherCoolDude/hello_flask | f27a976243e7956d9cc1a43953a119e396606089 | 79069a2975b479cb7479822c5946607783ab37b0 | refs/heads/master | 2022-10-08T13:10:44.602551 | 2019-10-02T14:35:21 | 2019-10-02T14:35:21 | 210,346,653 | 0 | 0 | null | 2022-09-16T18:10:44 | 2019-09-23T12:13:33 | Python | UTF-8 | Python | false | false | 267 | #!/Users/christianhovenbitzer/python/hello_flask/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
# Generated console-script launcher: normalize argv[0] and run pylint's CLI.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pylint())
| [
"christianhovenbitzer@Christians-MBP.fritz.box"
] | christianhovenbitzer@Christians-MBP.fritz.box | |
fb9b993a15edad35bf61dd1c9b5756ab037434ca | 246308810fe62bba6a9cedf1473e90311a934017 | /camera/test_images/draw_image.py | 9ea6011215f4d85446ab761129381e98c4208a33 | [] | no_license | MDB22/MedExpress | 6051b84a9fcf2b7371087d8a0afda3af599f6fc4 | 7c7dcd5f53ae9934b138bbe903726a050b6cedc0 | refs/heads/master | 2021-01-17T22:54:16.328942 | 2016-09-08T13:52:08 | 2016-09-08T13:52:08 | 47,587,845 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
# Read an image, overlay the text "OpenCV", then display it with matplotlib.
img = cv2.imread('test_image.jpg')
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'OpenCV',(10,500), font,4,(0,0,0),2,cv2.LINE_AA)
# cmap='gray' only affects single-channel data; imread returns BGR here.
plt.imshow(img, cmap='gray',interpolation='bicubic')
plt.xticks([]), plt.yticks([])  # hide both axes' tick marks
plt.show()
| [
"nathanmurfey@hotmail.com"
] | nathanmurfey@hotmail.com |
b5115902acdf2e1b2a89be3f25d525aa1e948b66 | d79520ffb6759925b06110852864ac4c9693a100 | /requests_unittest_agileone.py | 1dad4c7383154398b271eb15204993955e9f7f37 | [] | no_license | ZhuYing95/agileone | 6505fe56b38d5a3e34a4ee0c9e44d16a8960d730 | e9556e8112d73621461099560b0d8b1b6432d207 | refs/heads/master | 2022-04-22T21:10:03.321750 | 2020-04-15T12:22:22 | 2020-04-15T12:25:08 | 255,907,991 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,204 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import HTMLTestRunner
import requests
import os
import time
import unittest
# Wrap the requests library's get and post methods in a small custom client class
class Connection:
    """Thin HTTP client: one persistent requests session bound to a single host."""

    def __init__(self, host, port=80):
        self.base_url = 'http://%s:%d' % (host, port)
        self.session = requests.session()

    def get(self, url, params=None, headers=None):
        """Issue a GET against base_url + url and return the raw response."""
        response = self.session.get(
            self.base_url + url,
            params=params,
            headers={} if headers is None else headers,
        )
        return response

    def post(self, url, data, headers=None):
        """Issue a POST against base_url + url and return the raw response."""
        response = self.session.post(
            self.base_url + url,
            data=data,
            headers={} if headers is None else headers,
        )
        return response

    def close(self):
        """Clean up: close the underlying session and its connection pool."""
        self.session.close()
# Build a test class targeting the AgileOne application
class Agileone(unittest.TestCase):
    """Example API tests for the AgileOne web application (requires a live server)."""
    con = None  # shared Connection, created once per class
    @classmethod
    def setUpClass(cls):
        cls.con = Connection('192.168.1.105')
    @classmethod
    def tearDownClass(cls):
        cls.con.close()
    def test_01_access_agileone(self):
        """Open the AgileOne home page and check the login page is served."""
        resp = self.con.get('/agileone/')
        self.assertEqual(200, resp.status_code)
        self.assertEqual('OK', resp.reason)
        self.assertIn('AgileOne - Welcome to Login',
                      resp.content.decode('utf-8'))
    def test_02_login_agileone(self):
        """Log in to AgileOne with a mix of valid and invalid credentials."""
        test_data = [{'username': '', 'password': '', 'savelogin': False},
                     {'username': 'a', 'password': '', 'savelogin': False},
                     {'username': 'admin', 'password': '1a',
                      'savelogin': False},
                     {'username': 'ab', 'password': 'admin',
                      'savelogin': False},
                     {'username': 'admin', 'password': 'admin',
                      'savelogin': False}]
        for params in test_data:
            resp = self.con.post('/agileone/index.php/common/login', params)
            self.assertEqual(200, resp.status_code)
            self.assertEqual('OK', resp.reason)
            if params['username'] == 'admin' and params['password'] == 'admin':
                self.assertEqual('successful', resp.text)
            elif params['username'] == 'admin' and\
                    params['password'] != 'admin':
                self.assertEqual('password_invalid', resp.text)
            else:
                self.assertEqual('user_invalid', resp.text)
    def test_03_notice_add(self):
        """Publish a notice for several headline values."""
        # Running this exposes a server bug: a notice with an empty headline is
        # still added successfully.
        test_data = ['test', '', '0a!kl']
        date = time.strftime("%Y-%m-%d", time.localtime(time.time()))
        # Approach 1: collect assertion failures and raise them together at the end.
        errors = []
        for title in test_data:
            params = {'headline': title, 'expireddate': date, 'scope': 1}
            resp = self.con.post('/agileone/index.php/notice/add', params)
            try:
                self.assertEqual(200, resp.status_code)
                self.assertEqual('OK', resp.reason)
                if len(title):
                    self.assertGreater(int(resp.text), 0)
                else:
                    # headline is empty here: expect an error message back,
                    # not the numeric id of a successfully added notice.
                    self.assertRaises(ValueError, int, resp.text)
            except AssertionError as e:
                errors.append('The server return a response id "%s" when\
                    title is "%s".' % (resp.text, title))
        if len(errors):
            raise AssertionError(*errors)
        # Approach 2 (commented out): report each case via subTest instead.
        # for title in test_data:
        #     # Note: subTest is only supported on Python 3.
        #     with self.subTest(title, headline=title):
        #         params = {'headline': title, 'expireddate': date, 'scope': 1}
        #         resp = self.con.post('/agileone/index.php/notice/add', params)
        #         self.assertEqual(200, resp.status_code)
        #         self.assertEqual('OK', resp.reason)
        #         if len(title):
        #             self.assertGreater(int(resp.text), 0)
        #         else:
        #             # headline is empty here: expect an error, not a notice id.
        #             self.assertRaises(ValueError, int, resp.text)
# Run the suite and write a timestamped HTML report under ./report/.
if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(Agileone)
    now = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
    report_path = os.path.join(os.getcwd(), 'report')
    if not os.path.exists(report_path):
        os.makedirs(report_path)
    report = os.path.join(report_path, 'agileone_test_report_%s.html' % now)
    with open(report, "w", encoding='utf8') as f:
        runner = HTMLTestRunner.HTMLTestRunner(title='agileone',
                                               description='Test Report',
                                               stream=f, verbosity=2)
        runner.run(suite)
| [
"1291289080@qq.com"
] | 1291289080@qq.com |
6eae886e1c4593ab201a1e978c02d9c0bad900f6 | 335e91f503ae34fa4bd267396d1faabdf105fb5c | /data/process_data.py | b59d7a65666846f734e395335df2999201ab8b28 | [] | no_license | FRANZKAFKA13/Udacity-Data-Science-Proj-2 | d5c6786419796acfca73c62da3500679a4e4e37f | b87d58e18ff06f3dbd0e1a5d6e647ed4c4ff364e | refs/heads/master | 2023-03-03T09:33:38.619277 | 2021-02-14T19:50:38 | 2021-02-14T19:50:38 | 334,509,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,363 | py | import sys
import pandas as pd
from sqlalchemy import create_engine
def extract_data(messages_filepath, categories_filepath):
"""
Extract step of the ETL pipeline.
:param messages_filepath: The filepath of the message.csv file
:param categories_filepath: The filepath of the categories.csv file
:return: The two .csv files combined in a pandas dataframe
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
print(' ' + str(messages.shape[0]) + ' messages extracted from .csv.')
print(' ' + str(categories.shape[0]) + ' categories extracted from .csv.')
df = messages.merge(categories, how='left', on='id')
return df
def transform_data(df):
"""
Transform step of the ETL pipeline.
:param df: The dataframe created from the .csv files
:return: Returns a cleaned dataframe with categories transformed to dummy variables
"""
category_df = df['categories'].str.split(pat=";", expand=True)
categories_raw = category_df.loc[0]
categories_clean = [x[:-2] for x in categories_raw]
category_df.columns = categories_clean
print(' ' + str(category_df.shape[1]) + ' categories transformed to dummy variables.')
for category in category_df:
category_df[category] = category_df[category].str[-1]
category_df[category] = category_df[category].astype('int32')
df = df.drop(labels=['categories'], axis=1)
df = pd.concat([df, category_df], axis=1)
duplicate_count = df.duplicated('id').sum()
df = df.drop_duplicates('id')
print(' ' + str(duplicate_count) + ' duplicates dropped.')
faulty_message_count = (df['related'] == 2).sum()
df = df[df['related'] != 2]
print(' ' + str(faulty_message_count) + ' faulty messages dropped.')
return df
def load_data(df, database_filename):
"""
Extract step of the ETL pipeline, saving the cleaned dataset in a SQLite database.
:param df: The cleaned dataframe
:param database_filename: The target database for the load step of the ETL process
:return:
"""
engine = create_engine('sqlite:///' + str(database_filename))
try:
df.to_sql('messages_categorized', engine, index=False, if_exists='replace')
print('Cleaned data saved to database!')
except ValueError:
print("Warning: Database error.")
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Extracting data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = extract_data(messages_filepath, categories_filepath)
print('Transforming data...')
df = transform_data(df)
print('Loading data...\n DATABASE: {}'.format(database_filepath))
load_data(df, database_filepath)
else:
print('Please provide the filepaths of the messages and categories '
'datasets as the first and second argument respectively, as '
'well as the filepath of the database to save the cleaned data '
'to as the third argument. \n\nExample run command:\npython process_data.py disaster_messages.csv '
'disaster_categories.csv disaster_response.db')
if __name__ == '__main__':
main()
| [
"carstengranig@gmail.com"
] | carstengranig@gmail.com |
5bbc71c559fd0f6b93689846f8d9309ea7d173fc | da2e4fad2d906c045365b4caa69bbfb559aa64a9 | /av.py | a9fbbfbe8480f36c67ff5851acbec8bfcfcc93ff | [] | no_license | AGprofessional/PythonTradingBot | ab9fefca54e5420f087e9158b25de2cec8ff0ade | ecf63abd558d3923e865c91156e577dcc597a7dd | refs/heads/main | 2023-07-14T18:18:26.761859 | 2021-08-21T04:28:17 | 2021-08-21T04:28:17 | 398,464,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,738 | py | import pandas as pd
import requests
import io
from glom import glom
#this works
#https://www.alphavantage.co/documentation/#fx-intraday
#https://www.youtube.com/watch?v=d2kXmWzfS0w
from alpha_vantage.foreignexchange import ForeignExchange
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.cryptocurrencies import CryptoCurrencies
from alpha_vantage.techindicators import TechIndicators
import time
api_key = 'YVR9BGPFI9BA1KH9'
#forex
#fx = ForeignExchange(key=api_key, output_format='pandas')
#FXdata, FXmeta_data = fx.get_currency_exchange_intraday(from_symbol='EUR', to_symbol='USD',interval='1min', outputsize='full')
#crypto:
def getCrypto():
    """Fetch the latest 1-minute ETH/USD candle from Alpha Vantage.

    Publishes the open price as a function attribute (getCrypto.open); the
    other OHLCV fields are read into locals but not used or returned.
    """
    url = 'https://www.alphavantage.co/query?function=CRYPTO_INTRADAY&symbol=ETH&market=USD&interval=1min&outputsize=compact&apikey={}'.format(
        api_key)
    r = requests.get(url)
    Cjson = r.json()
    CtimeSeries = Cjson['Time Series Crypto (1min)']
    #print(CtimeSeries)
    #Cfirstkey=list(CtimeSeries.keys())[0]
    #Cfirstval=list(CtimeSeries.values())[0]
    #print(Cfirstkey, Cfirstval)
    # First (most recent) timestamped candle; relies on response insertion order.
    Cnext=next(iter(CtimeSeries.items()))
    # print(type(Cnext))
    CnextValue = Cnext[1]
    # NOTE(review): storing state on the function object is fragile; consider
    # returning the value instead.
    getCrypto.open = CnextValue['1. open']
    print("---",getCrypto.open)
    high = CnextValue['2. high']
    low = CnextValue['3. low']
    close = CnextValue['4. close']
    volume = CnextValue['5. volume']
    #print("c next open-->", CnextOpen)
#stocks
def getStocks():
    """Fetch intraday 1-minute candles for MRIN and read the latest OHLC values.

    The extracted columns are only bound to locals (nothing is returned);
    note that `open` shadows the builtin within this function.
    """
    stock = TimeSeries(key=api_key, output_format='pandas')
    Sdata, Smeta_data = stock.get_intraday(symbol='MRIN', interval='1min', outputsize='full')
    currCandle = Sdata.head(1)
    high = currCandle['2. high']
    low = currCandle['3. low']
    close = currCandle['4. close']
    open = currCandle['1. open']
def getData():
    """Fetch EMA(15), RSI(14) and MACD(12,26,9) indicator series for MRIN.

    The most recent rows are read into locals for inspection; nothing is
    returned or stored.
    """
    #getCrypto()
    #tech indicators
    tech = TechIndicators(key=api_key, output_format='pandas')
    TdataEMA, Tmeta_dataEMA = tech.get_ema(symbol='MRIN', interval='1min', time_period='15', series_type='close')
    currEMA = TdataEMA.tail(1)
    #print(currEMA)
    TdataRSI, Tmeta_dataRSI = tech.get_rsi(symbol='MRIN', interval='1min', time_period='14', series_type='close')
    currRSI = TdataRSI.tail(1)
    #print(TdataRSI)
    #print(currRSI)
    TdataMACD, Tmeta_dataMACD = tech.get_macd(symbol='MRIN', interval='1min', series_type='close', fastperiod='12', slowperiod='26', signalperiod='9')
    currMACD = TdataMACD.head(1)
    # print(TdataMACD)
    # print(currMACD)
#getData()
#print(data)
#time.sleep(60)
#minute=3
#while (minute>0):
# data, meta_data = ts.get_currency_exchange_intraday(from_symbol='EUR', to_symbol='USD',interval='1min', outputsize='compact')
# most_recent=data.iloc[0]
# print(most_recent)
# time.sleep(60)
# minute=minute-1
| [
"noreply@github.com"
] | noreply@github.com |
a9d2eeab18066cbc76789aba31dd51329d4f3780 | 9f0b9a8fe27336b8a231a33c6f693ed019a61b6e | /blacklinetest.py | f6eb1fa445e64a1ab1daa8cf7cc3bd44fcadc93b | [] | no_license | Duong-NVH/tool-set | e2647cf74fa085eab42fe3f19c852634629e956e | e7c5f7f4522e75eefe74e808a07ecf6575c4ebf5 | refs/heads/main | 2023-06-15T07:37:30.783287 | 2021-07-09T15:58:12 | 2021-07-09T15:58:12 | 382,987,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | import cv2
import numpy as np
# Detect straight lines in the image with the Hough transform and draw them in red.
img = cv2.imread('blacklinetest.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
# rho resolution 1 px, theta resolution 1 degree, accumulator threshold 500 votes.
lines = cv2.HoughLines(edges, 1, np.pi/180, 500)
# NOTE(review): modern OpenCV returns lines with shape (N, 1, 2), so iterating
# lines[0] draws only the first detected line — confirm the intended behavior.
for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    # Extend the (rho, theta) line 1000 px in both directions for drawing.
    x1 = int(x0 + 1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0 - 1000*(-b))
    y2 = int(y0 - 1000*(a))
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imwrite('houghlines3.jpg', img)
| [
"you@example.com"
] | you@example.com |
c45b5b7aec7cf94b501dec46b90a47078258aa52 | 70abec2e6d9b0c8effa27bdacb8bf9f3ae09c3ee | /api/opinion/apps.py | eb8c70b2f0878c1f1dcdd9cc04cb3e44a5c1ea09 | [] | no_license | lufepama/django-infojobs-clone-backend | 730a336c869c54873a952043241f7909c8be3dfe | 79d41f7f78b0ce8d71ae39760366ef4008769ed9 | refs/heads/master | 2023-08-22T06:17:47.392803 | 2021-10-16T01:35:14 | 2021-10-16T01:35:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class OpinionConfig(AppConfig):
    """Django application configuration for the "opinion" app."""
    name = 'opinion'
| [
"lufepama31@gmail.com"
] | lufepama31@gmail.com |
9b9812e5c6c7e010b1d4272c9d1eb1a2bd8e4047 | 98be616c9e077db836803ec56452564b291e3ff9 | /Python+/Python3/src/module/builtins/collection/lists/list_typed.py | 198006e815badc59d1731ae055d876a0bd1c6caf | [] | no_license | grozhnev/yaal_examples | adef871b0f23f866491cc41f6c376964ace3fbe2 | fe337ee825b3c063d7ed0c3e7e54bef440421894 | refs/heads/master | 2021-03-30T12:20:42.402132 | 2019-12-26T07:01:54 | 2019-12-26T15:08:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | # Strongly typed List
# Strongly typed list
from typing import List
typed_list: List[RuntimeError] = [RuntimeError()]
| [
"aleksei_iablokov@epam.com"
] | aleksei_iablokov@epam.com |
148dd580eb7efdf224a1b60c6aa54dc584d2a4e3 | 6aa8605b7fa86ec6568247941c8c0dbde3fc9bd1 | /bbn/camb_to_class.py | 6f6d9dc6a1f804fc4b41f2f7c689bcb787104286 | [] | no_license | anirbandas89/BDM_CLASS | dfa3a0a4a24060d622829ce4bd7c9d0e74780bd1 | 33404bdf7dea762413cb9dd3aff9957fae7221ad | refs/heads/master | 2020-05-16T09:31:47.898864 | 2019-06-08T07:25:09 | 2019-06-08T07:25:09 | 182,951,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | import numpy as np
# Convert a Parthenope BBN table to CLASS's expected 3-column format
# (columns 0, 2 and 3 of the input). NOTE: uses a Python 2 print statement,
# so this script is Python 2 only.
data = np.loadtxt('BBN_full_Parthenelope_880.3.dat')
f = open('BBN_full_Parthenelope_880.3_class_format.dat', 'w')
#print data[0]
for i in range(len(data)):
    f.write(str(data[i,0])+' ')
    f.write(str(data[i,2])+' ')
    f.write(str(data[i,3])+'\n')
print len(data)
#f.write( str(data[0,0])+' ' )
#f.write(str(data[0,2])+' ')
#f.write(str(data[0,3])+'\n')
#f.write(str(data[1,0]))
f.close()
"noreply@github.com"
] | noreply@github.com |
d75546d084acf87c9f272a76978aafe55c63e533 | 880495f69099e6c1efbcfd8fb763a5de6df12786 | /code/23-merge-k-sorted-lists.py | 922f35eca621e5ff82fc343c587b6cbf43d40454 | [] | no_license | shen-ee/LC | 69209f474e1ec4265d45e072cb65105b2a28c621 | 8664bde56453dea18f069725470d0401e65d45e2 | refs/heads/master | 2022-11-07T20:27:24.029396 | 2019-01-16T08:14:47 | 2019-01-16T08:14:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | import Queue
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """LeetCode 23: merge k sorted linked lists (ListNode supplied by the judge)."""
    def mergeKLists(self, lists):
        """
        Collect all values, sort them, and rebuild a single sorted list.
        O(N log N) time, O(N) extra space for N total nodes.
        :type lists: List[ListNode]
        :rtype: ListNode
        """
        # # Solution 1
        # q = Queue.PriorityQueue()
        # root = ListNode(None)
        # curnode = root
        # for node in lists:
        #     if node is None :
        #         continue
        #     q.put((node.val,node))
        # while q.qsize() > 0:
        #     curnode.next = q.get()[1]
        #     curnode = curnode.next
        #     if curnode.next:
        #         q.put((curnode.next.val, curnode.next))
        # return root.next
        # Solution 2
        # NOTE(review): the loop variable `list` shadows the builtin of the same name.
        ans = []
        for list in lists:
            while list:
                ans.append(list.val)
                list = list.next
        head = root = ListNode(None)  # dummy head; the result starts at root.next
        for i in sorted(ans):
            head.next = ListNode(i)
            head = head.next
        return root.next
| [
"jerry31@qq.com"
] | jerry31@qq.com |
00f9cf22aeaf7658d0bb303616a50711fe27b39a | f3e3f61d4cec65ad5b924c0564d99c541ccdb625 | /src_4/chapter04/migrations/0001_initial.py | 6e2758ee1268c74d81ccd745fea58bbf8c096da7 | [] | no_license | uxx53/bur1 | 15b0a96b4c0afdcbd1b4e230237725f2f013b593 | 1b013f87792204bdbc179e678caa80616eb62279 | refs/heads/master | 2016-09-05T11:13:21.706850 | 2015-08-14T10:36:43 | 2015-08-14T10:36:43 | 40,514,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: create the Author model for the chapter04 app."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=100)),
                ('title', models.CharField(max_length=3)),
                ('birth_date', models.DateField(blank=True, null=True)),
            ],
        ),
    ]
| [
"uxx@mail.ru"
] | uxx@mail.ru |
8cab5cb127fb2a4d84d2c87d1da7e4fe38a79492 | dfb306fbc6f85f4ff66ab659911909cf4dc1e7c1 | /180104.py | 03d231ed48308b93ea26c7137615b101c0937874 | [
"MIT"
] | permissive | majaldm/ML | d83e4577361e6d15725eb1d52caf8ead2f828de2 | 1f99f36c90b226acf151c3d65ed767f635a1469c | refs/heads/master | 2021-05-05T21:34:11.524219 | 2018-01-04T07:17:18 | 2018-01-04T07:17:18 | 115,596,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,271 | py |
# coding: utf-8
# In[14]:
get_ipython().system(' pip install --user flask')
# In[31]:
from flask import Flask, request, make_response, jsonify
app = Flask('flask-api')
@app.route('/')
def hello_world():
    """Root route: respond with a fixed JSON greeting (simple health check)."""
    return jsonify({'message': 'hello world'})
from ptt_crawler import PttCrawler
@app.route('/ptt_crawler', methods=['GET','POST'])
def run_crawler():
    """Run the PTT crawler: GET uses defaults (Gossiping, page 1); POST reads
    board/page from the request's JSON body. Returns the crawl result as JSON."""
    if request.method == 'GET':
        crawler = PttCrawler('Gossiping', page=1)
    elif request.method == 'POST':
        board = request.get_json().get('board','Gossiping')
        # NOTE(review): default page is the string '1' here but the int 1 on the
        # GET branch — confirm PttCrawler accepts both types.
        page = request.get_json().get('page','1')
        crawler = PttCrawler(board, page=page)
    result = crawler.run()
    return jsonify(result)
# Start the Flask development server on port 8000 when run as a script.
if __name__ == '__main__':
    app.run(port=8000)
# Exploratory notebook cells: introspect the Flask app/test-client objects,
# then exercise both routes through the test client.
# In[37]:
print(app)
print('-------------------')
print(vars(app))
print('-------------------')
print(', '.join("%s: %s" % item for item in vars(app).items()))
print('-------------------')
print(app.__dict__)
print('-------------------')
print(dir(app))
# In[43]:
print(app.test_client())
print('-------------------')
print(vars(app.test_client()))
print('-------------------')
print(', '.join("%s: %s" % item for item in vars(app.test_client()).items()))
print('-------------------')
print(app.test_client().__dict__)
print('-------------------')
print(dir(app))
# In[45]:
print(app.test_client().get('/'))
print('-------------------')
print(vars(app.test_client().get('/')))
print('-------------------')
print(', '.join("%s: %s" % item for item in vars(app.test_client().get('/')).items()))
print('-------------------')
print(app.test_client().get('/').__dict__)
print('-------------------')
print(dir(app))
# In[13]:
# Decode the '/' route's JSON response body.
import json
resp = app.test_client().get('/')
print(resp.data)
print(resp.data.decode())
print(json.loads(resp.data.decode()))
# In[10]:
# Run the crawler directly, outside the Flask route.
from ptt_crawler import PttCrawler
crawler = PttCrawler('Gossiping', page=1)
result = crawler.run()
print(result)
# In[11]:
resp = app.test_client().get('/ptt_crawler')
print(resp.data)
print(json.loads(resp.data.decode()))
# In[32]:
# POST variant: request a specific board and page via JSON.
resp = app.test_client().post(
    '/ptt_crawler',
    data=json.dumps({'board':'Baseball','page':3}),
    content_type='application/json'
)
print(json.loads(resp.data.decode()))
| [
"noreply@github.com"
] | noreply@github.com |
c6ecf3c59e8d315c1650c67532864af71b386c05 | 4e8b37ca121be19cd3b4e73a6592be2659d8134c | /backend/Techfesia2019/accounts/migrations/0005_auto_20190701_1708.py | a7113d24504a420a0d91b930fb768ac3673981f3 | [
"MIT"
] | permissive | masterashu/Techfesia2019 | 365b9b8dc1cb0bc6b613c72632e8b7a2a2a70905 | 8fd82c4867c8d870b82a936fc0f9e80f11ae03e7 | refs/heads/backend-event-registrations | 2020-06-10T20:58:40.850415 | 2019-07-27T23:00:21 | 2019-07-27T23:00:21 | 193,744,800 | 1 | 1 | MIT | 2019-06-29T17:12:31 | 2019-06-25T16:29:12 | Python | UTF-8 | Python | false | false | 466 | py | # Generated by Django 2.2.2 on 2019-07-01 11:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter ``Institute.name`` to a unique
    CharField (max_length=200) with a default institute name."""

    dependencies = [
        ('accounts', '0004_auto_20190701_0956'),
    ]

    operations = [
        migrations.AlterField(
            model_name='institute',
            name='name',
            field=models.CharField(default='Indian Institute of Information Technology, Sri City', max_length=200, unique=True),
        ),
    ]
| [
"masterashu@live.in"
] | masterashu@live.in |
9c5a610bba7b0c3825198a36fe15ea9d099394bc | 172b9f849cbe48ef85cf7cca776a432bdad4587b | /lunchy/sublunchy/shortcuts.py | e702d0e3db66ae63537816ac036361452bb41069 | [] | no_license | cpasbanal/lunchy | 7ec9a0d1dba9e0dc6ed0b84698da928046829eb8 | c0f10c25866640bc8f3c8a34f97bc6fe2397959f | refs/heads/master | 2021-06-01T02:49:07.479605 | 2016-08-17T12:56:50 | 2016-08-17T12:56:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | ''' Create shortcuts in and out between user message and wit
out : message from Wit to user --> add keys to quick replies
in : use the keys to force a story in Wit
'''
# TODO store in database the mapping and make a web interface of this module
# import the logging library
import logging
# Get an instance of a logger
logger = logging.getLogger("lunchy")
class Shortcut():
    """Maps user-facing quick-reply labels to Wit story keys.

    Per the module header: "out" shortcuts decorate messages from Wit to
    the user with quick-reply keys; the keys force a story in Wit on the
    way back in.
    """
    def __init__(self, *args, **kwargs):
        # Outgoing shortcuts: label shown to the user -> Wit story key.
        self.keys_out = {
            "Quelle est la météo ?" : "weather",
            "Un déj aléatoire ?" : "randomlunch",
} | [
"jcroyere@gmail.com"
] | jcroyere@gmail.com |
91aefd24b6e56d4907a756aa0de57697ad4bcc97 | cead5aa67c0c45038917f306fefe31b53b5b639f | /temp/PyQt/Qthread.py | b5f82a26132421016342acaef9e4b384791399a7 | [] | no_license | songaal/AutoLogin | 0a79400a88f1823aebfb819c98b489aeb267a679 | 778f30dd92dc4dba3d0a632511113bfe145d1b94 | refs/heads/master | 2022-02-27T23:13:54.152894 | 2019-09-22T16:27:41 | 2019-09-22T16:27:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
''' https://freeprog.tistory.com/351
'''
class MyMainGUI(QDialog):
    """Bare dialog: a text area above four push buttons, stacked vertically.

    Widgets are exposed as attributes so a subclass can wire them to slots.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.qtxt1 = QTextEdit(self)
        self.btn1 = QPushButton("Start", self)
        self.btn2 = QPushButton("Stop", self)
        self.btn3 = QPushButton("add 100", self)
        self.btn4 = QPushButton("send instance", self)
        # Stack the widgets top-to-bottom in declaration order.
        layout = QVBoxLayout()
        for widget in (self.qtxt1, self.btn1, self.btn2, self.btn3, self.btn4):
            layout.addWidget(widget)
        self.setLayout(layout)
        self.setGeometry(100, 50, 300, 300)
class Test:
    """Simple data holder passed across the signal/slot boundary.

    Bug fix: the original ``__init__`` assigned plain locals
    (``name = ""``), which vanished immediately, so instances never got
    the attributes. Bind them to the instance instead.
    """

    def __init__(self):
        self.name = ""  # display name carried with the signal
        self.job = ""   # job label carried with the signal
class MyMain(MyMainGUI):
    """Main window: wires the GUI buttons to a background Worker thread."""

    # Class-level signal declarations (required by PyQt's metaclass).
    add_sec_signal = pyqtSignal()
    send_instance_signal = pyqtSignal("PyQt_PyObject")

    def __init__(self, parent=None):
        super().__init__(parent)
        # One button per slot, connected in declaration order.
        wiring = (
            (self.btn1, self.time_start),
            (self.btn2, self.time_stop),
            (self.btn3, self.add_sec),
            (self.btn4, self.send_instance),
        )
        for button, slot in wiring:
            button.clicked.connect(slot)
        # Background worker plus the cross-thread signal connections.
        self.th = Worker(parent=self)
        self.th.sec_changed.connect(self.time_update)
        self.add_sec_signal.connect(self.th.add_sec)
        self.send_instance_signal.connect(self.th.recive_instance_signal)
        self.show()

    @pyqtSlot()
    def time_start(self):
        self.th.start()
        self.th.working = True

    @pyqtSlot()
    def time_stop(self):
        self.th.working = False

    @pyqtSlot()
    def add_sec(self):
        print(".... add signal emit ....")
        self.add_sec_signal.emit()

    @pyqtSlot(str)
    def time_update(self, msg):
        self.qtxt1.append(msg)

    @pyqtSlot()
    def send_instance(self):
        # Demonstrates passing an arbitrary Python object through a signal.
        payload = Test()
        payload.name = "SuperPower!!!"
        payload.job = "cta"
        self.send_instance_signal.emit(payload)
class Worker(QThread):
    """Background thread emitting an elapsed-seconds message once per second."""

    sec_changed = pyqtSignal(str)

    def __init__(self, sec=0, parent=None):
        super().__init__()
        self.main = parent
        self.working = True  # cleared by the GUI to stop the run loop
        self.sec = sec

    def __del__(self):
        print('end thread.....')
        self.wait()

    def run(self):
        # Emit, sleep one second, then advance the counter -- until stopped.
        while True:
            if not self.working:
                break
            message = 'time (secs) : {}'.format(self.sec)
            self.sec_changed.emit(message)
            self.sleep(1)
            self.sec += 1

    @pyqtSlot()
    def add_sec(self):
        print('add sec...')
        self.sec += 100

    @pyqtSlot("PyQt_PyObject")
    def recive_instance_signal(self, inst):
        print(inst.name)
        print(inst.job)
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    # Keep a reference to the dialog so it is not garbage-collected
    # while the event loop runs (the window shows itself in __init__).
    form = MyMain()
app.exec_() | [
"taxkmj@naver.com"
] | taxkmj@naver.com |
bc863a6118cbe18297247f263fbc963beca38953 | 0940b1cdfee8d7deb488f396736b0df7c01b8bb6 | /Exam/faculty/forms.py | aa76c4ab78faa88142ef132c0f9d3a64fe8268e3 | [] | no_license | srilaasya/Online-examination-system | 500767f77c2e2a261ddaf90029542d4e9d5312ae | f6ef76f6569141c02c861c67c2b5f7aa375c719a | refs/heads/main | 2023-08-28T21:14:48.884215 | 2021-11-02T23:57:38 | 2021-11-02T23:57:38 | 395,973,123 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | from django import forms
from .models import FacultyInfo
from django.contrib.auth.models import User
class FacultyForm(forms.ModelForm):
    """Sign-up form for a faculty account, backed by Django's ``User`` model.

    NOTE(review): a plain ModelForm save would store the raw password --
    confirm the view calls ``set_password`` before saving.
    """
    class Meta():
        model = User
        fields = ['username', 'email', 'password']
        # Widgets carry the 'form-control' CSS class (Bootstrap-style inputs).
        widgets = {
            'password': forms.PasswordInput(attrs = {'id':'passwordfield','class':'form-control'}),
            'email' : forms.EmailInput(attrs = {'id':'emailfield','class':'form-control'}),
            'username' : forms.TextInput(attrs = {'id':'usernamefield','class':'form-control'})
        }
class FacultyInfoForm(forms.ModelForm):
    """Extra profile details (address, subject, picture) for a faculty member."""
    class Meta():
        model = FacultyInfo
        fields = ['address','subject','picture']
        # 'picture' keeps the default file widget; only text inputs are styled.
        widgets = {
            'address': forms.Textarea(attrs = {'class':'form-control'}),
            'subject' : forms.TextInput(attrs = {'class':'form-control'})
        }
| [
"nutheti.laasya@gmail.com"
] | nutheti.laasya@gmail.com |
8af8855e074aad7b7515f888ec0f24f85164debb | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/web/v20210115/web_app_relay_service_connection_slot.py | dda61e329470f348eb6bc50714d28126c870113f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,719 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['WebAppRelayServiceConnectionSlotArgs', 'WebAppRelayServiceConnectionSlot']
@pulumi.input_type
class WebAppRelayServiceConnectionSlotArgs:
    """Input arguments for a WebAppRelayServiceConnectionSlot resource.

    NOTE: generated by the Pulumi SDK Generator (see the file header);
    only comments/docstrings have been added here -- do not hand-edit logic.
    """
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 slot: pulumi.Input[str],
                 biztalk_uri: Optional[pulumi.Input[str]] = None,
                 entity_connection_string: Optional[pulumi.Input[str]] = None,
                 entity_name: Optional[pulumi.Input[str]] = None,
                 hostname: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 resource_connection_string: Optional[pulumi.Input[str]] = None,
                 resource_type: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a WebAppRelayServiceConnectionSlot resource.
        :param pulumi.Input[str] name: Name of the app.
        :param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
        :param pulumi.Input[str] slot: Name of the deployment slot. If a slot is not specified, the API will create or update a hybrid connection for the production slot.
        :param pulumi.Input[str] kind: Kind of resource.
        """
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "slot", slot)
        # Optional fields are stored only when explicitly supplied.
        if biztalk_uri is not None:
            pulumi.set(__self__, "biztalk_uri", biztalk_uri)
        if entity_connection_string is not None:
            pulumi.set(__self__, "entity_connection_string", entity_connection_string)
        if entity_name is not None:
            pulumi.set(__self__, "entity_name", entity_name)
        if hostname is not None:
            pulumi.set(__self__, "hostname", hostname)
        if kind is not None:
            pulumi.set(__self__, "kind", kind)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if resource_connection_string is not None:
            pulumi.set(__self__, "resource_connection_string", resource_connection_string)
        if resource_type is not None:
            pulumi.set(__self__, "resource_type", resource_type)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Name of the app.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        Name of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter
    def slot(self) -> pulumi.Input[str]:
        """
        Name of the deployment slot. If a slot is not specified, the API will create or update a hybrid connection for the production slot.
        """
        return pulumi.get(self, "slot")

    @slot.setter
    def slot(self, value: pulumi.Input[str]):
        pulumi.set(self, "slot", value)

    @property
    @pulumi.getter(name="biztalkUri")
    def biztalk_uri(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "biztalk_uri")

    @biztalk_uri.setter
    def biztalk_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "biztalk_uri", value)

    @property
    @pulumi.getter(name="entityConnectionString")
    def entity_connection_string(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "entity_connection_string")

    @entity_connection_string.setter
    def entity_connection_string(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "entity_connection_string", value)

    @property
    @pulumi.getter(name="entityName")
    def entity_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "entity_name")

    @entity_name.setter
    def entity_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "entity_name", value)

    @property
    @pulumi.getter
    def hostname(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "hostname")

    @hostname.setter
    def hostname(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "hostname", value)

    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="resourceConnectionString")
    def resource_connection_string(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "resource_connection_string")

    @resource_connection_string.setter
    def resource_connection_string(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_connection_string", value)

    @property
    @pulumi.getter(name="resourceType")
    def resource_type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "resource_type")

    @resource_type.setter
    def resource_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_type", value)
class WebAppRelayServiceConnectionSlot(pulumi.CustomResource):
    """Hybrid Connection for an App Service app (deployment-slot scoped).

    NOTE: generated by the Pulumi SDK Generator; only comments/docstrings
    have been added here -- do not hand-edit logic.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 biztalk_uri: Optional[pulumi.Input[str]] = None,
                 entity_connection_string: Optional[pulumi.Input[str]] = None,
                 entity_name: Optional[pulumi.Input[str]] = None,
                 hostname: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 resource_connection_string: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 resource_type: Optional[pulumi.Input[str]] = None,
                 slot: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Hybrid Connection for an App Service app.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] kind: Kind of resource.
        :param pulumi.Input[str] name: Name of the app.
        :param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
        :param pulumi.Input[str] slot: Name of the deployment slot. If a slot is not specified, the API will create or update a hybrid connection for the production slot.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: WebAppRelayServiceConnectionSlotArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Hybrid Connection for an App Service app.
        :param str resource_name: The name of the resource.
        :param WebAppRelayServiceConnectionSlotArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the args-object and keyword-argument overloads.
        resource_args, opts = _utilities.get_resource_args_opts(WebAppRelayServiceConnectionSlotArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 biztalk_uri: Optional[pulumi.Input[str]] = None,
                 entity_connection_string: Optional[pulumi.Input[str]] = None,
                 entity_name: Optional[pulumi.Input[str]] = None,
                 hostname: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 resource_connection_string: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 resource_type: Optional[pulumi.Input[str]] = None,
                 slot: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: required properties must be present.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = WebAppRelayServiceConnectionSlotArgs.__new__(WebAppRelayServiceConnectionSlotArgs)
            __props__.__dict__["biztalk_uri"] = biztalk_uri
            __props__.__dict__["entity_connection_string"] = entity_connection_string
            __props__.__dict__["entity_name"] = entity_name
            __props__.__dict__["hostname"] = hostname
            __props__.__dict__["kind"] = kind
            if name is None and not opts.urn:
                raise TypeError("Missing required property 'name'")
            __props__.__dict__["name"] = name
            __props__.__dict__["port"] = port
            __props__.__dict__["resource_connection_string"] = resource_connection_string
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["resource_type"] = resource_type
            if slot is None and not opts.urn:
                raise TypeError("Missing required property 'slot'")
            __props__.__dict__["slot"] = slot
            __props__.__dict__["type"] = None
        # Aliases map every historical API-version type token to this resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20210115:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-native:web:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-nextgen:web:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-native:web/v20150801:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-native:web/v20201201:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20201201:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-native:web/v20210101:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20210101:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-native:web/v20210201:WebAppRelayServiceConnectionSlot"), pulumi.Alias(type_="azure-nextgen:web/v20210201:WebAppRelayServiceConnectionSlot")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(WebAppRelayServiceConnectionSlot, __self__).__init__(
            'azure-native:web/v20210115:WebAppRelayServiceConnectionSlot',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppRelayServiceConnectionSlot':
        """
        Get an existing WebAppRelayServiceConnectionSlot resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Output-only lookup: all properties resolve from the provider state.
        __props__ = WebAppRelayServiceConnectionSlotArgs.__new__(WebAppRelayServiceConnectionSlotArgs)
        __props__.__dict__["biztalk_uri"] = None
        __props__.__dict__["entity_connection_string"] = None
        __props__.__dict__["entity_name"] = None
        __props__.__dict__["hostname"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["port"] = None
        __props__.__dict__["resource_connection_string"] = None
        __props__.__dict__["resource_type"] = None
        __props__.__dict__["type"] = None
        return WebAppRelayServiceConnectionSlot(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="biztalkUri")
    def biztalk_uri(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "biztalk_uri")

    @property
    @pulumi.getter(name="entityConnectionString")
    def entity_connection_string(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "entity_connection_string")

    @property
    @pulumi.getter(name="entityName")
    def entity_name(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "entity_name")

    @property
    @pulumi.getter
    def hostname(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "hostname")

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[Optional[str]]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def port(self) -> pulumi.Output[Optional[int]]:
        return pulumi.get(self, "port")

    @property
    @pulumi.getter(name="resourceConnectionString")
    def resource_connection_string(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "resource_connection_string")

    @property
    @pulumi.getter(name="resourceType")
    def resource_type(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "resource_type")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | noreply@github.com |
da2965f9791254063164c9442a3f7a753f730ebb | aa6422117b534e4f4eed197b71f9fcf00eb1983a | /build/flexbe_behavior_engine/flexbe_input/catkin_generated/pkg.installspace.context.pc.py | 1aee055bb7df44fed4d20a68d3c6beee1540c93f | [] | no_license | Sinchiguano/StateMachineFlexBe | c07385b09e1ab15e88e894da8fd021d1cbf0de28 | d637acf2f26a3f0d83ef4f2d34a2636dff2515f6 | refs/heads/master | 2020-09-26T02:49:46.882388 | 2019-12-05T16:44:23 | 2019-12-05T16:44:23 | 226,146,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/casch/catkin_ws/install/include".split(';') if "/home/casch/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "flexbe_input"
PROJECT_SPACE_DIR = "/home/casch/catkin_ws/install"
PROJECT_VERSION = "1.2.2"
| [
"cesarsinchiguano@hotamil.es"
] | cesarsinchiguano@hotamil.es |
2d337c23eb9d0b0e67728ffd5b44be81efb0d9cf | 1f32a4a5d54e4690b68e8e3e92524535bb1ca8f0 | /akai-to-wLightBox.py | adeb80dd9cac282e14f0e22fc0955abcfc30cc9e | [] | no_license | EdwardEisenhauer/AKAI-LPD8 | afeb2d193141e8b2f8d72c1ed84cf3273aa17604 | 2dd6db4926d12e15b1227997d92cb4a1aa52f4d2 | refs/heads/master | 2023-01-14T09:23:04.085039 | 2020-03-27T15:31:42 | 2020-03-27T15:31:42 | 247,360,448 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,665 | py | from threading import Thread
from queue import Queue
from time import sleep
from blebox import LightBox
from akai import Akai
import sys
ip = sys.argv[1]      # wLightBox IP address, passed on the command line
pasek = LightBox(ip)  # "pasek" is Polish for (LED) strip
akai = Akai()         # AKAI LPD8 MIDI controller
# NOTE(review): the string below was probably meant to be a docstring for
# producer(); at module level it is a harmless no-op expression statement.
"""
Read knob values and send the GET to set the Lights.
Monitor the knobs values. If anyone changes send a GET to set the lights. Wait 100ms before next packet.
TODO:
- Detect incoming MIDI data at the mido level?
- Do something about this ugly time.sleep(0.1)
- Introduce an extrapolating method
:return:
"""
def knob_to_ms(value):
    """Rescale a MIDI knob reading (0..127) linearly onto 0..1000 ms.

    The result is truncated toward zero by ``int()``.
    """
    KNOB_SPAN = 127 - 0  # full knob travel
    MS_SPAN = 1000 - 0   # target duration range, milliseconds
    return int(value / KNOB_SPAN * MS_SPAN)
def knob_to_effect(value):
    """Map a MIDI knob reading (0..127) onto a wLightBox effect id (0..6)."""
    KNOB_SPAN = 127 - 0
    EFFECT_SPAN = 6 - 0  # effect ids run from 0 through 6
    return int(value / KNOB_SPAN * EFFECT_SPAN)
def producer(output_queue):
    """Poll the AKAI controller and enqueue colour updates / push settings.

    Knobs 1-4 (doubled to 0..254) are R, G, B, W; if only W moved the
    white channel overrides RGB. Knobs 5-8 set fade/step durations and the
    effect id directly on the LightBox. Pads 1-4 force primary colours.
    """
    [r, g, b, w] = [i * 2 for i in akai.knobs][:4]
    w_prev = w
    while True:
        # TODO: replace this polling loop with event-driven MIDI handling.
        if akai.knobs_color_change:
            [r, g, b, w] = [i * 2 for i in akai.knobs][:4]  # Get RGB values from the AKAI
            if w == w_prev:
                output_queue.put([r, g, b])
            else:
                # White knob moved: drive all three channels with W.
                output_queue.put([w, w, w])
                w_prev = w
            akai.knobs_color_change = False
        elif akai.knobs_durations_change:
            [color_fade, effect_fade, effect_step] = map(knob_to_ms, akai.knobs[4:7])
            effect_id = knob_to_effect(akai.knobs[7])
            pasek.color_fade = color_fade
            pasek.effect_fade = effect_fade
            pasek.effect_step = effect_step
            pasek.set_durations()  # It shouldn't be like this XD
            pasek.effect_id = effect_id
            pasek.set_effect()  # if this doesn't blow up it'll be a miracle XD
            akai.knobs_durations_change = False
            pasek.print_state()
        elif akai.pads_change:
            if akai.pads[3]:
                output_queue.put([255, 255, 255])
            else:
                # Pads 1-3 toggle full R / G / B respectively.
                output_queue.put(map(lambda x: 255 if x else 0,akai.pads[:3]))
            akai.pads_change = False
        sleep(0.1)  # Do something about it! (hard-coded 100 ms poll period)
def consumer(input_queue):
    """Forever pull RGB triples off the queue and push them to the strip."""
    while True:
        red, green, blue = input_queue.get()
        pasek.set_colors(red, green, blue)
        input_queue.task_done()
# def print_state():
# while True:
# akai.print_state()
# sleep(3)
if __name__ == '__main__':
    # Producer/consumer share one queue; a third thread listens for MIDI.
    # NOTE(review): threads are non-daemon, so the process only exits when
    # every loop ends -- confirm this is intended.
    q = Queue()
    Thread(target=producer, args=(q,)).start()
    Thread(target=consumer, args=(q,)).start()
    Thread(target=akai.listen).start()
| [
"sergiusz.warga@gmail.com"
] | sergiusz.warga@gmail.com |
8808eb29a9c4f90993a64cfb9ff593217d62ad7e | 1da96c1ffecc565307779e3a011e2af2bc75dbcf | /venv/Scripts/django-admin.py | 825265bd4c5465fc94f5a2bd2dc2ea1b1afd5c95 | [] | no_license | yfs666/py3workspace | 058809e11e6d8e6a8c4f96e5ade679c82c5538b2 | d64a9165fac239fb7e49f4d4b99921bdc2ed07d2 | refs/heads/master | 2020-09-03T04:43:13.072897 | 2019-11-10T14:32:38 | 2019-11-10T14:32:38 | 219,387,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | #!D:\tools\py3workspace\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"yangfengshuai@xiangshang360.com"
] | yangfengshuai@xiangshang360.com |
d7aab2532f25c287a63c8bd8d282163103684f29 | d7567ee75e48bd7872a1c332d471ff3ce7433cb9 | /checkout/urls.py | 233bfb99df176d4ab47c4bae44affd20f8155e9c | [] | no_license | sarahbarron/ecommerce | 30cd0ff26afa5ec9031165b63ecde8c0f7f6086f | aba5370fd731e7ec9e677041504f6c3457b0d405 | refs/heads/master | 2020-03-17T21:10:56.385918 | 2020-01-17T18:35:28 | 2020-01-17T18:35:28 | 133,947,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from django.conf.urls import url
from .views import checkout
urlpatterns = [
url(r'^$', checkout, name='checkout'),
] | [
"sarahflavin@yahoo.com"
] | sarahflavin@yahoo.com |
a19eba7bf0ec98307d379e2f857ae2b25e827d02 | c8b64c3c4c0639eae0046f9c6de06a8a3cd7cef7 | /Code/Othello/bin/easy_install | 359dee08207374ef40fc885f7b3c83642e623946 | [] | no_license | nicholaskrieg/MLAI | 24bf1c2f72eb0b3c54e109d7f65e46794d4f1846 | 21fe04af11643d6dd3d3e2363d9d1bb4f0daa45b | refs/heads/master | 2021-05-09T06:38:24.669883 | 2018-03-19T21:29:25 | 2018-03-19T21:29:25 | 119,329,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | #!/Users/nicholaskrieg/Documents/CS/MLAI/Orthello/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"nicholas.krieg@gmail.com"
] | nicholas.krieg@gmail.com | |
18b985fd2a25b161ab12d7f4f4e09fc83c30cc2e | 3b21cbe5320137a3d8f7da40558294081211f63f | /Chapter04/AutoencMnist.py | daebd29ec15d7b88a838e6b5aa4a4d8016f69927 | [
"MIT"
] | permissive | Evelynatrocks/Python-Machine-Learning-Cookbook-Second-Edition | d06812bba0a32a9bd6e5e8d788769a07d28084cd | 99d8b799dbfe1d9a82f0bcc3648aaeb147b7298f | refs/heads/master | 2023-04-06T20:23:05.384943 | 2021-01-18T12:06:36 | 2021-01-18T12:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,181 | py | from keras.datasets import mnist
# --- Load MNIST and inspect the splits -------------------------------------
(XTrain, YTrain), (XTest, YTest) = mnist.load_data()
print('XTrain shape = ', XTrain.shape)
print('XTest shape = ', XTest.shape)
print('YTrain shape = ', YTrain.shape)
print('YTest shape = ', YTest.shape)
import numpy as np
print('YTrain values = ', np.unique(YTrain))
print('YTest values = ', np.unique(YTest))
unique, counts = np.unique(YTrain, return_counts=True)
print('YTrain distribution = ', dict(zip(unique, counts)))
unique, counts = np.unique(YTest, return_counts=True)
# Bug fix: this line printed the *test* distribution under a 'YTrain' label.
print('YTest distribution = ', dict(zip(unique, counts)))
import matplotlib.pyplot as plt
# Side-by-side histograms of the label distributions.
plt.figure(1)
plt.subplot(121)
plt.hist(YTrain, alpha=0.8, ec='black')
plt.xlabel("Classes")
plt.ylabel("Number of occurrences")
plt.title("YTrain data")
plt.subplot(122)
plt.hist(YTest, alpha=0.8, ec='black')
plt.xlabel("Classes")
plt.ylabel("Number of occurrences")
plt.title("YTest data")
plt.show()
# Scale pixels to [0, 1] and flatten each 28x28 image into a 784-vector.
XTrain = XTrain.astype('float32') / 255
XTest = XTest.astype('float32') / 255
XTrain = XTrain.reshape((len(XTrain), np.prod(XTrain.shape[1:])))
XTest = XTest.reshape((len(XTest), np.prod(XTest.shape[1:])))
from keras.layers import Input
from keras.layers import Dense
from keras.models import Model
# Single-hidden-layer autoencoder: 784 -> 32 (relu) -> 784 (sigmoid).
InputModel = Input(shape=(784,))
EncodedLayer = Dense(32, activation='relu')(InputModel)
DecodedLayer = Dense(784, activation='sigmoid')(EncodedLayer)
AutoencoderModel = Model(InputModel, DecodedLayer)
AutoencoderModel.summary()
AutoencoderModel.compile(optimizer='adadelta', loss='binary_crossentropy')
# Train to reconstruct the inputs (targets == inputs).
history = AutoencoderModel.fit(XTrain, XTrain,
                               batch_size=256,
                               epochs=100,
                               shuffle=True,
                               validation_data=(XTest, XTest))
# Compare originals (top row) with reconstructions (bottom row).
DecodedDigits = AutoencoderModel.predict(XTest)
n = 5
plt.figure(figsize=(20, 4))
for i in range(n):
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(XTest[i+10].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(DecodedDigits[i+10].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show() | [
"joecasillas001@gmail.com"
] | joecasillas001@gmail.com |
ab932c024897c581d9bb5dd95eef2ee759d421c2 | bac5ecb5eef06dfe76b9b7bff80faee7485c67dd | /.history/django_vuejs_tutorial/django_vuejs/dataiku/models_20200829125121.py | c7e0a2d229648bf8a2326333ab23d5a72731658d | [] | no_license | MChrys/dataiku | fb1e48401d544cbcc5a80a0a27668dc9d2d196e5 | 6091b24f565224260a89246e29c0a1cbb72f58ed | refs/heads/master | 2022-12-16T11:06:13.896643 | 2020-09-12T19:03:51 | 2020-09-12T19:03:51 | 293,287,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,147 | py | from django.db import models
from django.utils import timezone
class Task(models.Model):
    '''
    A task that can be applied to a specific account.
    Tasks form a tree through the self-referencing ``supertask`` FK.
    '''
    name = models.CharField(max_length=60, primary_key=True)
    description = models.CharField(max_length=510, null=True, blank=True)
    # Optional parent task; detached (set to NULL) when the parent is deleted.
    supertask = models.ForeignKey('self',null=True, blank=True, on_delete=models.SET_NULL)
class Dataiku_account(models.Model):
    """A Dataiku login, identified by its e-mail address."""
    # NOTE(review): 'avaible' is a typo for 'available', but stored rows
    # already use this value -- do not change without a data migration.
    STATUS = (
        ('in operation', 'in operation'),
        ('avaible', 'avaible')
    )
    email = models.CharField(max_length=60, primary_key=True)
    # NOTE(review): password appears to be stored in plain text -- confirm
    # whether hashing is handled elsewhere.
    password = models.CharField(max_length=255, null=True, blank=True)
    #task = models.CharField(max_length=255, null=True, blank=True)
    status = models.CharField(max_length=255, null=True, blank=True, choices = STATUS)

    def __str__(self):
        return self.email
class Operation(models.Model):
    '''
    A running Task, e.g. "validate this course" or "take this QCM".
    '''
    # Stamped once on first save; editable=False hides it from forms.
    creation = models.DateTimeField( editable = False)
    STATUS = (
        ('pending', 'pending'),
        ('running', 'running'),
        ('done', 'done')
    )
    task = models.OneToOneField(Task, null=True, blank=True,on_delete=models.SET_NULL)
    account = models.ForeignKey(Dataiku_account, on_delete=models.CASCADE)
    # NOTE(review): field name 'statut' (sic) kept -- renaming needs a migration.
    statut = models.CharField(max_length=255, null=True, blank=True, choices = STATUS)

    def save(self, *args, **kwargs):
        """Set ``creation`` on first save, then delegate to Model.save().

        Bug fix: the original called ``super(User, self).save`` -- ``User``
        is undefined in this module, so every save raised NameError.
        """
        if not self.id:
            self.creation = timezone.now()
        return super().save(*args, **kwargs)
class QCM(models.Model):
    """A multiple-choice quiz located inside a learning path / course."""
    LearningPathUrl = models.CharField(max_length=255, null=True, blank=True)
    LearningPathName = models.CharField(max_length=255, null=True, blank=True)
    CourseUrl = models.CharField(max_length=255, null=True, blank=True)
    CourseName = models.CharField(max_length=255, null=True, blank=True)
    QcmUrl = models.CharField(max_length=255, null=True, blank=True)
    QcmName = models.CharField(max_length=255, null=True, blank=True)
    # NOTE(review): 'Lenght' (sic) -- presumably the number of questions;
    # confirm before relying on it.
    Lenght = models.IntegerField(default =0)
    Verif = models.IntegerField(default =0)
    status = models.BooleanField(default = False)

    def __str__(self):
        return "{}_{}_{}".format(self.LearningPathName, self.CourseName,self.QcmName)
class Session(models.Model):
    """One timed exam session taken by a Dataiku account."""
    # NOTE(review): STATUS is declared but not wired to any field here.
    STATUS = (
        ('running','running'),
        ('finish','finish')
    )
    email = models.ForeignKey(Dataiku_account , on_delete=models.CASCADE)
    start = models.DateTimeField(editable =False)
    countdown = models.CharField(max_length=10, blank=True, null=True, default= '59:59')
    score = models.IntegerField(default=0)
    lenght = models.IntegerField(default=0)

    def save(self, *args, **kwargs):
        """Set ``start`` on first save, then delegate to Model.save().

        Bug fix: the original called ``super(User, self).save`` -- ``User``
        is undefined in this module, so every save raised NameError.
        """
        if not self.id:
            self.start = timezone.now()
        return super().save(*args, **kwargs)
# Create your models here.
class Question(models.Model):
    """A QCM question, uniquely identified by its text."""
    STATUS = (
        ('pending', 'pending'),
        ('check', 'check')
    )
    CHOICES_TYPE = (
        ('checkbox', 'checkbox'),
        ('radio', 'radio')
    )
    text = models.CharField(max_length=255, primary_key=True)
    #session = models.ForeignKey(Session, null=True, blank=True,on_delete = models.SET_NULL)
    status = models.CharField(max_length=255, null=True, blank=True, choices = STATUS)
    # radio = single answer, checkbox = multiple answers allowed
    choice_type = models.CharField(max_length=255, null=True, blank=True, default= "radio" ,choices = CHOICES_TYPE)
    max_choices = models.IntegerField(default = 0)
    # cursor: index of the answer currently being explored (1-based).
    cursor = models.IntegerField(default = 1)
    qcm_link = models.ForeignKey(QCM, null=True, blank=True, on_delete=models.CASCADE)

    def __str__(self):
        return self.text
class Run(models.Model):
    '''
    A Run is one try at a Question within a Session.
    '''
    STATUS = (
        (True, 'True'),
        (False, 'False')
    )
    id = models.AutoField(primary_key=True)
    #creation = models.DateTimeField(editable =False)
    question_link = models.ForeignKey(Question, null=True, blank=True, on_delete=models.CASCADE)
    session_link = models.ForeignKey(Session, null=True, blank=True, on_delete=models.CASCADE)
    # True presumably means the try was answered correctly — TODO confirm with callers.
    status = models.BooleanField(default = False)
class Posibility(models.Model):
    """One of up to four answer options for a Question.

    NOTE: class name misspelling ("Posibility") kept — renaming would change
    the DB table and break the to_field='rank' FK in Answer.
    """
    CHOICES = (
        (1, '1'),
        (2, '2'),
        (3, '3'),
        (4, '4'),
    )
    # Position of the option in the displayed list (1-4); Answer references it via to_field.
    rank = models.IntegerField( null=True, default= 1, blank=True, choices = CHOICES)
    question_link = models.ForeignKey(Question, null=True, blank=True,on_delete=models.CASCADE)
    text = models.CharField(max_length=255, null=True, blank=True)
    #rank = models.ForeignKey(Answer, null=True, blank=True,on_delete= models.SET_NULL)
    def __str__(self):
        return self.text
class Answer(models.Model):
    """The Posibility (selected by rank) a user picked during one Run."""
    #choice = models.IntegerField( null=True, default= 1, blank=True, choices = CHOICES)
    choice = models.ForeignKey(Posibility,to_field='rank',blank=True,null=True,on_delete= models.SET_NULL)
    connected_run = models.ForeignKey(Run,to_field='id',blank=True,null=True,on_delete= models.SET_NULL)
    def __str__(self):
        # BUG FIX: __str__ must return a str; the original returned the related
        # Posibility instance (or None), which makes Django raise TypeError.
        return str(self.choice)
| [
"cbeltran@umanis.com"
] | cbeltran@umanis.com |
ef79b4514dbb5c1740bdf58c252e4cd319457aba | 8526007f636dc57a30dbd6ef7805ec27d90481af | /pipelines/error_rate_pipeline_load.py | 3ba0129048553611ce344ddc8583461a021c1fd1 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | alphagov/app-performance-summary | d4fbb26e73de20ef4e41334e4ad599254565de64 | e94c63c26dec5da39b8458b1e46bcc4f922ab7dc | refs/heads/master | 2021-09-09T03:45:54.147829 | 2018-03-13T16:50:50 | 2018-03-13T16:50:50 | 119,363,307 | 0 | 1 | MIT | 2018-03-13T16:50:51 | 2018-01-29T09:46:08 | Python | UTF-8 | Python | false | false | 1,907 | py | '''
Extract step for application error rates pipeline
'''
import luigi
from pipeline_steps.export_to_google_sheets import ExportToGoogleSheets
from pipeline_steps.production_error_rates_source import ProductionErrorRatesSource
from error_rate_pipeline_extract import ErrorRatePipelineExtract
from pipeline_util.google_sheet_client import GoogleSheetClient, GoogleSheetTarget
from base import BaseTask
import pandas as pd
import os
class ErrorRatePipelineLoad(BaseTask):
    """Luigi task: load the extracted error-rate data into a Google Sheet.

    Depends on the extract step having run for both Whitehall applications;
    each application's dataframe is validated and appended to one sheet.
    """
    def requires(self):
        # Upstream extract tasks, one per application, for the same date range.
        return [
            ErrorRatePipelineExtract(application='whitehall-admin', date_interval=self.date_interval),
            ErrorRatePipelineExtract(application='whitehall-frontend', date_interval=self.date_interval),
        ]
    def __init__(self, *args, **kwargs):
        super().__init__(task_name='error_rate_extract', *args, **kwargs)
        # Sheet name is the extract step's output file name without its .csv suffix.
        self.filename = filename = self.resource_manager.output_file_name(
            step_name=self.task_name,
            segment=self.segment
        ).replace('.csv', '')
        self.glossary = ProductionErrorRatesSource.glossary()
        # Mailing list the finished sheet is shared with; must be set in the environment.
        self.gsheet_share_email = os.environ['PLATFORM_METRICS_MAILING_LIST']
    def output(self):
        # Luigi completeness check: the Google Sheet with this name exists.
        return GoogleSheetTarget(self.filename)
    def run(self):
        for application in ('whitehall-admin', 'whitehall-frontend'):
            df = self.load_from_step('error_rate_extract', application)
            export_step = ExportToGoogleSheets(GoogleSheetClient())
            export_step.validate_input(
                df,
                df_name='input from error_rate_extract {}'.format(application)
            )
            # NOTE(review): both applications write to the same sheet name —
            # presumably the second write appends/overwrites; confirm in ExportToGoogleSheets.
            export_step.write_data(
                df,
                self.filename,
                self.glossary,
                share_email=self.gsheet_share_email
            )
if __name__ == '__main__':
luigi.run(main_task_cls=ErrorRatePipelineLoad, local_scheduler=True)
| [
"matmoore@users.noreply.github.com"
] | matmoore@users.noreply.github.com |
42e555c359f3334d8e2cea4ef890e0bcdd2716ec | 2100ea2f84adcaeb89bd8e49f50d03497a1f87fb | /txtToExcel/txtToExcel.py | 747f8dea3e15fc01ffdabad8be75c0c69373802b | [] | no_license | JBPrew/RNG-Generator | d1c8c79710fce5b122a4423b3aab207698bb08ce | 20db203e702987fc937660b744b0bcb1ff63b508 | refs/heads/main | 2023-04-20T18:41:04.067449 | 2021-05-24T01:56:23 | 2021-05-24T01:56:23 | 370,199,459 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | import xlsxwriter
import pandas as pd
import xlrd
from xlsxwriter.utility import xl_rowcol_to_cell
# Prompt until a non-empty source file name is given.
txtFileName = input("Name of txt File: ")
while txtFileName == "":
    txtFileName = input("Name of txt File: ")
# Worksheet name defaults to the txt file name when left blank.
workName = input("Excel Worksheet Name: ")
if (workName == ""):
    workName = txtFileName
# Create a workbook and add a worksheet.
txtFile = open("C:/SciFair/txtFiles/" + txtFileName + ".txt", "r")
# txtFileLength = len(txtFile.readlines())
# NOTE(review): the whole file is read as one string; int() below will raise
# ValueError on any newline/whitespace character — assumes the file is a
# single unbroken string of digits. TODO confirm.
txtString = txtFile.read()
workbook = xlsxwriter.Workbook('C:/SciFair/txtToExcel/' + workName + '.xlsx')
worksheet = workbook.add_worksheet()
print("Excel Sheet Created")
# Start from the first cell. Rows and columns are zero indexed.
# Data starts at row 5 so rows 0-2 are free for the summary formulas below.
row = 5
col = 0
start = xl_rowcol_to_cell(row, col)
# Iterate over the data and write it out row by row.
for i in range(len(txtString)):
    worksheet.write(row, col, int(txtString[i]))
    col += 1
    # Wrap to the next row every 2500 digits.
    if (col >= 2500):
        row += 1
        col = 0
print("Excel Sheet Filled")
# NOTE(review): column 2500 is one past the last written column (0-2499), and
# the range also covers unwritten cells on a partial final row — the formulas
# still work because blank cells are ignored, but the range is loose.
col = 2500
end = xl_rowcol_to_cell(row, col)
worksheet.write_formula(0, 0, "=AVERAGE(" + start + ":" + end + ")")
worksheet.write_formula(1, 0, "=MEDIAN(" + start + ":" + end + ")")
worksheet.write_formula(2, 0, "=MODE(" + start + ":" + end + ")")
workbook.close()
"jackbprewitt@gmail.com"
] | jackbprewitt@gmail.com |
e333c381e106259eee7a3f4e11f26674dd3a3594 | 30a8b69bd2e0a3f3c2c1c88fb3bd8a28e6fc4cd0 | /Part1/auth_foursquare.py | dc09d963b40958ce2c5e3b9030a232e3dd9ca643 | [] | no_license | llord1/Mining-Georeferenced-Data | d49108f443922f02b90431ad7a9626ea17fd0554 | c71f2e151ccfc4a1a9c07b5fcf4e95b7f7ba70e9 | refs/heads/master | 2021-05-30T13:27:57.663015 | 2015-12-29T09:10:08 | 2015-12-29T09:10:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | #!/usr/bin/env python
import foursquare
from foursquare_accounts import accounts
# Build an authenticated Foursquare client from the locally stored
# "tutorial" credentials (client id/secret plus a pre-obtained access token).
app = accounts["tutorial"]
client = foursquare.Foursquare(client_id=app["client_id"],
                               client_secret=app["client_secret"])
client.set_access_token(app["access_token"])
| [
"bgoncalves@gmail.com"
] | bgoncalves@gmail.com |
dba9be4055644dd269dab56629dd4b7349338eb4 | 10321b56b812da315724c7022e7b0f48a3434883 | /07-ejercicios/ejercicio8.py | 0962be1baba04bece7c49d64c7a2c341d0fe9ba3 | [] | no_license | nisepulvedaa/curso-python | 8ef8e6f2b604c10b4549c9046a26be02bfd76ccb | 0e1904e71b4cb515920c1610ac6f168dc307d631 | refs/heads/master | 2023-01-29T21:52:44.912261 | 2020-12-14T22:47:55 | 2020-12-14T22:47:55 | 321,491,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
Ejercico 8
¿Cuanto es el x % de z numero ?
20 de 150
"""
# Reads a percentage (numero1) and a base number (numero2) and prints
# "numero1% of numero2".
numero1 = int(input("Ingrese el porcentaje que desea calcular: "))
numero2 = int(input("Ingrese el numero que sea sacar el porcentaje: "))
if numero1 > 100:
    print("Su porcentaje no puede mayor a 100")
else:
    # BUG FIX: the original computed (numero1 * 100) / numero2, which answers
    # the inverse question ("what percent of numero2 is numero1").  numero1%
    # of numero2 is numero1 * numero2 / 100 — e.g. 20% of 150 is 30, not 13.
    porcentaje = int((numero1 * numero2) / 100)
    print("El {} % de {} es: {} ".format(numero1, numero2, porcentaje))
"ni.sepulvedaa@gmail.com"
] | ni.sepulvedaa@gmail.com |
4c601e56a8d4ebc420b8a94e8588e6635cb33073 | 4780fd0e2afc40c3bbc8736605ebc5c351f3bc30 | /checkMidpoint.py | 9a0266319429f7108a0f59d9f435102131a91392 | [] | no_license | fpeckert/cbp_database_public | a1e85a80bd21b72d4a6ea5b3c0065b2cc2ffa85e | 0d671a613e74218976ba78577c9a8ba004787d2d | refs/heads/master | 2022-02-08T15:20:21.611884 | 2021-02-01T04:12:44 | 2021-02-01T04:12:44 | 225,522,376 | 18 | 2 | null | 2022-02-01T14:30:35 | 2019-12-03T03:27:50 | Python | UTF-8 | Python | false | false | 4,713 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 29 19:04:20 2020
@author: Anubhav Agarwal
"""
import cbp
import pandas as pd
def midpointable(year, suffix):
    """Consistency-check CBP establishment-count bounds across both hierarchies.

    Loads the national/state/county CSVs for *year* (file name suffix *suffix*),
    builds the industry (NAICS or SIC) and geography trees, and verifies that for
    every (geography, industry) cell the children's summed [lb, ub] interval
    overlaps the parent's interval.  Returns True when no discrepancy is found,
    False (after printing diagnostics) at the first violation.
    """
    # Years up to 1997 use SIC industry codes instead of NAICS.
    is_sic = False
    if int(year) <= 1997:
        is_sic = True
    national_df = pd.read_csv('cbp' + year + 'us' + suffix + '.csv')
    state_df = pd.read_csv('cbp' + year + 'st' + suffix + '.csv')
    county_df = pd.read_csv('cbp' + year + 'co' + suffix + '.csv')
    if is_sic:
        # SIC files label the industry column 'ind'; normalise to 'naics'.
        national_df = national_df.rename(index=str, columns={'ind': 'naics'})
        state_df = state_df.rename(index=str, columns={'ind': 'naics'})
        county_df = county_df.rename(index=str, columns={'ind': 'naics'})
    industry_ref_file = cbp.refFileName(year)
    naics_codes = cbp.newNaicsCodes(industry_ref_file, year)
    geo_codes = cbp.geoCodes(state_df, county_df)
    # ##
    # Construct tree for NAICS codes
    # ##
    # determine level function based on which industry code is used
    industry_level_function = cbp.naics_level
    if is_sic:
        industry_level_function = cbp.sic_level
    naics_tree = cbp.preorderTraversalToTree(naics_codes, 'naics', industry_level_function)
    # ##
    # Construct tree for Geography
    # ##
    geo_tree = cbp.preorderTraversalToTree(geo_codes, 'geo', cbp.geo_level)
    results_df = cbp.merge_dataframes(national_df, state_df, county_df)
    results_df = results_df.rename(index = str, columns = {'ind' : 'naics'})
    results_df = results_df.drop_duplicates(subset = ['naics', 'geo'])
    # Dense lookup matrices: rows = industry codes, columns = geographies;
    # missing combinations become 0 so they read as "no data".
    ub_matrix = results_df.pivot(index='naics', columns='geo', values='ub').fillna(0).astype(int)
    lb_matrix = results_df.pivot(index='naics', columns='geo', values='lb').fillna(0).astype(int)
    for geo_index, geo in enumerate(geo_codes):
        for naics_index, naics in enumerate(naics_codes):
            # does the geo code have children in theory
            if len(geo_tree[geo_index]['children']) != 0:
                children = list(map(lambda x: geo_codes[x], geo_tree[geo_index]['children']))
                # sum of geographical children's lower/upper bounds
                geo_sum_lower = sum(lb_matrix[geo_codes[child]][naics] for child in geo_tree[geo_index]['children'])
                geo_sum_upper = sum(ub_matrix[geo_codes[child]][naics] for child in geo_tree[geo_index]['children'])
                # check if the code has children in data
                if geo_sum_upper == 0:
                    continue
                # Parent's interval must overlap the children's summed interval.
                if geo_sum_lower > ub_matrix[geo][naics] or geo_sum_upper < lb_matrix[geo][naics]:
                    print('Found error in geo tree at industry code = %s' % naics)
                    print('children sum (lower, upper): ' + str((geo_sum_lower, geo_sum_upper)))
                    print('parent (lower, upper): ' + str((lb_matrix[geo][naics], ub_matrix[geo][naics])))
                    print()
                    print('Parent: %s. Children: %s \n' % (str(geo), children))
                    return False
            # check if the geo code has children in theory (in the industry tree)
            if len(naics_tree[naics_index]['children']) != 0:
                children = list(map(lambda x: naics_codes[x], naics_tree[naics_index]['children']))
                # sum of industrial children's lower/upper bounds
                naics_sum_lower = sum(lb_matrix[geo][naics_codes[child]] for child in naics_tree[naics_index]['children'])
                naics_sum_upper = sum(ub_matrix[geo][naics_codes[child]] for child in naics_tree[naics_index]['children'])
                # check if the code has children in data
                if naics_sum_upper == 0:
                    continue
                if naics_sum_lower > ub_matrix[geo][naics] or naics_sum_upper < lb_matrix[geo][naics]:
                    # sic does not have exact hierarchy after level 2 (inclusive)
                    if is_sic and cbp.sic_level(naics) >= 2 and naics_sum_upper < lb_matrix[geo][naics]:
                        continue
                    # discrepancy
                    print('Found error in industry tree at geoography (fipstate, fipscty) = %s' % str(geo))
                    print('children sum (lower, upper): ' + str((naics_sum_lower, naics_sum_upper)))
                    print('parent (lower, upper): ' + str((lb_matrix[geo][naics], ub_matrix[geo][naics])))
                    print()
                    print('Parent: %s. Children: %s \n' % (naics, children))
                    return False
    return True
"noreply@github.com"
] | noreply@github.com |
2c49843004e5af3d4f6f1f7a39f56f8280186b0e | ef2dfe0f97cf78e520fc6e30d795691c75dac840 | /kin/util.py | d7ce97fe299697f2714c827d5559b248764a8f7e | [
"MIT"
] | permissive | smartdolphin/ai-hackathon-2018 | bbcf142865fdf05f0aacd070e8d7f8f99e513dfb | 2dce1870ea5a5e10917bb5a018890973e48df886 | refs/heads/master | 2021-09-14T22:27:54.631329 | 2018-05-19T17:29:05 | 2018-05-19T17:30:56 | 131,360,706 | 1 | 0 | MIT | 2018-04-28T01:41:53 | 2018-04-28T01:41:53 | null | UTF-8 | Python | false | false | 667 | py | import os
import tensorflow as tf
PATH = '/tmp/kin/models'
def local_save(sess, epoch, *args):
    """Checkpoint *sess* under PATH/<epoch> and write a graph summary there."""
    target = os.path.join(PATH, str(epoch))
    os.makedirs(PATH, exist_ok=True)
    checkpoint_saver = tf.train.Saver()
    checkpoint_saver.save(sess, target)
    # FileWriter is constructed purely for its side effect of dumping the graph.
    tf.summary.FileWriter(target, sess.graph)
def local_load(sess):
    """Restore the most recent checkpoint from PATH into *sess*.

    Raises:
        FileNotFoundError: when no checkpoint exists under PATH.
    """
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(PATH)
    if ckpt and ckpt.model_checkpoint_path:
        checkpoint = os.path.basename(ckpt.model_checkpoint_path)
        saver.restore(sess, os.path.join(PATH, checkpoint))
    else:
        # BUG FIX: the original did `raise NotImplemented('No checkpoint!')`.
        # NotImplemented is a sentinel value, not an exception class, so that
        # line itself raised "TypeError: 'NotImplementedType' is not callable".
        raise FileNotFoundError('No checkpoint!')
    print('Model loaded')
"smartdolphin07@gmail.com"
] | smartdolphin07@gmail.com |
98c418f97a9b856ecb5507a37fe70ad0085b224a | 39996673e85e32497f1a771e6d55fe317fd4e9f2 | /Beginner level/strpalindrome.py | 46a2d25e65555e3ddb038ddfde76bf88d5216c24 | [] | no_license | hariharan31998/Python-Programming | 638d1be2d59ad8a6a675b94d1c08c04139344de9 | 17aba448c36596eea1ab59b5346e53ed644b66de | refs/heads/master | 2021-04-27T13:13:49.961069 | 2019-02-06T05:25:57 | 2019-02-06T05:25:57 | 122,435,676 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | a=str(input())
# `a` is the line read from stdin above; echo it back.
print(a)
# Reverse via slicing and print the reversed copy.
b=a[::-1]
print(b)
# A palindrome reads the same forwards and backwards.
if a==b:
    print('yes')
else:
    print('no')
| [
"noreply@github.com"
] | noreply@github.com |
3b086ad101b3545f4ba98eab3b0c7bd9ac43e051 | d487843a873554db9ea63dac55110591c265c984 | /classes/utilities.py | 96d08b7a02c479407b1cc21de574a00ef555438d | [] | no_license | simone989/ForwardingDropboxBot | dd7e7215fb2cc3556da843ad3c10b0375fb8a45d | 2ba8ec1f36930b355168456c20a45998fd8b9f48 | refs/heads/master | 2021-01-01T05:22:35.110007 | 2016-05-10T15:06:54 | 2016-05-10T15:06:54 | 57,361,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,274 | py | #!/usr/bin/env python
import dropbox
class Response(object):
    """Maps bot commands to canned replies loaded from ``commands.txt``.

    Each line of the file has the form ``command=reply text``; commands are
    matched against user input prefixed with a slash (``/command``).
    """

    def __init__(self):
        self.responseFile = open("commands.txt", "r")
        self.textLines = []
        self.fromFileToList()

    def fromFileToList(self):
        """Cache the file's contents as a list of lines (easier to work with)."""
        with self.responseFile as handle:
            self.textLines = handle.read().splitlines()
        self.responseFile.close()

    def getList(self):
        """Debug helper: print every cached line."""
        for entry in self.textLines:
            print (entry)

    def responseText(self, command):
        """Return the reply configured for *command*, or False when unknown."""
        for entry in self.textLines:
            parts = entry.split("=")
            if self.isCommand(command, parts[0]):
                return parts[1]
        return False

    def isCommand(self, textCommand, fileCommand):
        """True when *textCommand* equals "/" + *fileCommand*."""
        print ("Comando del file: /"+fileCommand)
        return textCommand == "/" + fileCommand

    def reloadFile(self):
        """Re-read commands.txt; returns True on success, False on any error."""
        try:
            self.responseFile = open("commands.txt", "r")
            self.textLines = []
            self.fromFileToList()
            return True
        except Exception as error:
            print (error)  # debug
            return False
class CommandManager(object):
    """Dispatches chat commands, managing an optional Dropbox session."""
    def __init__(self):
        # Reply sent for unrecognised commands ("Command not recognised").
        self.errorMessage = "Comando non riconosciuto"
        self.sessionOpen = False
    def executeCommand(self,command,fromuser = None):
        """Run *command* (with or without leading slash) and return the reply text.

        NOTE(review): parseCommand lowercases slash-less input, so a user typing
        "listFiles" becomes "/listfiles" and never matches "/listFiles" below —
        confirm whether commands are meant to be case-insensitive.
        """
        command = self.parseCommand(command)
        if(self.sessionOpen):
            # A session is open: accept the auth token when we are waiting for one.
            if(self.dropboxSession.waitingToken == True and command.split(" ")[0] == "/token"):
                self.dropboxSession.token = command.split(" ")[1]
                return self.dropboxSession.startAuth()
        else:
            # No session yet: /token is only valid after /startsession.
            if(command.startswith("/token")):
                return "Iniziare una sessione con /startsession prima di continuare."
        # NOTE(review): /listFiles assumes self.dropboxSession exists — calling it
        # before /startsession raises AttributeError; confirm intended flow.
        if(command == "/listFiles"):
            return self.dropboxSession.listOfFile()
        if(command == "/start"):
            print ("Debug metodo. Per evitare il loop")
            return "Started"
        elif (command == "/info"):
            return "Bot creato da @xVinz e @simone989"
        elif (command == "/startsession"):
            self.startSession(fromuser.username)
            self.dropboxSession.waitingToken = True
            return "Digita /token <token di autenticazione> per procedere."
            #self.startSession(fromuser.username)
        else:
            return self.errorMessage
    def parseCommand(self,command):
        # Normalise input: keep slash-commands as-is, prefix and lowercase the rest.
        if(command.startswith("/")):
            return command
        else:
            return "/"+command.lower()
    def startSession(self,user):
        # Open a fresh Dropbox session; *user* is currently unused.
        self.sessionOpen = True
        self.dropboxSession = NewSession()
    def DEBUGlog(self,fromuser,command):
        # Debug trace: "User X typed Y".
        print("Utente: "+ fromuser +" ha digitato: "+command)
class NewSession(object):
    """Holds one Dropbox authentication attempt and the resulting client."""
    def __init__(self):
        self.isAuthenticated = False
        # True while we are waiting for the user to supply an access token.
        self.waitingToken = False
        self.token = None
        print ("DEBUG Inizializzazione classe NewSession")
    def startAuth(self):
        """Build the Dropbox client from self.token and report success/failure."""
        self.client = dropbox.client.DropboxClient(self.token)
        self.checkAuthentication()
        if(self.isAuthenticated):
            self.waitingToken = False
            return "Autenticazione eseguita correttamente."
        else:
            self.waitingToken = True
            return "Errore nell'inserimento del token."
    def checkAuthentication(self):
        # account_info() fails for an invalid token; any error is treated as
        # "not authenticated" (deliberately broad, best effort).
        try:
            self.client.account_info()
            self.isAuthenticated = True
        except Exception as error:
            self.isAuthenticated = False
    def listOfFile(self):  # Da continuare (work in progress)
        # BUG FIX: the original read the undefined global `client` (NameError);
        # the authenticated client is stored on the instance.
        folderMetadata = self.client.metadata('/')
        return folderMetadata
"0xVinz@gmail.com"
] | 0xVinz@gmail.com |
d643ba0ffc2781833fc948211a92075a0281833a | 89cef15f8c54f16599f461bce55c4c29954fb918 | /scrapy_workua/scrapy_workua/settings.py | d393d3570ca2b72aeee684626fbfc9104b9763a3 | [] | no_license | KonDob/HT_20_Scrapy | 39417738e6fb9ade6f600bb021c323ef1718f51c | b762ea4d26c01b994bed066ffdb011b467220d91 | refs/heads/master | 2023-03-24T03:53:21.939319 | 2021-03-09T14:23:34 | 2021-03-09T14:23:34 | 343,230,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,140 | py | # Scrapy settings for scrapy_workua project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# Project identity and spider discovery paths for the work.ua scraper.
BOT_NAME = 'scrapy_workua'
SPIDER_MODULES = ['scrapy_workua.spiders']
NEWSPIDER_MODULE = 'scrapy_workua.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrapy_workua (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scrapy_workua.middlewares.ScrapyWorkuaSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'scrapy_workua.middlewares.ScrapyWorkuaDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'scrapy_workua.pipelines.ScrapyWorkuaPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings # noqa: E501
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"konstantin.dobro@gmail.com"
] | konstantin.dobro@gmail.com |
be0eb741b4aaaad5085131454dec219bdd1c93dd | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Anscombe/trend_LinearTrend/cycle_30/ar_/test_artificial_1024_Anscombe_LinearTrend_30__100.py | 70d9b6daa1932fc44ee8f23227fa9317aea8fd0d | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 268 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 30, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
0eb944d3d4b625dd58953dcd5ad39efa5bcaeaa1 | 9c14bd53c8629262b1310962c1663a3c503ba3a0 | /projects/golem/tests/project/add_directory_to_pages.py | 7b7d283d6d17e02f6e630f4b7d7aad6a000fea95 | [] | no_license | ShubhamAnand/golem-demo | b083d44b5d2d5db79eae96aa5bb1f3307272d64b | a40ced5500b3bfdb54351393eeb8ccba19a50564 | refs/heads/master | 2021-07-16T00:44:57.663282 | 2017-10-22T22:56:25 | 2017-10-22T22:56:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py |
# Golem framework test module: `description` and `pages` are test metadata,
# and setup/test/teardown are the hooks Golem runs in order.
description = 'Verify that the user can add a directory in the pages section by appending \'\\\' at the end'
pages = ['login',
         'index',
         'project']
def setup(data):
    # Log in as admin and open (creating if needed) the 'test' project.
    navigate(data.env.url)
    login.do_login('admin', 'admin')
    index.create_access_project('test')
def test(data):
    # Add a page directory with a random 5-letter name and verify it appears.
    store('directory_name', random('ccccc'))
    project.add_page_directory(data.directory_name)
    project.verify_page_directory_exists(data.directory_name)
def teardown(data):
    # Close the browser regardless of the test outcome.
    close()
| [
"feo.luciano@gmail.com"
] | feo.luciano@gmail.com |
c35024eb1eed9b0da1bdde17899977fd5b9b5c96 | 0201ac814d825cac1030dfe1ccdb7ef1657c205b | /__init__.py | a403709aa7de47dca868813496d90679f83afbc3 | [
"BSD-3-Clause"
] | permissive | karttur/geoimagine03-timeseries | c99be449dccaab767d470cfaa2b71d9dae339fba | aa8e1642fd4a8bc196ad6fce9f90b80066d54dac | refs/heads/main | 2023-08-22T14:12:50.791746 | 2021-10-20T10:54:37 | 2021-10-20T10:54:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | """
timeseries
==========================================
Package belonging to Karttur´s GeoImagine Framework.
Author
------
Thomas Gumbricht (thomas.gumbricht@karttur.com)
"""
from .version import __version__, VERSION, metadataD
from .timeseries import ProcessTimeSeries
from .numbautil import TimeSeriesNumba
#from .tsgraph import ProcessTimeSeriesGraph
| [
"thomas.gumbricht@gmail.com"
] | thomas.gumbricht@gmail.com |
ee6bca645dc84757845a11e614e0667477bad197 | 2c0d0a3757f4a21b2de2afc3b8293fe1a2292d68 | /test_algoritmos.py | 8af9e316f6bd27299e801a0481ee7037c91a2be4 | [] | no_license | iErick99/Tarea2-Python | ad0fbfdc24babf456d739df86da6442e9e633c36 | b3939cdd415ded50a434cbfa96070167716c81d6 | refs/heads/master | 2020-04-08T02:40:11.984527 | 2018-11-24T14:30:24 | 2018-11-24T14:30:24 | 158,943,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | from Algoritmos import isanigrama, ispalindromo
def test_isanigrama():
a = 'hola'
b = 'carro'
assert isanigrama(a,b) == False, 'Error crítico'
def test_ispalindromo():
a = 'hola'
assert ispalindromo(a) == False, 'Error crítico'
def test_isanigrama2():
a = 'car'
b = 'arc'
assert isanigrama(a, b) == True, 'Error crítico'
def test_ispalindromo2():
a = 'arenera'
assert ispalindromo(a) == True, 'Error critico' | [
"lolerick1@hotmail.com"
] | lolerick1@hotmail.com |
63a89a990c8d496437077cb8b6227666b7df1a26 | 9b7192094d347fb8b506e3a2a67806cab67749f2 | /stackoverflow/src/question/migrations/0005_auto_20151117_1839.py | 903fa8b5d16878b7e57aa0a21410cd1b36488f24 | [] | no_license | ElSacramento/django | 830c100907e6d5936761121dec1f8e9ad7d4c286 | 018f9814318df1b30c02bfeb55562debace5bd5b | refs/heads/master | 2021-01-20T19:30:20.440173 | 2016-06-24T00:03:51 | 2016-06-24T00:03:51 | 61,843,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated Django migration: make Question.creation_date default to
    timezone.now and allow it to be blank."""
    dependencies = [
        ('question', '0004_auto_20151117_1835'),
    ]
    operations = [
        migrations.AlterField(
            model_name='question',
            name='creation_date',
            field=models.DateTimeField(default=django.utils.timezone.now, blank=True),
        ),
    ]
"catherine.zamaraeva@gmail.com"
] | catherine.zamaraeva@gmail.com |
4034bde7a9e06c5d7487997a7acb9e10b85cca2b | 0f1001169c4f229c253a6f1dc1c9aff51c797cca | /docs/markdown_to_html.py | ffacda661ea31a8286a001a77d5178f08b9a1fd3 | [
"Apache-2.0"
] | permissive | alencon/dash-bootstrap-components | f40e360787c96a1d9f7827cf042872b2f9cffcac | 4f39856c13f66730512c57ed6dc0a819e8629293 | refs/heads/master | 2023-01-22T13:07:05.880865 | 2020-12-03T21:25:50 | 2020-12-03T21:25:50 | 318,998,227 | 1 | 0 | Apache-2.0 | 2020-12-06T09:42:13 | 2020-12-06T09:42:13 | null | UTF-8 | Python | false | false | 1,541 | py | from pathlib import Path
import markdown
from markdown.extensions.fenced_code import FencedBlockPreprocessor
# highlightJS expects the class "language-*" but markdown default is "*"
FencedBlockPreprocessor.LANG_TAG = ' class="language-%s"'
CONTENT = Path(__file__).parent / "content"
DEST = Path(__file__).parent / "templates" / "generated"
DOCS_HTML_TEMPLATE = """{% extends "docs.html" %}
{% block title %}<title><TITLE></title>{% endblock %}
{% block content %}<CONTENT>{% endblock %}
"""
CHANGELOG_HTML_TEMPLATE = """{% extends "changelog.html" %}
{% block title %}<title><TITLE></title>{% endblock %}
{% block content %}<CONTENT>{% endblock %}
"""
def convert_all_markdown_files():
    """Render every markdown source under CONTENT into a Jinja template in DEST.

    Docs pages (content/docs/*.md) use the docs template and get the
    " - dbc docs" title suffix; top-level pages (content/*.md) use the
    changelog template.
    """
    for path in CONTENT.glob("docs/*.md"):
        template = template_from_markdown(path, title_suffix=" - dbc docs")
        with open(DEST / "docs" / path.name.replace(".md", ".html"), "w") as f:
            f.write(template)
    for path in CONTENT.glob("*.md"):
        template = template_from_markdown(
            path, template=CHANGELOG_HTML_TEMPLATE
        )
        with open(DEST / path.name.replace(".md", ".html"), "w") as f:
            f.write(template)
def template_from_markdown(path, title_suffix="", template=DOCS_HTML_TEMPLATE):
    """Render the markdown file at *path* into a Jinja template string.

    The markdown body replaces the template's <CONTENT> placeholder, and the
    document's `title` metadata (plus *title_suffix*) replaces <TITLE>.
    """
    md = markdown.Markdown(extensions=["fenced_code", "meta"])
    text = path.read_text()
    template = template.replace("<CONTENT>", md.convert(text))
    # BUG FIX: the original hard-coded " - dbc docs" here, ignoring the
    # title_suffix parameter — so changelog pages (suffix "") wrongly got the
    # docs suffix too.
    return template.replace("<TITLE>", f"{md.Meta['title'][0]}{title_suffix}")
if __name__ == "__main__":
convert_all_markdown_files()
| [
"tomcbegley@gmail.com"
] | tomcbegley@gmail.com |
33f930a46d509398e633fffe7486592b06d47845 | 3fc8a38257d855e95fe16c1f117790fdfcc665e5 | /contrib/spendfrom/spendfrom.py | d06c624a29f6d532e1e2b6600d07ac3aedc5ce09 | [
"MIT"
] | permissive | MichaelHDesigns/Aezora | b139e6b0a0886dc8fe4f1e0ed1a702cb26ca6986 | fc7170fb5e0e016379f98d4691b014e8ca0746ad | refs/heads/master | 2023-03-13T00:31:17.894219 | 2021-03-08T14:29:27 | 2021-03-08T14:29:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,996 | py | #!/usr/bin/env python
#
# Use the raw transactions API to spend AZRs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a aezorad or aezora-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Raise RuntimeError if the json library loses precision on 8-decimal BTC amounts."""
    reference = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(reference)))
    # 20000000.00000003 BTC is exactly 2000000000000003 satoshis.
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the aezora data directory."""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/AEZORA/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "AEZORA")
    # Everything else (Linux, BSD, ...) uses the dot-directory in $HOME.
    return os.path.expanduser("~/.aezora")
def read_bitcoin_config(dbdir):
    """Read the aezora.conf file from dbdir, returns dictionary of settings.

    NOTE: Python 2 code — `ConfigParser` / `readfp` were renamed in Python 3.
    """
    from ConfigParser import SafeConfigParser
    class FakeSecHead(object):
        # aezora.conf has no [section] headers, which ConfigParser requires.
        # This wrapper fakes a leading "[all]" header and strips "#" comments.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            # Emit the fake header exactly once, then pass file lines through.
            if self.sechead:
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                # Drop inline comments before handing the line to the parser.
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "aezora.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a aezora JSON-RPC server; exits the process on failure."""
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    # Default RPC port depends on mainnet vs testnet.
    if not 'rpcport' in config:
        config['rpcport'] = 14727 if testnet else 14724
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the aezorad we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except:
        # NOTE(review): this bare except also catches the SystemExit raised by
        # sys.exit(1) above, so a testnet mismatch prints both messages before
        # exiting — harmless but confusing; confirm before tightening.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(aezorad):
    """Interactively unlock an encrypted wallet; return True when it is usable."""
    info = aezorad.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    # Already locked (or lock expired): prompt for the passphrase and unlock
    # for a short 5-second window — just long enough to sign a transaction.
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            aezorad.walletpassphrase(passphrase, 5)
        except:
            sys.stderr.write("Wrong passphrase\n")
    # Re-query so the return value reflects the unlock attempt.
    info = aezorad.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(aezorad):
    """Group unspent outputs by receiving address.

    Returns {address: {"total": Decimal, "outputs": [unspent, ...],
    "account": str}} covering only standard P2PKH/P2SH outputs.
    """
    address_summary = dict()
    address_to_account = dict()
    for info in aezorad.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    unspent = aezorad.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = aezorad.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-aezora-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
            }
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick unspent outputs, in order, until *needed* is covered.

    Returns (chosen_outpoints, change) where change = gathered - needed;
    change is negative when the available inputs cannot cover the amount.
    """
    chosen = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        chosen.append({"txid": candidate["txid"], "vout": candidate["vout"]})
        gathered += candidate["amount"]
    return (chosen, gathered - needed)
def create_tx(aezorad, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction.

    Spends coins belonging to `fromaddresses`, sends `amount` to `toaddress`
    and reserves `fee`.  Change larger than BASE_FEE is returned to the last
    from-address.  Exits the process on insufficient funds or signing failure.
    Returns the signed transaction as a hex string.
    """
    all_coins = list_available(aezorad)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to aezorad.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = aezorad.createrawtransaction(inputs, outputs)
    signed_rawtx = aezorad.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(aezorad, txinfo):
    """Sum the values of all inputs of a decoded transaction.

    Each input's value is looked up in its source transaction via RPC.
    """
    total = Decimal("0.0")
    for txin in txinfo['vin']:
        source = aezorad.getrawtransaction(txin['txid'], 1)
        total += source['vout'][txin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of a decoded transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(aezorad, txdata_hex, max_fee):
    """Abort the process unless the transaction's implied fee looks sane.

    The fee is the difference between total inputs and total outputs.
    Rejects: fee above `max_fee`; a >1000-byte transaction paying less than
    BASE_FEE; a tiny-amount transaction paying less than BASE_FEE.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = aezorad.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(aezorad, txinfo)
        total_out = compute_amount_out(txinfo)
        # Bug fix: the original referenced an undefined name `fee` below,
        # raising NameError on those paths; use the actual implied fee.
        actual_fee = total_in - total_out
        if actual_fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(actual_fee))
        tx_size = len(txdata_hex)//2
        kb = tx_size//1000  # integer division rounds down (// matches the comment on Python 3 too)
        if kb > 1 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point: list per-address balances, or build/send a transaction."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get AZRs from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send AZRs to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of aezora.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    aezorad = connect_JSON(config)
    if options.amount is None:
        # No amount given: just print per-address balances.
        address_summary = list_available(aezorad)
        # .items() instead of the Python-2-only .iteritems(): works on
        # both Python 2 and 3.
        for address, info in address_summary.items():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(aezorad) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(aezorad, options.fromaddresses.split(","), options.to, amount, fee)
        sanity_test_fee(aezorad, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = aezorad.sendrawtransaction(txdata)
            print(txid)
# Script entry point: run the command-line interface only when executed
# directly, not when imported as a module.
if __name__ == '__main__':
    main()
| [
"59030118+Aezora@users.noreply.github.com"
] | 59030118+Aezora@users.noreply.github.com |
ef6d97f0f5f530a0ebddca253af70a73492955c8 | 61b83fb5d4e9e09bddf1a90f2d2ac34962df02db | /elitedata/fixtures/__init__.py | d9d0fb4b2ca6383d00f26ea2fb1dbfa8ffdf7b22 | [
"MIT"
] | permissive | jingyuyao/elitetraderoutes | 0577b9cfa06e041507a091b751bea03a2e419143 | a5b9ac9fba89b594843e527962b0b8539157cfd2 | refs/heads/master | 2021-01-21T09:28:40.626322 | 2016-03-27T00:12:44 | 2016-03-27T00:12:44 | 36,675,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26 | py | __author__ = 'Jingyu_Yao'
| [
"yjy.emails@gmail.com"
] | yjy.emails@gmail.com |
e3e98cd7aaf3e9e63df6dd813cbada7c0adbbda5 | 5e6a67ae273bcbd55dfe0a77d4c839a2e554d3bf | /tree_parsing.py | ee094aed389950a95db950770158e1ce95218d4e | [] | no_license | Prabhanjan-Upadhya/Quora_Challenges | d54dd1e20c9c70f07b68fffe0c67e0d71290a786 | d26ddce869205d90b367f88a2dd9c3c11b2466a7 | refs/heads/master | 2021-01-09T20:33:31.220380 | 2016-06-27T17:33:31 | 2016-06-27T17:33:31 | 62,074,863 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | from collections import deque
def parser(string1, tree_dict):
    """Parse a space-separated topic tree like
    "Animals ( Reptiles Birds ( Eagles Pigeons Crows ) )" into an
    adjacency dict {topic: [children]} stored in `tree_dict`.

    Tokens are delimited by single spaces; '(' descends into the most
    recently added child, ')' pops back up.  Returns `tree_dict`.
    NOTE: indentation reconstructed from a whitespace-stripped source.
    """
    start = 0
    root = None
    prev = None          # node that new topics are attached to
    lst1 = []            # stack of ancestors for ')' handling
    for i in range(0, len(string1)):
        if string1[i] == ' ':
            # print() works identically for a single argument on
            # Python 2 and 3 (was a Python-2-only print statement).
            print(tree_dict)
            topic = string1[start: i]
            if topic == '(':
                # Descend: the last child added to `prev` becomes the
                # new attachment point.
                if len(tree_dict[prev]) > 0:
                    prev = tree_dict[prev][-1]
                lst1.append(prev)
                start = i+1
                continue
            if topic == ')':
                # Ascend one level.
                del lst1[-1]
                if len(lst1) > 0:
                    prev = lst1[-1]
                start = i+1
                continue
            if root is None:
                root = topic
                tree_dict[topic] = []
                prev = root
                lst1.append(root)
            else:
                tree_dict[topic] = []
                tree_dict[prev].append(topic)
            start = i+1
    return tree_dict
def main():
    """Demo: parse a sample topic tree and print the resulting adjacency dict."""
    string1 = "Animals ( Reptiles Birds ( Eagles Pigeons Crows ) )"
    tree_dict = {}
    tree_dict = parser(string1, tree_dict)
    # print() with one argument behaves identically on Python 2 and 3
    # (was a Python-2-only print statement).
    print(tree_dict)

main()
| [
"pupadhy@clemson.edu"
] | pupadhy@clemson.edu |
fd96a157748820ddb00179c606ac48e6ff091f69 | 0e0545c05ed9e019d070f732b9f96b7cec1c64f3 | /student/schedule.py | 49b9800c7b087672e1a76d910c9d93fbeffebb03 | [] | no_license | czarhao/sync_crawler | 8b635a62b04518d968b4f41f82e6014966d4e94e | f8c7641618d3befe028195e6d26de964b7ad33d5 | refs/heads/master | 2023-05-27T19:37:48.184658 | 2021-09-14T10:47:25 | 2021-09-14T10:47:25 | 203,547,407 | 3 | 0 | null | 2023-05-22T22:26:26 | 2019-08-21T09:05:58 | Python | UTF-8 | Python | false | false | 4,844 | py | #!/usr/bin/env python3
import json
from other import return_soup
from student.models import Course
def get_schedule(session, now_url, soup, use_header):
    """Fetch and parse the student's personal timetable.

    session:    authenticated requests session.
    now_url:    base URL of the academic system.
    soup:       parsed landing page; used to find the timetable link.
    use_header: HTTP headers to send with the request.
    Returns the JSON-ready dict built by return_json_schedule; on any
    error a {"success": False, ...} payload with the error text.
    NOTE(review): indentation reconstructed from a whitespace-stripped
    source — confirm against the original repository.
    """
    try:
        # Follow the "student personal timetable" link from the landing page.
        url = now_url + soup.find(onclick="GetMc('学生个人课表');")["href"]
        schedule_info = session.get(url, headers=use_header)
        schedule_soup, _ = return_soup(schedule_info)
        tmp_time = 0          # row counter: the first two rows are headers
        tmp_info = []         # accumulated Course objects
        for tmp_tr in schedule_soup.find(id="Table1").find_all("tr"):
            if tmp_time > 1:
                for tmp_td in tmp_tr.find_all("td"):
                    # Skip the morning/afternoon/evening label cells and
                    # empty cells; parse everything else as course data.
                    if tmp_td.text in ["上午", "下午", "晚上"]:
                        pass
                    elif tmp_td.text == "":
                        pass
                    else:
                        tmp_info = tmp_info + get_info_td(str(tmp_td))
            tmp_time = tmp_time + 1
        return return_json_schedule(True, "", tmp_info)
    except Exception as err:
        return return_json_schedule(False, str(err), [])
def return_start_end(start_end):
    """Split a timetable time string into (weekday, sections, first_week, last_week).

    Input looks like "周一第1,2节{第1-16周}"; when a '|' parity marker is
    present the trailing slice is 5 characters longer.
    """
    sec_from = start_end.find('第') + 1
    brace = start_end.find("{")
    dash = start_end.find("-")
    tail = -5 if "|" in start_end else -2
    return (start_end[1],
            start_end[sec_from:brace - 1],
            start_end[brace + 2:dash],
            start_end[dash + 1:tail])
def return_start_end_jud(start_end):
    """Like return_start_end, plus a week-parity flag.

    Returns (weekday, sections, first_week, last_week, parity) where
    parity is 1 for odd weeks (单周), 2 for even weeks (双周), 0 otherwise.
    """
    if '单周' in start_end:
        parity, tail = 1, -5
    elif '双周' in start_end:
        parity, tail = 2, -5
    else:
        parity, tail = 0, -2
    sec_from = start_end.find('第') + 1
    brace = start_end.find("{")
    dash = start_end.find("-")
    return (start_end[1],
            start_end[sec_from:brace - 1],
            start_end[brace + 2:dash],
            start_end[dash + 1:tail],
            parity)
def get_info_td(td):
    """Tokenize the raw HTML of one timetable <td> cell into Course objects.

    Scans the HTML character by character, collecting text that appears
    outside of tags (between '>' and '<') into r_list, then builds one or
    two Course objects from fixed token positions.
    NOTE(review): indentation reconstructed from a whitespace-stripped
    source; the token handling is assumed to run after the scan loop —
    confirm against the original repository.
    """
    return_list = []
    start = True          # toggled on each tag delimiter
    r_list = []           # text chunks found between tags
    tmp_info = ""
    for value in td:
        if value == ">":
            start = not start
        elif value == "<":
            start = not start
            r_list.append(tmp_info)
            tmp_info = ""
        elif start:
            tmp_info = tmp_info + value
    # More than 4 chunks means the cell actually contains course data.
    if len(r_list) > 4:
        if len(r_list) > 10:
            # Two courses share this cell (e.g. alternating odd/even weeks).
            week, sec, star, end, jud = return_start_end_jud(r_list[3])
            new_course = Course(r_list[1] + "(" + r_list[2] + ")", r_list[5], week, sec,
                                int(star), int(end), r_list[4], jud)
            return_list.append(new_course)
            week, sec, star, end, jud = return_start_end_jud(r_list[9])
            other_course = Course(r_list[7] + "(" + r_list[8] + ")", r_list[11], week, sec,
                                  int(star), int(end), r_list[10], jud)
            return_list.append(other_course)
        else:
            # Single course; jud=0 means "every week".
            week, sec, star, end = return_start_end(r_list[3])
            new_course = Course(r_list[1] + "(" + r_list[2] + ")", r_list[5], week, sec,
                                int(star), int(end), r_list[4], 0)
            return_list.append(new_course)
    return return_list
def return_json_schedule(jud, err_info, info_list):
    """Serialize a list of Course objects into the API's JSON payload.

    jud:       success flag; when False, courses are omitted.
    err_info:  error text (empty on success).
    info_list: Course objects with cname/tname/section/start/end/
               day_week/where/jud attributes.
    """
    courses = []
    payload = {
        "success": jud,
        "info": err_info,
        "courses": courses
    }
    if not jud:
        return payload
    for idx, course in enumerate(info_list):
        # Course category: 0 = required (学必), 1 = elective (学选), 2 = other.
        if "(学必)" in course.cname:
            kind = 0
        elif "(学选)" in course.cname:
            kind = 1
        else:
            kind = 2
        # section is either "n" or "first,last" (class periods).
        head, sep, rest = course.section.partition(',')
        first = int(head)
        length = (int(rest) - first + 1) if sep else 1
        courses.append({
            "id": idx,
            "type": kind,
            "start": course.start,
            "end": course.end,
            "day_week": course.day_week,
            "cour_start": first,
            "cour_length": length,
            "cour_name": course.cname,
            "teacher_name": course.tname,
            "cour_where": course.where,
            "jud": course.jud
        })
    return payload
| [
"czarhao@foxmail.com"
] | czarhao@foxmail.com |
fcf2843bba0bc1f598ab442734dbe565c35f7c93 | bb67740f39e896f4bef0677a3bc164781e533195 | /clt/admin.py | c0a932b6d407645297af6575967a97701a90df6f | [] | no_license | gabik/clt | c1fb61fb3ebf40091d68d03b54d744140056761b | 76786c0a65c7bbf9b2f4ca6dc16408a37b537666 | refs/heads/master | 2021-01-23T09:33:19.584041 | 2013-12-14T13:25:29 | 2013-12-14T13:25:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | from django.contrib import admin
from clt.models import xml_model, contact_group, group_members, contact_element, contact_phones
#class areaAdmin(admin.ModelAdmin):
#list_display = ['id', 'parent', 'name']
admin.site.register(xml_model)
admin.site.register(contact_group)
admin.site.register(contact_phones)
admin.site.register(contact_element)
admin.site.register(group_members)
| [
"gabi@ip-10-190-129-144.ec2.internal"
] | gabi@ip-10-190-129-144.ec2.internal |
286c6510e842c109cd1fabfbe090e84a978c9b28 | fab14fae2b494068aa793901d76464afb965df7e | /benchmarks/f3_wrong_hints/scaling_ltl_timed_transition_system/19-sender_receiver_7.py | 46038b17bbf43428b545dfc234384f2bb5c2c34d | [
"MIT"
] | permissive | teodorov/F3 | 673f6f9ccc25acdfdecbfc180f439253474ba250 | c863215c318d7d5f258eb9be38c6962cf6863b52 | refs/heads/master | 2023-08-04T17:37:38.771863 | 2021-09-16T07:38:28 | 2021-09-16T07:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,406 | py | from typing import FrozenSet
from collections import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
    """Declare a state constant and its primed (next-state) copy.

    Returns the pair (current, next); names starting with '_' are reserved.
    """
    assert not name.startswith("_"), name
    curr = msat_make_constant(menv, msat_declare_function(menv, name, c_type))
    nxt = msat_make_constant(
        menv, msat_declare_function(menv, name_next(name), c_type))
    return curr, nxt
def make_enum(menv, v_name: str, enum_size: int):
    """Encode an enumeration of `enum_size` values with boolean state bits.

    Declares ceil(log2(enum_size)) boolean current/next pairs named
    v_name0, v_name1, ... and returns (b_vars, vals, x_vals) where vals[i]
    (resp. x_vals[i]) is the predicate asserting the current (resp. next)
    state encodes value i, least-significant bit first.
    """
    bool_type = msat_get_bool_type(menv)
    num_bits = ceil(log(enum_size, 2))
    b_vars = []
    for idx in range(num_bits):
        c_name = "{}{}".format(v_name, idx)
        b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
    vals = []
    x_vals = []
    for enum_val in range(enum_size):
        # Binary encoding of this value, padded to num_bits.
        bit_val = format(enum_val, '0{}b'.format(num_bits))
        assert len(bit_val) == num_bits
        assert all(c in {'0', '1'} for c in bit_val)
        # For each bit: the variable itself when set, its negation otherwise
        # (reversed so index 0 is the least-significant bit).
        assign = [b_vars[idx] if c == '1' else
                  (msat_make_not(menv, b_vars[idx][0]),
                   msat_make_not(menv, b_vars[idx][1]))
                  for idx, c in enumerate(reversed(bit_val))]
        pred = assign[0][0]
        x_pred = assign[0][1]
        for it in assign[1:]:
            pred = msat_make_and(menv, pred, it[0])
            x_pred = msat_make_and(menv, x_pred, it[1])
        vals.append(pred)
        x_vals.append(x_pred)
    assert len(vals) == enum_size
    assert len(x_vals) == enum_size
    return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 - arg1 as arg0 + (-1 * arg1)."""
    minus_one = msat_make_number(menv, "-1")
    negated = msat_make_times(menv, arg1, minus_one)
    return msat_make_plus(menv, arg0, negated)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 < arg1 as not(arg0 >= arg1)."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 >= arg1 as arg1 <= arg0 (mathsat only provides leq)."""
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 > arg1 as not(arg0 <= arg1)."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 -> arg1 as (not arg0) | arg1."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
    """Return the symbols whose sum must diverge for time progress: {delta}."""
    real_type = msat_get_rational_type(menv)
    delta = msat_declare_function(menv, delta_name, real_type)
    delta = msat_make_constant(menv, delta)
    return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                   msat_term, msat_term):
    """Build the sender/receiver timed transition system and the LTL property.

    Returns (curr2next map, init, trans, ltl) where ltl is
    (G F !s.stutter) -> G (s.wait_ack -> F s.send).
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    int_type = msat_get_integer_type(menv)
    real_type = msat_get_rational_type(menv)
    # Shared channels between the two processes and the time-elapse symbol.
    r2s, x_r2s = decl_consts(menv, "r2s", int_type)
    s2r, x_s2r = decl_consts(menv, "s2r", int_type)
    delta, x_delta = decl_consts(menv, delta_name, real_type)
    sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
    receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
    curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
    for comp in [sender, receiver]:
        for s, x_s in comp.symb2next.items():
            curr2next[s] = x_s
    zero = msat_make_number(menv, "0")
    init = msat_make_and(menv, receiver.init, sender.init)
    trans = msat_make_and(menv, receiver.trans, sender.trans)
    # invar delta >= 0
    init = msat_make_and(menv, init,
                         msat_make_geq(menv, delta, zero))
    trans = msat_make_and(menv, trans,
                          msat_make_geq(menv, x_delta, zero))
    # delta > 0 -> (r2s' = r2s & s2r' = s2r): channels are stable while
    # time elapses.
    lhs = msat_make_gt(menv, delta, zero)
    rhs = msat_make_and(menv,
                        msat_make_equal(menv, x_r2s, r2s),
                        msat_make_equal(menv, x_s2r, s2r))
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, lhs, rhs))
    # (G F !s.stutter) -> G (s.wait_ack -> F s.send)
    lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
    rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
                                    enc.make_F(sender.send)))
    ltl = msat_make_impl(menv, lhs, rhs)
    return TermMap(curr2next), init, trans, ltl
class Module:
    """Base class for processes of the transition system.

    Holds the mathsat environment, the LTL encoder, and the symb2next map
    of current -> next-state symbols; init/trans start out as `true` and
    are strengthened by subclasses.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 *args, **kwargs):
        self.name = name
        self.menv = menv
        self.enc = enc
        self.symb2next = {}
        true = msat_make_true(menv)
        self.init = true
        self.trans = true

    def _symb(self, v_name, v_type):
        """Declare a module-local constant pair, prefixed with the module name."""
        v_name = "{}_{}".format(self.name, v_name)
        return decl_consts(self.menv, v_name, v_type)

    def _enum(self, v_name: str, enum_size: int):
        """Declare a module-local enumeration (see make_enum)."""
        c_name = "{}_{}".format(self.name, v_name)
        return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
    """Timed sender process.

    State: location (send / wait_ack), event flag (move / stutter), the
    current message id, a retransmission timeout and a clock c.  Writes
    message ids to out_c and reads acknowledgements from in_c.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        int_type = msat_get_integer_type(menv)
        real_type = msat_get_rational_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        evt, x_evt = self._symb("evt", bool_type)
        msg_id, x_msg_id = self._symb("msg_id", int_type)
        timeout, x_timeout = self._symb("timeout", real_type)
        c, x_c = self._symb("c", real_type)
        # Aliases for the two truth values of evt / loc.
        self.move = evt
        self.stutter = msat_make_not(menv, evt)
        self.x_move = x_evt
        self.x_stutter = msat_make_not(menv, x_evt)
        self.send = loc
        self.wait_ack = msat_make_not(menv, loc)
        self.x_send = x_loc
        self.x_wait_ack = msat_make_not(menv, x_loc)
        self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
                          timeout: x_timeout, c: x_c}
        zero = msat_make_number(menv, "0")
        one = msat_make_number(menv, "1")
        base_timeout = one
        # send & c = 0 & msg_id = 0
        self.init = msat_make_and(menv,
                                  msat_make_and(menv, self.send,
                                                msat_make_equal(menv, c,
                                                                zero)),
                                  msat_make_equal(menv, msg_id, zero))
        # invar: wait_ack -> c <= timeout
        self.init = msat_make_and(
            menv, self.init,
            msat_make_impl(menv, self.wait_ack,
                           msat_make_leq(menv, c, timeout)))
        self.trans = msat_make_impl(menv, self.x_wait_ack,
                                    msat_make_leq(menv, x_c, x_timeout))
        # delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
        # c' = c + delta & out_c' = out_c
        lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_iff(menv, x_loc, loc),
                          msat_make_equal(menv, x_msg_id, msg_id)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_timeout, timeout),
                          msat_make_equal(menv, x_c,
                                          msat_make_plus(menv, c, delta))))
        rhs = msat_make_and(menv, rhs,
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # Discrete transition: the process moves and no time elapses.
        disc_t = msat_make_and(menv, self.move,
                               msat_make_equal(menv, delta, zero))
        # (send & send') ->
        # (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_send))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id, msg_id),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (send & wait_ack') ->
        # (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_wait_ack))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id,
                                          msat_make_plus(menv, msg_id, one)),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (c' = 0 & out_c' = out_c &
        # (wait_ack' <-> (in_c != msg_id & c > timeout))
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs_iff = msat_make_and(menv,
                                msat_make_not(menv,
                                              msat_make_equal(menv, in_c,
                                                              msg_id)),
                                msat_make_geq(menv, c, timeout))
        rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
        rhs = msat_make_and(menv,
                            msat_make_and(menv,
                                          msat_make_equal(menv, x_c, zero),
                                          msat_make_equal(menv, x_out_c,
                                                          out_c)),
                            rhs_iff)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack & wait_ack') -> (timeout' > timeout): exponential-style
        # back-off — the timeout strictly grows on each retransmission.
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack,
                                          self.x_wait_ack))
        rhs = msat_make_gt(menv, x_timeout, timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs = msat_make_iff(menv, self.x_send,
                            msat_make_and(menv,
                                          msat_make_equal(menv, in_c, msg_id),
                                          msat_make_lt(menv, c, timeout)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack & send') -> (timeout' = base_timeout)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack, self.x_send))
        rhs = msat_make_equal(menv, x_timeout, base_timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
    """Timed receiver process.

    State: a single boolean location (wait / work).  Reads message ids from
    in_c and acknowledges them on out_c.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        self.wait = loc
        self.work = msat_make_not(menv, loc)
        self.x_wait = x_loc
        self.x_work = msat_make_not(menv, x_loc)
        self.symb2next = {loc: x_loc}
        zero = msat_make_number(menv, "0")
        # wait
        self.init = self.wait
        # delta > 0 -> loc' = loc & out_c' = out_c: nothing changes while
        # time elapses.
        lhs = msat_make_gt(menv, delta, zero)
        rhs = msat_make_and(menv,
                            msat_make_iff(menv, x_loc, loc),
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_impl(menv, lhs, rhs)
        # Discrete transition: no time elapses.
        disc_t = msat_make_equal(menv, delta, zero)
        # wait -> (wait' <-> in_c = out_c): stay in wait until a new
        # (unacknowledged) message id arrives.
        lhs = msat_make_and(menv, disc_t, self.wait)
        rhs = msat_make_iff(menv, self.x_wait,
                            msat_make_equal(menv, in_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & wait') -> (out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_wait))
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & work') -> out_c' = in_c: acknowledge the received id.
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_work))
        rhs = msat_make_equal(menv, x_out_c, in_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # work -> out_c' = out_c
        lhs = msat_make_and(menv, disc_t, self.work)
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Build the set of candidate (location, progress) hints for the search.

    Each Hint constrains one symbol of the sender/receiver system with a
    small automaton of Locations: first a group of "keep constant" hints,
    then a group of "grow / toggle" hints.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    delta = mgr.Symbol(delta_name, types.REAL)
    r2s = mgr.Symbol("r2s", types.INT)
    s2r = mgr.Symbol("r2s", types.INT)
    s_l = mgr.Symbol("s_l", types.BOOL)
    s_evt = mgr.Symbol("s_evt", types.BOOL)
    s_msg_id = mgr.Symbol("s_msg_id", types.INT)
    s_timeout = mgr.Symbol("s_timeout", types.REAL)
    s_c = mgr.Symbol("s_c", types.REAL)
    r_l = mgr.Symbol("r_l", types.BOOL)
    symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
                       r_l])
    # Primed (next-state) copies of every symbol.
    x_delta = symb_to_next(mgr, delta)
    x_r2s = symb_to_next(mgr, r2s)
    x_s2r = symb_to_next(mgr, s2r)
    x_s_l = symb_to_next(mgr, s_l)
    x_s_evt = symb_to_next(mgr, s_evt)
    x_s_msg_id = symb_to_next(mgr, s_msg_id)
    x_s_timeout = symb_to_next(mgr, s_timeout)
    x_s_c = symb_to_next(mgr, s_c)
    x_r_l = symb_to_next(mgr, r_l)
    res = []
    r0 = mgr.Real(0)
    r1 = mgr.Real(1)
    i0 = mgr.Int(0)
    i1 = mgr.Int(1)
    # --- "keep constant at 0 / keep truth value" hints ---
    loc0 = Location(env, mgr.Equals(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, r0))
    hint = Hint("h_delta0", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.Equals(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, i0))
    hint = Hint("h_s2r0", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.Equals(r2s, i0))
    loc0.set_progress(0, mgr.Equals(x_r2s, i0))
    hint = Hint("h_r2s0", env, frozenset([r2s]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, s_l)
    loc0.set_progress(0, x_s_l)
    hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, s_evt)
    loc0.set_progress(0, x_s_evt)
    hint = Hint("h_s_evt0", env, frozenset([s_evt]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.Equals(s_msg_id, i0))
    loc0.set_progress(0, mgr.Equals(x_s_msg_id, i0))
    hint = Hint("h_s_msg_id0", env, frozenset([s_msg_id]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.Equals(s_timeout, r0))
    loc0.set_progress(0, mgr.Equals(x_s_timeout, r0))
    hint = Hint("h_s_timeout0", env, frozenset([s_timeout]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.Equals(s_c, r0))
    loc0.set_progress(0, mgr.Equals(x_s_c, r0))
    hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, r_l)
    loc0.set_progress(0, x_r_l)
    hint = Hint("h_r_l0", env, frozenset([r_l]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # --- "jump to 1 / toggle / keep growing" hints ---
    loc0 = Location(env, mgr.GE(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, r1))
    hint = Hint("h_delta1", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.GE(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, i1))
    hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.GE(r2s, i0))
    loc0.set_progress(0, mgr.Equals(x_r2s, i1))
    hint = Hint("h_r2s1", env, frozenset([r2s]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, s_evt)
    loc0.set_progress(1, mgr.Not(x_s_evt))
    loc1 = Location(env, mgr.Not(s_evt))
    loc1.set_progress(0, x_s_evt)
    hint = Hint("h_s_evt1", env, frozenset([s_evt]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)
    loc0 = Location(env, mgr.GE(s_msg_id, i0))
    loc0.set_progress(0, mgr.Equals(x_s_msg_id, mgr.Plus(s_msg_id, i1)))
    hint = Hint("h_s_msg_id1", env, frozenset([s_msg_id]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.GE(s_timeout, r0))
    loc0.set_progress(0, mgr.Equals(x_s_timeout, mgr.Plus(s_timeout, r1)))
    hint = Hint("h_s_timeout1", env, frozenset([s_timeout]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.GE(s_c, r0))
    loc0.set_progress(0, mgr.Equals(x_s_c, mgr.Plus(s_c, r1)))
    hint = Hint("h_s_c1", env, frozenset([s_c]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, r_l)
    loc0.set_progress(1, mgr.Not(x_r_l))
    loc1 = Location(env, mgr.Not(r_l))
    loc1.set_progress(0, x_r_l)
    hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)
    loc0 = Location(env, mgr.GE(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, mgr.Plus(delta, r1)))
    hint = Hint("h_delta2", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    loc0 = Location(env, mgr.GE(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, mgr.Plus(s2r, i1)))
    hint = Hint("h_s2r2", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    return frozenset(res)
| [
"en.magnago@gmail.com"
] | en.magnago@gmail.com |
17c59ae366737608237c203cd7a63e511f3e1002 | 13da61043e182789cfe8d67e0e1f8a4d4d983e96 | /src/cms/views/settings/user_settings_view.py | 6e83e61cde7860e8a32ec2c28cfd3c1b2c0324eb | [
"Apache-2.0"
] | permissive | digitalfabrik/coldaid-backend | b9af0f9129a974e92bc89003c4266d8e48539d0f | b769510570d5921e30876565263813c0362994e2 | refs/heads/develop | 2020-09-25T21:48:20.846809 | 2020-06-17T15:48:53 | 2020-06-17T15:48:53 | 226,094,918 | 4 | 2 | Apache-2.0 | 2020-06-17T15:48:54 | 2019-12-05T12:15:01 | HTML | UTF-8 | Python | false | false | 3,005 | py | from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
from django.shortcuts import render, redirect
from django.views.decorators.cache import never_cache
from ...forms.users import UserEmailForm, UserPasswordForm
@method_decorator(login_required, name='dispatch')
class UserSettingsView(TemplateView):
    """Settings page where a logged-in user can change e-mail and password."""

    # Template rendered by both GET and the error paths of POST.
    template_name = 'settings/user.html'

    @never_cache
    def get(self, request, *args, **kwargs):
        """Render the settings page with fresh, unbound forms."""
        user = request.user
        user_email_form = UserEmailForm(instance=user)
        user_password_form = UserPasswordForm(instance=user)
        return render(
            request,
            self.template_name,
            {
                'keys': user.mfa_keys.all(),
                'user_email_form': user_email_form,
                'user_password_form': user_password_form,
            }
        )

    # pylint: disable=unused-argument, too-many-branches
    def post(self, request, *args, **kwargs):
        """Handle either the e-mail form or the password form.

        The submitted form is selected by the `submit_form` POST field;
        validation errors are surfaced through the messages framework and
        the user is redirected back to the settings page in all cases.
        """
        user = request.user
        if request.POST.get('submit_form') == 'email_form':
            user_email_form = UserEmailForm(
                request.POST,
                instance=user
            )
            if not user_email_form.is_valid():
                # Add error messages
                for field in user_email_form:
                    for error in field.errors:
                        messages.error(request, _(error))
                for error in user_email_form.non_field_errors():
                    messages.error(request, _(error))
            elif not user_email_form.has_changed():
                messages.info(request, _('No changes detected.'))
            else:
                user_email_form.save()
                messages.success(request, _('E-mail-address was successfully saved.'))
        elif request.POST.get('submit_form') == 'password_form':
            user_password_form = UserPasswordForm(
                request.POST,
                instance=user
            )
            if not user_password_form.is_valid():
                # Add error messages
                for field in user_password_form:
                    for error in field.errors:
                        messages.error(request, _(error))
                for error in user_password_form.non_field_errors():
                    messages.error(request, _(error))
            elif not user_password_form.has_changed():
                messages.info(request, _('No changes detected.'))
            else:
                user = user_password_form.save()
                # Prevent user from being logged out after password has changed
                update_session_auth_hash(request, user)
                messages.success(request, _('Password was successfully saved.'))
        return redirect('user_settings')
| [
"ludwig+secure@integreat-app.de"
] | ludwig+secure@integreat-app.de |
6f25ab872e193cdb10b2e82ee3a0967273771d8c | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R1/benchmark/startQiskit_noisy99.py | 63b18b1983cf2ea49aadbaea3bb289271ef74cae | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,183 | py | # qubit number=3
# total number=18
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """Per-position XOR of two equal-length bit strings, returned reversed.

    The reversal matches the little-endian qubit ordering used elsewhere
    in this benchmark.
    """
    xored = ''.join(str(int(a) ^ int(b)) for a, b in zip(s, t))
    return xored[::-1]
def bitwise_dot(s: str, t: str) -> str:
    """Inner product mod 2 of two equal-length bit strings, as '0' or '1'."""
    total = sum(int(a) * int(b) for a, b in zip(s, t))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the phase/bit-flip oracle O_f as a QuantumCircuit.

    For each n-bit input x with f(x) == "1", a multi-controlled Toffoli
    flips the target qubit; X gates temporarily map the '0' control bits.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Surround the MCT with X gates so it fires exactly on input `rep`.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Assemble the Bernstein-Vazirani circuit for an n-bit oracle f.

    Uses n input qubits plus one target qubit (prepared in |1> then H),
    with a few extra mutation gates (h/rx/h) inserted before the
    superposition layer.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1]) # number=1
    prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
    prog.h(input_qubit[1]) # number=3
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate *prog* exactly and return {"|bits>": amplitude} for every
    basis state of the statevector."""
    state_backend = Aer.get_backend('statevector_simulator')
    statevec = execute(prog, state_backend).result()
    quantum_state = statevec.get_statevector()
    # number of qubits recovered from the statevector length (2**qubits)
    qubits = round(log2(len(quantum_state)))
    quantum_state = {
        "|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
        for i in range(2 ** qubits)
    }
    return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run *prog* on the named Aer backend and return a result summary.

    The dict contains raw measurement counts, the exact statevector, the
    most frequent measured bitstring ``a`` (reversed to undo qiskit's
    little-endian ordering), and the expected answer ``b``.
    """
    # Q: which backend should we use?
    # get state vector
    quantum_state = get_statevector(prog)
    # get simulate results
    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # most common measured outcome, bit-reversed
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """011 . x + 1"""
    # f(x) = (a . x) XOR b with a = "011", b = "1"
    return bitwise_xor(bitwise_dot("011", rep), "1")
def bernstein_test_2(rep: str):
    """000 . x + 0"""
    # f(x) = (a . x) XOR b with a = "000", b = "0"
    return bitwise_xor(bitwise_dot("000", rep), "0")
def bernstein_test_3(rep: str):
    """111 . x + 1"""
    # f(x) = (a . x) XOR b with a = "111", b = "1"
    return bitwise_xor(bitwise_dot("111", rep), "1")
if __name__ == "__main__":
    # Hidden string a and bias b for the Bernstein-Vazirani oracle.
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    writefile = open("../data/startQiskit_noisy99.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # noisy simulation against the FakeYorktown device model
    backend = FakeYorktown()
    circuit1 = transpile(prog, FakeYorktown())
    # extra mutation gates injected after transpilation
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
    # dump counts, depth and the circuit drawing to the CSV output file
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
0c27bf906d4b831769bc6b193c48e89a8aafe32d | 445368830cc2918f71cdca166e1363c4ac08c56e | /D12_Recursive_power.py | 9ebb7a2c5176e9bdbb0be2f62e51f63275e48409 | [] | no_license | Chehlarov/Python-Advanced | fe9cea7c600ca064db89bc11af43d944a4385b74 | 4bfddcfb02d59385d0083a5acbf338beab136a8e | refs/heads/main | 2023-03-13T08:53:03.936039 | 2021-03-03T17:16:02 | 2021-03-03T17:16:02 | 336,066,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | def recursive_power(number, power):
if power == 1:
return number
return number * recursive_power(number, power - 1)
print(recursive_power(2, 10))
| [
"noreply@github.com"
] | noreply@github.com |
6d553104dce5c9fcd4b3086daec02a36d90a0e92 | 644bde16761bebf341847dc302bcc195e33e664f | /data/scrape.py | f891f46811de9347b9f31c1d04058fe977e4774a | [] | no_license | lukevs/charity-explorer | 797c14d813518869d54cfb3fca74825394646083 | 452d8bf802b05a28eb46bf117dad0a4a8fa97051 | refs/heads/master | 2022-12-10T21:08:27.561555 | 2019-09-17T07:23:58 | 2019-09-17T07:23:58 | 203,031,399 | 5 | 1 | null | 2022-12-09T00:43:36 | 2019-08-18T16:43:36 | JavaScript | UTF-8 | Python | false | false | 1,211 | py | from bs4 import BeautifulSoup as bs
import requests
import csv
BASE_URL = 'https://en.wikipedia.org'
CHARITY_URL = 'https://en.wikipedia.org/wiki/List_of_charitable_foundations'
def get_description(url):
    """Fetch a Wikipedia article and return its lead paragraphs as one string.

    Paragraph collection stops as soon as a <p> starting with "The history"
    is encountered.
    """
    page = requests.get(url)
    soup = bs(page.text, 'html.parser')
    paragraphs = []
    for paragraph in soup.find_all("p"):
        text = paragraph.text
        if text.startswith("The history"):
            break
        paragraphs.append(text)
    return ' '.join(paragraphs)
# Scrape the list-of-charities page: every /wiki/<Name> link (no namespace
# colon, exactly two slashes) is treated as a charity article.
res = requests.get(CHARITY_URL)
soup = bs(res.text, "html.parser")
charities = {}
content = soup.find('div', {'id': 'content'})
for link in content.find_all("a"):
    url = link.get("href", "")
    if "/wiki" in url and url.count('/') == 2 and ':' not in url:
        charities[link.text.strip()] = "%s%s" % (BASE_URL, url)
# Write one tab-separated row (name, url, description) per charity.
with open('charities.csv', 'w') as f:
    csv_file = csv.writer(f, delimiter='\t')
    names = list(charities.keys())
    print('found', len(names))
    for i, charity_name in enumerate(names):
        url = charities[charity_name]
        description = get_description(url)
        row = [charity_name, url, description]
        print('row', row)
        csv_file.writerow(row)
    # NOTE(review): close() is redundant inside the with-block
    f.close()
| [
"chrisdistrict@gmail.com"
] | chrisdistrict@gmail.com |
c560a98412f1f79c8b28518349b9281f419d3cd1 | 5f313d8fce26a8ecfff8817ff566b7e1810fcba7 | /timethings.py | 4d68c2cbbbfce64ba5da5943421cca52b094884d | [] | no_license | luispedro/mahotas-paper | cd2769a264149cac74ce8c694ca4f02e3f4a6c93 | 698f2a8640feba4e285318e2cd866db3705ec2c3 | refs/heads/master | 2020-03-30T16:26:20.362126 | 2013-08-26T09:03:17 | 2013-08-26T09:03:17 | 4,877,058 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,733 | py | import skimage.morphology
import skimage.filter
import skimage.feature
import numpy as np
import timeit
import mahotas
import cv2
from os import path
luispedro_image = path.join(
path.dirname(mahotas.__file__),
'demos',
'data',
'luispedro.jpg')
f = mahotas.imread(luispedro_image, as_grey=True)
markers = np.zeros_like(f)
markers[100,100] = 1
markers[200,200] = 2
f = f.astype(np.uint8)
markers = markers.astype(int)
otsu = mahotas.otsu(f.astype(np.uint8))
fbin = f > otsu
fbin8 = fbin.astype(np.uint8)
Bc = np.eye(3)
Bc = Bc.astype(bool)
Bc8 = Bc.astype(np.uint8)
f3 = np.dstack([f,f,f])
f3 = f3.astype(np.uint8)
f3 = f3.copy()
filt = np.array([
[1,0,-1,0],
[2,2,3,-2],
[-1,0,0,1]
])
markers32 = markers.astype(np.int32)
def octagon(r):
    """Return a (2r+1)x(2r+1) boolean mask shaped like an octagon.

    Cuts one corner diagonally, then mirrors it across both axes for
    4-fold symmetry. NOTE(review): uses xrange, so this module targets
    Python 2.
    """
    octagon = np.ones((r*2+1, r*2+1), dtype=np.bool)
    lim = r//2
    for y in xrange(lim):
        octagon[y,:lim-y] = 0  # shave the top-left corner diagonally
    octagon &= octagon[::-1]    # mirror vertically
    octagon &= octagon[:,::-1]  # mirror horizontally
    return octagon
pre ='''
import skimage.filter
import skimage.morphology
import skimage.feature
import numpy as np
import mahotas
import pymorph
import cv2
import timethings
octagon = timethings.octagon
f = timethings.f
f3 = timethings.f3
fbin = timethings.fbin
fbin8 = timethings.fbin8
f64 = f.astype(np.float64)
Bc = timethings.Bc
Bc8 = timethings.Bc8
markers = timethings.markers
markers32 = timethings.markers32
filt = timethings.filt
'''
def t(s):
    """Best-of-3 timing (24 repetitions each) of statement *s* under the
    module-level `pre` setup string."""
    return min(timeit.timeit(s, setup=pre, number=24) for i in xrange(3))
tests = [
('convolve', [
'mahotas.convolve(f, filt)',
None,
None,
None,
]),
('erode', [
'mahotas.erode(fbin, Bc)',
'pymorph.erode(fbin, Bc)',
'skimage.morphology.erosion(fbin8, Bc8)',
'cv2.erode(fbin8, Bc8)',
]),
('dilate', [
'mahotas.dilate(fbin, Bc)',
'pymorph.dilate(fbin, Bc)',
'skimage.morphology.dilation(fbin8, Bc8)',
'cv2.dilate(fbin8, Bc8)',
]),
('open', [
'mahotas.open(fbin, Bc)',
'pymorph.open(fbin, Bc)',
'skimage.morphology.opening(fbin8, Bc8)',
None,
]),
('median filter (2)', [
'mahotas.median_filter(f, octagon(2))',
None,
'skimage.filter.median_filter(f, 2)',
None,
]),
('median filter (10)', [
'mahotas.median_filter(f, octagon(10))',
None,
'skimage.filter.median_filter(f, 10)',
None,
]),
('center mass', [
'mahotas.center_of_mass(f)',
None,
'skimage.measure.regionprops(np.ones(f.shape, np.intc), ["WeightedCentroid"], intensity_image=f)',
None,
]),
('sobel', [
'mahotas.sobel(f, just_filter=True)',
None,
'skimage.filter.sobel(f)',
'cv2.Sobel(f, cv2.CV_32F, 1, 1)',
]),
('cwatershed', [
'mahotas.cwatershed(f, markers)',
'pymorph.cwatershed(f, markers)',
'skimage.morphology.watershed(f, markers)',
'cv2.watershed(f3, markers32.copy())',
]),
('daubechies', [
'mahotas.daubechies(f, "D4")',
None,
None,
None,
]),
('haralick', [
'mahotas.features.haralick(f)',
None,
'skimage.feature.greycoprops(skimage.feature.greycomatrix(f, [1], [0]))',
None,
]),
]
if __name__ == '__main__':
    # Normalize all timings against a cheap baseline operation.
    base = t('np.max(f)')
    # Emit one LaTeX table row per benchmark: name & lib1 & lib2 ... \\
    # (Python 2 print statements; trailing commas suppress newlines.)
    for name,statements in tests:
        print r'{0:<20} &'.format(name),
        for st in statements:
            if st is None:
                result = 'NA'  # library has no equivalent operation
            else:
                result = '{:.1f}'.format( t(st)/base )
            print '{0:>8} &'.format(result),
        print r'\\'
| [
"luis@luispedro.org"
] | luis@luispedro.org |
a45dacabb65a8b878d1cb07374fde8bc5ac07d6d | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /7_graph/bfs求无权图的最短路径/广义的bfs/488. 祖玛游戏-bfs剪枝.py | 12413155d1f8a0da0d66c30102d92f4f104f18a7 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 2,038 | py | # 1 <= board.length <= 16
# 1 <= hand.length <= 5
from collections import deque
from functools import lru_cache
import re
# 为什么使用广度优先搜索?
# 因为只需要找出需要回合数最少的方案,因此使用广度优先搜索可以得到可以消除桌面上所有球的方案时就直接返回结果,而不需要继续遍历更多需要回合数更多的方案。
class Solution:
    def findMinStep(self, board: str, hand: str) -> int:
        """Return the minimum number of hand balls needed to clear the board
        by repeated insertions (Zuma game); -1 if the board cannot be cleared."""
        @lru_cache(None)
        def clean(s: str) -> str:
            """Repeatedly remove any run of 3+ identical balls."""
            count = 1
            while count:
                s, count = re.subn(r'(\w)\1{2,}', '', s)
            return s
        # Sort the hand so permutations of the same multiset share one state.
        hand = ''.join(sorted(hand))
        queue = deque([(board, hand, 0)])
        visited = set([(board, hand)])
        # BFS over (board, hand) states: the first empty board found uses the
        # fewest insertions.
        while queue:
            b, h, step = queue.popleft()
            if not b:
                return step
            # choose an insertion position
            for i in range(len(b)):
                # choose which hand ball to insert
                for j in range(len(h)):
                    # Key pruning: inserting h[j] between two neighbors that are
                    # all pairwise distinct can never help, so only expand when
                    # the local trio/pair contains a duplicate.
                    sequence = [b[i - 1], b[i], h[j]] if i else [b[i], h[j]]
                    if len(set(sequence)) < len(sequence):
                        nextB = clean(b[:i] + h[j] + b[i:])
                        nextH = h[:j] + h[j + 1 :]
                        if (nextB, nextH) not in visited:
                            visited.add((nextB, nextH))
                            queue.append((nextB, nextH, step + 1))
        return -1
print(Solution().findMinStep(board="WRRBBW", hand="RB"))
print(Solution().findMinStep(board="WWRRBBWW", hand="WRBRW"))
# re.subn返回一个元组
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
6981c7c8e8c5f58f7815adf008a36b87eb6bc2e3 | cd93eed33b120d632890a19e692ad318223162ed | /django_doc/manage.py | dd6b50b80fb33de2cc53a024a30dd68a74614073 | [] | no_license | tuyuqi/Django_git_project | 0c78bafccf3091284ea325ee8872b452472158d1 | 01d68a16b8f8b66c5dddda2672f05917b397e230 | refs/heads/master | 2020-04-21T20:08:54.418385 | 2019-02-10T07:35:51 | 2019-02-10T07:35:51 | 169,833,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django entry point: point at this project's settings module,
    # then hand the command line over to Django's management utility.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_doc.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the usual causes (missing install /
        # inactive virtualenv), chaining the original ImportError.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"yuqi.tu@gmail.com"
] | yuqi.tu@gmail.com |
8157a66920a4d18df210932be00e7ae648e9bfec | 760ef8fa1fa34f7a0621ed1a72cd4070d5825e34 | /src/tray_world_creator.py | 030feced0d4b97e56653925469dea38f8d799f2b | [] | no_license | michaelarfreed/MotionTaskPlanningProject | 9a2670565fe2923504d678407d3dcb4659bc06df | 384f9801f031f0d413854bc844a452d2681b34af | refs/heads/master | 2023-04-10T12:08:35.538339 | 2021-04-04T23:12:37 | 2021-04-04T23:12:37 | 354,664,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,318 | py | import numpy as np
import openravepy
import utils
TARGET_FILE = '../environments/rll_tray_world.dae'
TABLE_HEIGHT = 0.657
def create_dest(env, destname, pos):
    """Create a thin red 10cm square KinBody marking a destination at *pos*."""
    SIDE = 0.1
    THICKNESS = 0.001
    plate = openravepy.KinBody.GeometryInfo()
    plate._type = openravepy.GeometryType.Box
    plate._vGeomData = [SIDE / 2, SIDE / 2, THICKNESS / 2]
    plate._vDiffuseColor = [1, 0, 0]
    dest = openravepy.RaveCreateKinBody(env, '')
    dest.InitFromGeometries([plate])
    dest.SetName(destname)
    dest.SetTransform(openravepy.matrixFromPose([1, 0, 0, 0] + list(pos)))
    return dest
def create_tray(env, t):
    """Create the 'tray' KinBody (flat top on a narrower standoff base) and
    place it at the position of transform *t*, with a fixed 90-degree yaw.

    Note: only the translation of *t* is kept; its rotation is overwritten.
    """
    dim1 = 0.3048
    dim2 = 0.6096
    THICKNESS = 0.0095
    TRAY_HEIGHT = 0.148
    # top plate of the tray, centered at TRAY_HEIGHT
    surface = openravepy.KinBody.GeometryInfo()
    surface._type = openravepy.GeometryType.Box
    surface._vGeomData = [dim1/2, dim2/2, THICKNESS/2]
    surface._vDiffuseColor = [0.4, 0.2, 0.4]
    surface._t = openravepy.matrixFromPose((1, 0, 0, 0, 0, 0, TRAY_HEIGHT - THICKNESS/2))
    # narrower block supporting the plate from the ground up
    standoff = openravepy.KinBody.GeometryInfo()
    standoff._type = openravepy.GeometryType.Box
    standoff._vGeomData = [0.6*dim1/2, 0.6*dim2/2, TRAY_HEIGHT/2]
    standoff._vDiffuseColor = [0.4, 0.2, 0.4]
    standoff._t = openravepy.matrixFromPose((1, 0, 0, 0, 0, 0, TRAY_HEIGHT/2))
    tray = openravepy.RaveCreateKinBody(env, '')
    tray.InitFromGeometries([surface, standoff])
    tray.SetName('tray')
    # keep the translation of t but force a fixed quaternion (90-deg rotation)
    pose = openravepy.poseFromMatrix(t)
    pose[:4] = [0.7071, 0, 0, 0.7071]
    t = openravepy.matrixFromPose(pose)
    tray.SetTransform(t)
    return tray
def create_cylinder(env, body_name, pos, radius, height):
    """Create a cyan cylinder whose base sits at pos[2].

    Note: mutates the caller's *pos* list (pos[2] += height/2), matching
    the original behavior.
    """
    geom = openravepy.KinBody.GeometryInfo()
    geom._type = openravepy.GeometryType.Cylinder
    geom._vGeomData = [radius, height]
    geom._bVisible = True
    geom._vDiffuseColor = [0, 1, 1]
    body = openravepy.RaveCreateKinBody(env, '')
    body.InitFromGeometries([geom])
    body.SetName(body_name)
    pos[2] += height / 2  # shift to the cylinder's center (side effect on pos)
    body.SetTransform(openravepy.matrixFromPose([1, 0, 0, 0] + pos))
    return body
def create_table(env, body_name, dim1, dim2, pos):
    """Create a four-legged table KinBody whose top surface sits at pos[2].

    The tabletop hangs below the origin (so pos[2] is the surface height)
    and one leg is tucked into each corner underneath it.

    The original duplicated the leg-construction code four times; it is
    factored into a helper parameterized by the corner signs, producing the
    same geometries in the same order.
    """
    THICKNESS = 0.2
    LEGDIM1 = 1.3
    LEGDIM2 = 0.6
    LEGHEIGHT = 0.6  # Doesn't actually determine the height of the table in the env

    tabletop = openravepy.KinBody.GeometryInfo()
    tabletop._type = openravepy.GeometryType.Box
    tabletop._vGeomData = [dim1/2, dim2/2, THICKNESS/2]
    tabletop._t[2, 3] = -THICKNESS/2
    tabletop._vDiffuseColor = [0.5, 0.2, 0.1]

    def _make_leg(sign_x, sign_y):
        # One leg, tucked into the (sign_x, sign_y) corner under the tabletop.
        leg = openravepy.KinBody.GeometryInfo()
        leg._type = openravepy.GeometryType.Box
        leg._vGeomData = [LEGDIM1/2, LEGDIM2/2, LEGHEIGHT/2]
        leg._t[0, 3] = sign_x * (dim1/2 - LEGDIM1/2)
        leg._t[1, 3] = sign_y * (dim2/2 - LEGDIM2/2)
        leg._t[2, 3] = -LEGHEIGHT/2 - THICKNESS/2
        leg._vDiffuseColor = [0.5, 0.2, 0.1]
        return leg

    # Same corner order as the original leg1..leg4: (+,+), (+,-), (-,+), (-,-)
    legs = [_make_leg(sx, sy) for sx in (1, -1) for sy in (1, -1)]

    table = openravepy.RaveCreateKinBody(env, '')
    table.InitFromGeometries([tabletop] + legs)
    table.SetName(body_name)
    table.SetTransform(openravepy.matrixFromPose([1, 0, 0, 0] + pos))
    return table
env = openravepy.Environment()
env.SetViewer('qtcoin')
# plot origin for sanity
t = np.eye(4)
o = utils.plot_transform(env, t, 1.0)
# spawn PR2
robot = env.ReadRobotXMLFile("robots/pr2-beta-sim.robot.xml")
env.Add(robot)
# spawn main table
env.AddKinBody(create_table(env, 'table', 2.25, 0.94, [2.25, 0, TABLE_HEIGHT]))
# spawn tray table
env.AddKinBody(create_table(env, 'tray_table', 1.525, 0.61, [0.95, 1.0, TABLE_HEIGHT]))
# spawn glasses (object2)
GLASS_HEIGHT = 0.15
GLASS_RADIUS = 0.04
env.AddKinBody(create_cylinder(env, 'object21', [1.4, 0, TABLE_HEIGHT], GLASS_RADIUS, GLASS_HEIGHT))
env.AddKinBody(create_cylinder(env, 'object22', [1.4, 0.2, TABLE_HEIGHT], GLASS_RADIUS, GLASS_HEIGHT))
env.AddKinBody(create_cylinder(env, 'object23', [1.4, -0.2, TABLE_HEIGHT], GLASS_RADIUS, GLASS_HEIGHT))
# spawn bowls (object1)
BOWL_HEIGHT = 0.08
BOWL_RADIUS = 0.06
env.AddKinBody(create_cylinder(env, 'object11', [1.2, 0, TABLE_HEIGHT], BOWL_RADIUS, BOWL_HEIGHT))
env.AddKinBody(create_cylinder(env, 'object12', [1.2, 0.2, TABLE_HEIGHT], BOWL_RADIUS, BOWL_HEIGHT))
env.AddKinBody(create_cylinder(env, 'object13', [1.2, -0.2, TABLE_HEIGHT], BOWL_RADIUS, BOWL_HEIGHT))
# # spawn plates (object3)
# PLATE_HEIGHT = 0.04
# PLATE_RADIUS = 0.08
# env.AddKinBody(create_cylinder(env, 'object31', [1.4, -0.2, TABLE_HEIGHT], PLATE_RADIUS, PLATE_HEIGHT))
# env.AddKinBody(create_cylinder(env, 'object32', [1.2, -0.2, TABLE_HEIGHT], PLATE_RADIUS, PLATE_HEIGHT))
# spawn tray
env.AddKinBody(create_tray(env, openravepy.matrixFromPose((1, 0, 0, 0, 0.5, 0.85, TABLE_HEIGHT))))
# adding destinations
env.AddKinBody(create_dest(env, 'destobject11', (1.8, -0.3, TABLE_HEIGHT)))
env.AddKinBody(create_dest(env, 'destobject12', (2.2, -0.3, TABLE_HEIGHT)))
env.AddKinBody(create_dest(env, 'destobject13', (2.6, -0.3, TABLE_HEIGHT)))
env.AddKinBody(create_dest(env, 'destobject21', (2.0, -0.1, TABLE_HEIGHT)))
env.AddKinBody(create_dest(env, 'destobject22', (2.4, -0.1, TABLE_HEIGHT)))
env.AddKinBody(create_dest(env, 'destobject23', (2.8, -0.1, TABLE_HEIGHT)))
# env.AddKinBody(create_dest(env, 'destobject31', (2.6, -0.3, TABLE_HEIGHT)))
# env.AddKinBody(create_dest(env, 'destobject32', (2.8, -0.1, TABLE_HEIGHT)))
tl1 = create_dest(env, 'trayloc1', (0.5, 0.85, TABLE_HEIGHT))
env.AddKinBody(tl1)
tl2 = create_dest(env, 'trayloc2', (3.2, -0.4, TABLE_HEIGHT))
env.AddKinBody(tl2)
env.Save(TARGET_FILE)
raw_input("Press ENTER to exit!")
| [
"michaelarfreed@gmail.com"
] | michaelarfreed@gmail.com |
64f8f8d3a25cb6ac09de771d500d0936684177e1 | fc155434e4bb00eb1b6f09ccda3cd73235bce156 | /CreateVideo.py | 4d8d947f846bb1a053d68f0ebd03f54d0aac8ad3 | [] | no_license | alter-sachin/buildar_email | 03ac84b2d391cd8951e9311847a97e7d21795a76 | 8dbe19dfe8893747e35051515568133c6079dcf1 | refs/heads/main | 2023-04-21T20:33:48.917393 | 2021-05-16T14:01:17 | 2021-05-16T14:01:17 | 367,877,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | import json
import requests
import constants
api_endpoint = constants.API_ENDPOINT
header_data={'Authorization': constants.API_AUTHKEY}
class Video:
    """Client for the video-creation endpoint: posts an actor id plus an
    audio URL and returns the generated video's URL."""
    def __init__(self,actorId,audioUrl):
        # actorId: id of the presenter; audioUrl: audio track to lip-sync
        self.actorId = actorId
        self.audioUrl = audioUrl
    def create_request_body(self):
        """Serialize the request payload as a JSON string."""
        data = {
            "actorId": self.actorId,
            "audioUrl": self.audioUrl,
        }
        return json.dumps(data)
    def get_speaker_details(self):
        # placeholder -- not implemented yet
        print("implement later")
    def create_video(self):
        """POST the payload to <api_endpoint>/video and return the videoUrl
        field of the JSON response.

        NOTE(review): no status-code check; a non-2xx response will raise
        on the missing 'videoUrl' key instead.
        """
        response = requests.post(api_endpoint+"video",
        headers=header_data,
        data = self.create_request_body())
        videoUrl = str(json.loads(response.content.decode('utf-8'))['videoUrl'])
        return videoUrl
# videoObj = Video(constants.ACTOR1,"https://dreal.in/aud/1621100562.873771.wav")
# #get_users()
# videoObj.create_video()
# # mailObj = Mailer(from_email,list_of_emails)
# # mailObj.send_email() | [
"guitarplayersachin@gmail.com"
] | guitarplayersachin@gmail.com |
3563587f0871cbdbbb1a1a1c326f78a1f00057a5 | 247342c16db02f52f52e781afae8aaa2ccc467d7 | /translate.py | 71e049c68d51d4af88c600d91b1534d21b18fd4e | [] | no_license | twadada/Machine-Translation | fb7cd6599132efbdd1f6cb7ea4adf4f8bd4c033a | 5619f1276ae2a8954edacc5b6532cfe380921837 | refs/heads/master | 2020-03-25T19:36:25.347588 | 2018-08-14T11:42:29 | 2018-08-14T11:42:29 | 142,395,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,978 | py | import os
import pickle
import argparse
import chainer
from models.preprocessing import Convert_word2id,np
from tqdm import tqdm
from train import NMT
# src_test=/cl/work/takashi-w/ASPEC-JE/test/test.txt.tok.low.ja
# model=Results/ASPEC.encoder_decoder_epoch3_ppl_56.599_best_model
# python translate.py -gpuid 1 -model $model -src_test $src_test -out_attention_weight -k 3 -beam_size 5
parser = argparse.ArgumentParser()
parser.add_argument(
'-gpuid',
type=int,
default=-1,
help='gpuid; -1 means using cpu (default)')
parser.add_argument(
'-src_test',
type=str,
required=True,
help='source test data path')
parser.add_argument(
'-model',
type=str,
required=True,
help='model path')
parser.add_argument(
'-beam_size',
type=int,
default=1,
help='beam size: 1 means greedy decoding (default)')
parser.add_argument(
'-normalize',
action='store_true',
help='normalize decoding probability by length')
parser.add_argument(
'-out_attention_weight',
action='store_true',
help='output attention weight heatmap')
parser.add_argument(
'-k',
type=int,
default=0,
help='output additional txt file that lists top k translation candidates for each soruce sentence'
'\n\n; set k larger than 0 to enable this option; (defalut: disabled)')
class Translator(chainer.Chain):
    """Test-time wrapper around a trained encoder/decoder pair.

    Translates sentence by sentence, then writes the best translations,
    optional top-k candidate lists, and optional attention-weight heatmaps
    to files prefixed with the model path.
    """
    def __init__(self, encoder, decoder, id2vocab, model):
        super().__init__()
        with self.init_scope():
            self.encoder = encoder
            self.decoder = decoder
            self.id2vocab = id2vocab  # (source id->word, target id->word) pair
            self.model = model        # model path, reused as output-file prefix
    def translate_base(self, s_id, s_lengths, beam_size, normalize, *args):
        """Translate each source sentence (batch size 1); return per-sentence
        best hypotheses, all beam hypotheses, and attention weights."""
        EOS_id = 0
        translation_best_list = []
        translation_all_list = []
        attention_weight_list = []
        print("translating")
        for k in tqdm(range(len(s_id))):
            h_last, c_last, hs = self.encoder([s_id[k]], [s_lengths[k]]) # (bt * s_len * enc_size)
            translation_best, prob_best, translation, translation_prob, attention_p_out = self.decoder.translate(h_last, c_last, hs, beam_size, EOS_id,
                                                          normalize, *args)
            translation_best_list.append(translation_best)
            translation_all_list.append(translation)
            attention_weight_list.append(attention_p_out[0])
        return translation_best_list,translation_all_list, attention_weight_list
    def translate(self, s_id, s_lengths, beam_size, normalize,
                  out_attention_weight, k, *args):
        """Run translation and write all requested output files."""
        translation_best_list, translation_all_list, attention_weight_list \
            = self.translate_base(s_id, s_lengths, beam_size, normalize,*args)
        #save outputs
        print("save translations")
        translation_word = translator.save_transation(translation_best_list)
        if (k>1):
            print("save top k translation candidates")
            translator.save_topk_translations(translation_all_list, k)
        if (out_attention_weight):
            print("save attention weight heatmap")
            translator.save_attn_weight(s_id, translation_word, attention_weight_list)
    def save_transation(self, translation_list):
        """Write one detokenized best translation per line; return the strings."""
        translation_word = []
        tgt_id2vocab = self.id2vocab[1]
        f = open(self.model + ".translation.txt", "w+")
        for i in range(len(translation_list)):
            translation = translation_list[i]
            translation = " ".join([tgt_id2vocab[word_id] for word_id in translation])
            translation_word.append(translation)
            f.write(translation + "\n")
        f.close()
        return translation_word
    def save_topk_translations(self, topk_translation, k):
        """Write the top-k candidates per sentence, blank-line separated."""
        tgt_id2vocab = self.id2vocab[1]
        f = open(self.model + ".translation_top"+str(k)+".txt", "w+")
        for i in range(len(topk_translation)):
            translation_list = topk_translation[i][0:k]
            for sentence in translation_list:
                translation = " ".join([tgt_id2vocab[word_id] for word_id in sentence])
                f.write(translation + "\n")
            f.write('\n')
        f.close()
    def save_attn_weight(self, s_id, translation_word, attn_wight):
        """Render one attention heatmap (source words x target words) per
        sentence into a single multi-page PDF."""
        src_id2vocab = self.id2vocab[0]
        pdf_pages = PdfPages(self.model+ ".attn_W.pdf")
        for k in tqdm(range(len(attn_wight))):
            s_id[k] =s_id[k].tolist() #np/cupy_array -> list
            attn_wight_tmp = attn_wight[k] # t_len, s_len
            translation = translation_word[k].split() + ["<\s>"]
            attn_wight_tmp = np.round(attn_wight_tmp, 2) # s_len * bt * 1 * 5 (= window +1)
            x_labels = [src_id2vocab[s_id[k][j]] for j in range(len(s_id[k]))]
            y_labels = translation
            plt.figure(figsize=(len(x_labels) * 0.3, len(y_labels) * 0.3))
            ax = sns.heatmap(attn_wight_tmp,
                             cbar=False,
                             vmin=0, vmax=1,
                             cmap="Reds")
            ax.set_xticklabels(x_labels, rotation=90)
            ax.set_yticklabels(y_labels, rotation=0)
            plt.tight_layout()
            pdf_pages.savefig()
            #plt.savefig(self.model+ ".attn_W" + str(k) + ".pdf")
            plt.close('all')
        pdf_pages.close()
        # merger = PdfFileMerger() #merge all pdfs into one file
        # for k in range(len(s_id)):
        #     merger.append(open(self.model+ ".attn_W" + str(k) + ".pdf", 'rb'))
        #     os.remove(self.model+ ".attn_W" + str(k) + ".pdf")
        # with open(self.model+ ".attn_W.pdf", 'wb') as fout:
        #     merger.write(fout)
if __name__ == '__main__':
opt = parser.parse_args()
if(opt.out_attention_weight):
import matplotlib
matplotlib.use('Agg')
font = {'family': 'IPAexGothic'}
matplotlib.rc('font', **font)
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from PyPDF2 import PdfFileMerger
print("beam_size: ",opt.beam_size)
print("normalize: ",opt.normalize)
if(opt.k > opt.beam_size):
raise Exception("k must not be larger than beam size")
file = open(opt.model, 'rb')
model = pickle.load(file)
test_lines_id, test_sent_length = Convert_word2id(opt.src_test, model.vocab2id[0])
model.to_cpu()
if opt.gpuid >= 0:
chainer.cuda.get_device_from_id(opt.gpuid).use()
to_gpu = chainer.cuda.to_gpu
model.to_gpu()
####numpy->cupy####
for i in range(len(test_lines_id)):
test_lines_id[i] = to_gpu(test_lines_id[i]) # list of xp.array
translator = Translator(model.encoder,model.decoder, model.id2vocab, opt.model)
translator.translate(test_lines_id, test_sent_length, opt.beam_size, opt.normalize,
opt.out_attention_weight, opt.k)
| [
"takashi.wada.764@gmail.com"
] | takashi.wada.764@gmail.com |
ccc456d17a7c5c5b509e388397e01ad74e2f0559 | 00a9295409b78a53ce790f7ab44931939f42c0e0 | /FPGA/apio/iCEBreaker/FIR_Filter/sympy/venv/lib/python3.8/site-packages/sympy/solvers/tests/test_pde.py | 1b43eb0b0886235a8a8b1b4a593e4e7d486fcfae | [
"Apache-2.0"
] | permissive | klei22/Tech-OnBoarding-Class | c21f0762d2d640d5e9cb124659cded5c865b32d4 | 960e962322c37be9117e0523641f8b582a2beceb | refs/heads/master | 2022-11-10T13:17:39.128342 | 2022-10-25T08:59:48 | 2022-10-25T08:59:48 | 172,292,871 | 2 | 3 | Apache-2.0 | 2019-05-19T00:26:32 | 2019-02-24T03:50:35 | C | UTF-8 | Python | false | false | 9,057 | py | from sympy import (Derivative as D, Eq, exp, sin,
Function, Symbol, symbols, cos, log)
from sympy.core import S
from sympy.solvers.pde import (pde_separate, pde_separate_add, pde_separate_mul,
pdsolve, classify_pde, checkpdesol)
from sympy.testing.pytest import raises
a, b, c, x, y = symbols('a b c x y')
def test_pde_separate_add():
    """Additive separation u(x,t) = X(x) + T(t) of a first-order PDE."""
    x, y, z, t = symbols("x,y,z,t")
    F, T, X, Y, Z, u = map(Function, 'FTXYZu')
    eq = Eq(D(u(x, t), x), D(u(x, t), t)*exp(u(x, t)))
    res = pde_separate_add(eq, u(x, t), [X(x), T(t)])
    assert res == [D(X(x), x)*exp(-X(x)), D(T(t), t)*exp(T(t))]
def test_pde_separate():
x, y, z, t = symbols("x,y,z,t")
F, T, X, Y, Z, u = map(Function, 'FTXYZu')
eq = Eq(D(u(x, t), x), D(u(x, t), t)*exp(u(x, t)))
raises(ValueError, lambda: pde_separate(eq, u(x, t), [X(x), T(t)], 'div'))
def test_pde_separate_mul():
x, y, z, t = symbols("x,y,z,t")
c = Symbol("C", real=True)
Phi = Function('Phi')
F, R, T, X, Y, Z, u = map(Function, 'FRTXYZu')
r, theta, z = symbols('r,theta,z')
# Something simple :)
eq = Eq(D(F(x, y, z), x) + D(F(x, y, z), y) + D(F(x, y, z), z), 0)
# Duplicate arguments in functions
raises(
ValueError, lambda: pde_separate_mul(eq, F(x, y, z), [X(x), u(z, z)]))
# Wrong number of arguments
raises(ValueError, lambda: pde_separate_mul(eq, F(x, y, z), [X(x), Y(y)]))
# Wrong variables: [x, y] -> [x, z]
raises(
ValueError, lambda: pde_separate_mul(eq, F(x, y, z), [X(t), Y(x, y)]))
assert pde_separate_mul(eq, F(x, y, z), [Y(y), u(x, z)]) == \
[D(Y(y), y)/Y(y), -D(u(x, z), x)/u(x, z) - D(u(x, z), z)/u(x, z)]
assert pde_separate_mul(eq, F(x, y, z), [X(x), Y(y), Z(z)]) == \
[D(X(x), x)/X(x), -D(Z(z), z)/Z(z) - D(Y(y), y)/Y(y)]
# wave equation
wave = Eq(D(u(x, t), t, t), c**2*D(u(x, t), x, x))
res = pde_separate_mul(wave, u(x, t), [X(x), T(t)])
assert res == [D(X(x), x, x)/X(x), D(T(t), t, t)/(c**2*T(t))]
# Laplace equation in cylindrical coords
eq = Eq(1/r * D(Phi(r, theta, z), r) + D(Phi(r, theta, z), r, 2) +
1/r**2 * D(Phi(r, theta, z), theta, 2) + D(Phi(r, theta, z), z, 2), 0)
# Separate z
res = pde_separate_mul(eq, Phi(r, theta, z), [Z(z), u(theta, r)])
assert res == [D(Z(z), z, z)/Z(z),
-D(u(theta, r), r, r)/u(theta, r) -
D(u(theta, r), r)/(r*u(theta, r)) -
D(u(theta, r), theta, theta)/(r**2*u(theta, r))]
# Lets use the result to create a new equation...
eq = Eq(res[1], c)
# ...and separate theta...
res = pde_separate_mul(eq, u(theta, r), [T(theta), R(r)])
assert res == [D(T(theta), theta, theta)/T(theta),
-r*D(R(r), r)/R(r) - r**2*D(R(r), r, r)/R(r) - c*r**2]
# ...or r...
res = pde_separate_mul(eq, u(theta, r), [R(r), T(theta)])
assert res == [r*D(R(r), r)/R(r) + r**2*D(R(r), r, r)/R(r) + c*r**2,
-D(T(theta), theta, theta)/T(theta)]
def test_issue_11726():
x, t = symbols("x t")
f = symbols("f", cls=Function)
X, T = symbols("X T", cls=Function)
u = f(x, t)
eq = u.diff(x, 2) - u.diff(t, 2)
res = pde_separate(eq, u, [T(x), X(t)])
assert res == [D(T(x), x, x)/T(x),D(X(t), t, t)/X(t)]
def test_pde_classify():
    """Constant-coefficient PDEs classify as homogeneous; variable-coefficient
    ones as 1st_linear_variable_coeff."""
    # When more number of hints are added, add tests for classifying here.
    f = Function('f')
    eq1 = a*f(x,y) + b*f(x,y).diff(x) + c*f(x,y).diff(y)
    eq2 = 3*f(x,y) + 2*f(x,y).diff(x) + f(x,y).diff(y)
    eq3 = a*f(x,y) + b*f(x,y).diff(x) + 2*f(x,y).diff(y)
    eq4 = x*f(x,y) + f(x,y).diff(x) + 3*f(x,y).diff(y)
    eq5 = x**2*f(x,y) + x*f(x,y).diff(x) + x*y*f(x,y).diff(y)
    eq6 = y*x**2*f(x,y) + y*f(x,y).diff(x) + f(x,y).diff(y)
    for eq in [eq1, eq2, eq3]:
        assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
    for eq in [eq4, eq5, eq6]:
        assert classify_pde(eq) == ('1st_linear_variable_coeff',)
def test_checkpdesol():
f, F = map(Function, ['f', 'F'])
eq1 = a*f(x,y) + b*f(x,y).diff(x) + c*f(x,y).diff(y)
eq2 = 3*f(x,y) + 2*f(x,y).diff(x) + f(x,y).diff(y)
eq3 = a*f(x,y) + b*f(x,y).diff(x) + 2*f(x,y).diff(y)
for eq in [eq1, eq2, eq3]:
assert checkpdesol(eq, pdsolve(eq))[0]
eq4 = x*f(x,y) + f(x,y).diff(x) + 3*f(x,y).diff(y)
eq5 = 2*f(x,y) + 1*f(x,y).diff(x) + 3*f(x,y).diff(y)
eq6 = f(x,y) + 1*f(x,y).diff(x) + 3*f(x,y).diff(y)
assert checkpdesol(eq4, [pdsolve(eq5), pdsolve(eq6)]) == [
(False, (x - 2)*F(3*x - y)*exp(-x/S(5) - 3*y/S(5))),
(False, (x - 1)*F(3*x - y)*exp(-x/S(10) - 3*y/S(10)))]
for eq in [eq4, eq5, eq6]:
assert checkpdesol(eq, pdsolve(eq))[0]
sol = pdsolve(eq4)
sol4 = Eq(sol.lhs - sol.rhs, 0)
raises(NotImplementedError, lambda:
checkpdesol(eq4, sol4, solve_for_func=False))
def test_solvefun():
    """The `solvefun` keyword controls the arbitrary-function symbol in the
    general solution (defaults to F)."""
    f, F, G, H = map(Function, ['f', 'F', 'G', 'H'])
    eq1 = f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)
    assert pdsolve(eq1) == Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))
    assert pdsolve(eq1, solvefun=G) == Eq(f(x, y), G(x - y)*exp(-x/2 - y/2))
    assert pdsolve(eq1, solvefun=H) == Eq(f(x, y), H(x - y)*exp(-x/2 - y/2))
def test_pde_1st_linear_constant_coeff_homogeneous():
f, F = map(Function, ['f', 'F'])
u = f(x, y)
eq = 2*u + u.diff(x) + u.diff(y)
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
sol = pdsolve(eq)
assert sol == Eq(u, F(x - y)*exp(-x - y))
assert checkpdesol(eq, sol)[0]
eq = 4 + (3*u.diff(x)/u) + (2*u.diff(y)/u)
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
sol = pdsolve(eq)
assert sol == Eq(u, F(2*x - 3*y)*exp(-S(12)*x/13 - S(8)*y/13))
assert checkpdesol(eq, sol)[0]
eq = u + (6*u.diff(x)) + (7*u.diff(y))
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
sol = pdsolve(eq)
assert sol == Eq(u, F(7*x - 6*y)*exp(-6*x/S(85) - 7*y/S(85)))
assert checkpdesol(eq, sol)[0]
eq = a*u + b*u.diff(x) + c*u.diff(y)
sol = pdsolve(eq)
assert checkpdesol(eq, sol)[0]
def test_pde_1st_linear_constant_coeff():
    """Inhomogeneous first-order linear PDEs with constant coefficients."""
    f, F = map(Function, ['f', 'F'])
    u = f(x, y)
    hints = ('1st_linear_constant_coeff', '1st_linear_constant_coeff_Integral')
    # (equation, expected closed-form solution) pairs.
    cases = [
        (-2*u.diff(x) + 4*u.diff(y) + 5*u - exp(x + 3*y),
         Eq(f(x, y),
            (F(4*x + 2*y) + exp(x/S(2) + 4*y)/S(15))*exp(x/S(2) - y))),
        ((u.diff(x)/u) + (u.diff(y)/u) + 1 - (exp(x + y)/u),
         Eq(f(x, y), F(x - y)*exp(-x/2 - y/2) + exp(x + y)/S(3))),
        (2*u + -u.diff(x) + 3*u.diff(y) + sin(x),
         Eq(f(x, y),
            F(3*x + y)*exp(x/S(5) - 3*y/S(5)) - 2*sin(x)/S(5) - cos(x)/S(5))),
        (u + u.diff(x) + u.diff(y) + x*y,
         Eq(f(x, y),
            -x*y + x + y + F(x - y)*exp(-x/S(2) - y/S(2)) - 2)),
    ]
    for pde, expected in cases:
        solution = pdsolve(pde)
        assert solution == expected
        assert classify_pde(pde) == hints
        assert checkpdesol(pde, solution)[0]
    # A logarithmic inhomogeneity is still classified (solution not asserted).
    assert classify_pde(u + u.diff(x) + u.diff(y) + log(x)) == hints
def test_pdsolve_all():
    """hint='all' returns every matching solver's result plus metadata keys."""
    f, F = map(Function, ['f', 'F'])
    u = f(x, y)
    pde = u + u.diff(x) + u.diff(y) + x**2*y
    result = pdsolve(pde, hint='all')
    # The dict carries one entry per applicable hint plus 'default'/'order'.
    assert sorted(result.keys()) == ['1st_linear_constant_coeff',
                                    '1st_linear_constant_coeff_Integral',
                                    'default', 'order']
    assert result['order'] == 1
    assert result['default'] == '1st_linear_constant_coeff'
    assert result['1st_linear_constant_coeff'] == Eq(
        f(x, y),
        -x**2*y + x**2 + 2*x*y - 4*x - 2*y + F(x - y)*exp(-x/S(2) - y/S(2)) + 6)
def test_pdsolve_variable_coeff():
    """Exercise the '1st_linear_variable_coeff' hint on several first-order
    linear PDEs with variable coefficients, checking the closed-form result
    and (where asserted) that it satisfies the original equation."""
    f, F = map(Function, ['f', 'F'])
    u = f(x, y)
    eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
    sol = pdsolve(eq, hint="1st_linear_variable_coeff")
    assert sol == Eq(u, F(x*y)*exp(y**2/2) + 1)
    assert checkpdesol(eq, sol)[0]
    eq = x**2*u + x*u.diff(x) + x*y*u.diff(y)
    sol = pdsolve(eq, hint='1st_linear_variable_coeff')
    assert sol == Eq(u, F(y*exp(-x))*exp(-x**2/2))
    assert checkpdesol(eq, sol)[0]
    eq = y*x**2*u + y*u.diff(x) + u.diff(y)
    sol = pdsolve(eq, hint='1st_linear_variable_coeff')
    assert sol == Eq(u, F(-2*x + y**2)*exp(-x**3/3))
    assert checkpdesol(eq, sol)[0]
    eq = exp(x)**2*(u.diff(x)) + y
    sol = pdsolve(eq, hint='1st_linear_variable_coeff')
    assert sol == Eq(u, y*exp(-2*x)/2 + F(y))
    assert checkpdesol(eq, sol)[0]
    # NOTE(review): unlike the cases above, this last case carries no
    # checkpdesol assertion -- possibly truncated source; confirm upstream.
    eq = exp(2*x)*(u.diff(y)) + y*u - u
    sol = pdsolve(eq, hint='1st_linear_variable_coeff')
    assert sol == Eq(u, exp((-y**2 + 2*y + 2*F(x))*exp(-2*x)/2))
| [
"kaunalei@gmail.com"
] | kaunalei@gmail.com |
20e3b3314110b5f76c4cbd58f4de848416ac8082 | 35b5ab3c9863f1f151b600f040904ef83a7ba4d0 | /test.py | a08e862b5bcd80c024840892c084bf79e7c09232 | [] | no_license | aaron-alphonsus/style-transfer-flask | 5d9120f93aa16656d9f59062e268cba977d8dadf | 615d5a2a64ada75352018708a626d7abce5a0467 | refs/heads/master | 2020-06-24T21:15:19.997826 | 2019-07-29T20:26:05 | 2019-07-29T20:26:05 | 199,092,710 | 0 | 0 | null | 2019-07-26T23:42:42 | 2019-07-26T23:42:42 | null | UTF-8 | Python | false | false | 8,329 | py | # from google.cloud import storage
import os
from flask import request, send_file
import tempfile
# import cloudstorage as gcs
# from google.appengine.api import images, app_identity
# STYLE_URL = "/style-input-images-1/Vassily_Kandinsky,_1913_-_Composition_7.jpg"
# f = gcs.open(STYLE_URL)
# data = f.read()
# print(data)
# from object_detection import load_object
# from style_transfer import run_style_transfer
import os
import numpy as np
from six.moves.urllib.request import urlopen
import tarfile
# def download_blob(bucket_name, source_blob_name, destination_file_name):
# """Downloads a blob from the bucket."""
# storage_client = storage.Client()
# bucket = storage_client.get_bucket(bucket_name)
# blob = bucket.blob(source_blob_name)
# blob.download_to_filename(destination_file_name)
# print('Blob {} downloaded to {}.'.format(
# source_blob_name,
# destination_file_name))
# def DownloadCheckpointFiles(checkpoint_dir=os.path.abspath("")):
# """Download checkpoint files if necessary."""
# full_checkpoint = "https://download.magenta.tensorflow.org/models/arbitrary_style_transfer.tar.gz"
# # url_prefix = 'http://download.magenta.tensorflow.org/models/'
# # checkpoints = ['multistyle-pastiche-generator-monet.ckpt', 'multistyle-pastiche-generator-varied.ckpt']
# # for checkpoint in checkpoints:
# # full_checkpoint = os.path.join(checkpoint_dir, checkpoint)
# # checkpoint_dir = 'arbitrary_style_transfer'
# if not os.path.exists(checkpoint_dir):
# print('Downloading {}'.format(full_checkpoint))
# filename = full_checkpoint.split("/")[-1]
# with open(filename, "wb") as f:
# r = requests.get(full_checkpoint)
# f.write(r.content)
def DownloadCheckpointFiles(checkpoint_dir=os.path.abspath("")):
    """Download and unpack the style-transfer checkpoint archive if necessary.

    Args:
        checkpoint_dir: Directory the ``.tar.gz`` archive is written to
            (defaults to the current working directory).
    """
    url_prefix = 'http://download.magenta.tensorflow.org/models/'
    checkpoints = ['arbitrary_style_transfer.tar.gz']
    # The archive unpacks into this directory; its presence means we are done.
    extracted_path = 'arbitrary_style_transfer'
    for checkpoint in checkpoints:
        full_checkpoint = os.path.join(checkpoint_dir, checkpoint)
        if os.path.exists(extracted_path):
            continue
        print('Downloading {}'.format(full_checkpoint))
        response = urlopen(url_prefix + checkpoint)
        try:
            data = response.read()
        finally:
            # fix: the HTTP response object was never closed (socket leak).
            response.close()
        with open(full_checkpoint, 'wb') as fh:
            fh.write(data)
        # NOTE(review): unzip_tar_gz() opens the archive by its CWD-relative
        # name, so extraction only works when checkpoint_dir is the CWD --
        # same limitation as the original code.
        unzip_tar_gz()
def unzip_tar_gz():
    """Extract the downloaded checkpoint archive into the current directory."""
    # Context manager guarantees the archive handle is closed even if
    # extractall() raises (the original leaked the open file in that case).
    with tarfile.open('arbitrary_style_transfer.tar.gz', "r:gz") as archive:
        archive.extractall()
# Runs at import time: fetch and unpack the model checkpoint if it is missing.
DownloadCheckpointFiles()
# unzip_tar_gz()
# DownloadCheckpointFiles()
# detections =[[ 0.5210921 0.68086916 0.6457864 0.81711274 55 0.99862325]
# [ 0.40780655 0.58433187 0.52803445 0.71685034 55 0.99838376]
# [ 0.41141948 0.30821186 0.5301136 0.43577647 55 0.9982496 ]
# [ 0.5196079 0.53558177 0.64213705 0.66894466 55 0.99801254]
# [ 0.41874808 0.44184786 0.5332428 0.57064563 55 0.99788 ]
# [ 0.51584554 0.37531173 0.65214545 0.5233197 55 0.9976165 ]
# [ 0.3246436 0.3725551 0.42766646 0.48901382 55 0.9958526 ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# [ 0. 0. 0. 0. 0. 0. ]
# ]
# zero_index = np.where(detections[:, 4] == 0)[0]
# print(zero_index)
# # print(os.path.basename(
# # 'gs: // style-input-images-1/all_objects.jpg'))
# # style = "static/style_images/Vassily_Kandinsky,_1913_-_Composition_7.jpg"
# # content = "static/input_images/styled.jpg"
# # test = "arbitrary_image_stylization_with_weights \
# # --checkpoint=arbitrary_style_transfer/model.ckpt \
# # --output_dir=outputs \
# # --style_images_paths="+style+"\
# # --content_images_paths="+content+"\
# # --image_size=256 \
# # --content_square_crop=False \
# # --style_image_size=256 \
# # --style_square_crop=False \
# # --logtostderr"
# # path = os.system(test)
# # print(path)
# # from object_detection import blending
# crop_path = 'static/blending/crop.jpg'
# original_path = 'static/blending/original.jpg'
# style_path = 'static/blending/original_stylized_Vassily_Kandinsky,_1913_-_Composition_7_0.jpg'
# print(os.path.join(os.path.abspath(""), style_path))
# print(blending(crop_path, original_path, style_path))
# test1 = None
# def test():
# global test1
# test1 = 'TEST'
# return
# if __name__ == "__main__":
# test()
# print(test1)
# style_file = os.path.join(os.path.abspath(''), 'stylize.jpg')
# model = os.path.join(os.path.abspath(''), 'rcnn_model.pkl')
# show_objects = load_object(style_file, model)
# style_file = request.files['styled.jpg']
# print(style_file)
# style_file = os.path.abspath('../Flask-STWA/stylize.jpg')
# print(os.path.join(os.path.abspath(''), 'stylize.jpg'))
# CLOUD_STORAGE_BUCKET = 'style-input-images-1'
# def upload_to_gcloud(file, destination_blob_name):
# # style_path = request.files['style_file']
# storage_client = storage.Client(project='amli-245518')
# bucket = storage_client.get_bucket(CLOUD_STORAGE_BUCKET)
# blob = bucket.blob(destination_blob_name)
# blob.upload_from_filename(file)
# return blob.public_url
# style_file = os.path.join(os.path.abspath(''), 'styled.jpg')
# url = upload_to_gcloud(style_file, 'styled.jpg')
# print(url)
# style_file = os.path.join(os.path.abspath(''), 'stylize.jpg')
# CLOUD_STORAGE_BUCKET = 'style-input-images-1'
# best, best_loss = run_style_transfer(
# content_path, style_path, num_iterations=3)
# storage_client = storage.Client(project='amli-245518')
# bucket = storage_client.get_bucket(CLOUD_STORAGE_BUCKET)
# destination_blob_name = 'styled-test.jpg'
# blob = bucket.blob(destination_blob_name)
# blob.upload_from_filename(style_file)
# def upload():
# style_file = os.path.join(os.path.abspath(''), 'stylize.jpg')
# CLOUD_STORAGE_BUCKET = 'style-input-images-1'
# best, best_loss = run_style_transfer(
# content_path, style_path, num_iterations=3)
# storage_client = storage.Client(project='amli-245518')
# bucket = storage_client.get_bucket(CLOUD_STORAGE_BUCKET)
# destination_blob_name = 'stylize.jpg'
# blob = bucket.blob(destination_blob_name)
# blob.upload_from_filename(style_file)
# def download():
# CLOUD_STORAGE_BUCKET = 'style-input-images-1'
# style_file = 'stylize.jpg'
# # style_file = os.path.abspath('../Flask-STWA/stylize.jpg')
# client = storage.Client()
# bucket = client.get_bucket(CLOUD_STORAGE_BUCKET)
# blob = bucket.blob(style_file)
# filename = 'new.jpg'
# blob.download_to_filename(filename)
# # return send_file(temp.name, attachment_filename=style_file)
| [
"huhuang@cs.hmc.edu"
] | huhuang@cs.hmc.edu |
429cff803102a46433f095f3ba8e41199c5dcae2 | 47257562cd6db4185c34ab582f8ea311c3155c45 | /DDPG_HER_VIME/Smoothing.py | cf7379093370f65b961a366d14338f46d0e9a24e | [] | no_license | JuliusVsi/HVDDPG_Project | f25ebe429c0ed0ea806b2f41d108f60e798b9a39 | a21c6b89ef8e9ab2092a2dcc329b83f6b7813fcd | refs/heads/master | 2020-07-17T02:09:38.593852 | 2019-09-02T19:25:32 | 2019-09-02T19:25:32 | 205,919,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,970 | py | import matplotlib.pyplot as plt
import numpy as np
def smooth(data, weight=0.85):
    """Exponentially smooth a sequence (TensorBoard-style running average).

    Args:
        data: Sequence of numbers to smooth.
        weight: Smoothing factor in [0, 1); higher keeps more history.

    Returns:
        List of smoothed values, same length as ``data``.
    """
    # fix: the original raised IndexError (data[0]) on an empty sequence.
    if len(data) == 0:
        return []
    last = data[0]
    smoothed = []
    for point in data:
        # Blend the running value with the new point.
        last = last * weight + (1 - weight) * point
        smoothed.append(last)
    return smoothed
def _add_curve(label, src_file, dst_file, w_main, w_ref, color, clip, limit=None):
    """Plot one smoothed success-rate curve with a shaded band.

    The band half-width is the gap between a strongly smoothed curve
    (``w_main``) and a lightly smoothed reference (``w_ref``); when ``clip``
    is true both band edges are clipped at zero so the shading never dips
    below a 0.0 success rate. The smoothed curve is also written back to
    ``Plot_Data/<dst_file>``.
    """
    data = np.loadtxt('Plot_Data/' + src_file, delimiter=',')
    if limit is not None:
        # Some runs are truncated so curves cover comparable episode ranges.
        data = data[0:limit]
    episodes = np.arange(len(data))
    curve = np.array(smooth(data, weight=w_main))
    reference = np.array(smooth(data, weight=w_ref))
    np.savetxt('Plot_Data/' + dst_file, curve, fmt='%f', delimiter=',')
    band = curve - reference
    upper = curve + band
    lower = curve - band
    if clip:
        upper = np.maximum(upper, 0)
        lower = np.maximum(lower, 0)
    plt.plot(episodes, curve, label=label)
    plt.fill_between(episodes, upper, lower, color=color, alpha=0.1)


def _finish_figure(legend_loc):
    """Shared axis decoration applied to every task figure."""
    plt.grid()
    plt.ylim((-0.05, 1.05))
    plt.xlabel('Episodes')
    plt.ylabel('Success Rate')
    plt.legend(loc=legend_loc)


if __name__ == '__main__':
    # Per-figure curve specs: (label, input file, smoothed output file,
    # main weight, reference weight, band colour, optional episode limit).
    FIGURES = [
        # The Reach figure keeps unclipped bands (original behaviour).
        ('Reach', 'lower right', False, [
            ('DDPG+HER', 'Reach_HER.txt', 'Smoothed_Reach_HER.txt', 0.4, 0.1, 'blue', None),
            ('HVDDPG', 'Reach_VIME.txt', 'Smoothed_Reach_VIME.txt', 0.4, 0.1, 'orange', None),
            # fix: every Reach band colour was copy-pasted as 'orange'; use
            # the green/red scheme of the other figures
            ('DDPG', 'Reach_DDPG.txt', 'Smoothed_Reach_DDPG.txt', 0.6, 0.3, 'green', None),
            # fixes: the DQN curve was saved over Smoothed_Reach_DDPG.txt and
            # its lower band edge mistakenly used the DDPG band width
            ('DQN', 'Reach_DQN.txt', 'Smoothed_Reach_DQN.txt', 0.7, 0.0, 'red', None),
        ]),
        ('Push', 'upper left', True, [
            ('DDPG+HER', 'Push_HER.txt', 'Smoothed_Push_HER.txt', 0.9, 0.7, 'blue', None),
            ('HVDDPG', 'Push_VIME.txt', 'Smoothed_Push_VIME.txt', 0.9, 0.7, 'orange', 100),
            ('DDPG', 'Push_DDPG.txt', 'Smoothed_Push_DDPG.txt', 0.9, 0.7, 'green', None),
            ('DQN', 'Push_DQN.txt', 'Smoothed_Push_DQN.txt', 0.9, 0.7, 'red', None),
        ]),
        ('Pick', 'upper left', True, [
            ('DDPG+HER', 'Pick_HER.txt', 'Smoothed_Pick_HER.txt', 0.98, 0.7, 'blue', None),
            ('HVDDPG', 'Pick_VIME.txt', 'Smoothed_Pick_VIME.txt', 0.98, 0.7, 'orange', None),
            ('DDPG', 'Pick_DDPG.txt', 'Smoothed_Pick_DDPG.txt', 0.9, 0.0, 'green', None),
            ('DQN', 'Pick_DQN.txt', 'Smoothed_Pick_DQN.txt', 0.9, 0.7, 'red', None),
        ]),
        ('Slide', 'upper left', True, [
            ('DDPG+HER', 'Slide_HER.txt', 'Smoothed_Slide_HER.txt', 0.98, 0.8, 'blue', None),
            ('HVDDPG', 'Slide_VIME.txt', 'Smoothed_Slide_VIME.txt', 0.98, 0.7, 'orange', None),
            ('DDPG', 'Slide_DDPG.txt', 'Smoothed_Slide_DDPG.txt', 0.98, 0.5, 'green', None),
            ('DQN', 'Slide_DQN.txt', 'Smoothed_Slide_DQN.txt', 0.98, 0.0, 'red', None),
        ]),
        # No DQN data exists for the two in-hand manipulation tasks below.
        ('Egg', 'upper left', True, [
            ('DDPG+HER', 'Egg_HER.txt', 'Smoothed_Egg_HER.txt', 0.98, 0.3, 'blue', None),
            ('HVDDPG', 'Egg_VIME.txt', 'Smoothed_Egg_VIME.txt', 0.98, 0.5, 'orange', None),
            ('DDPG', 'Egg_DDPG.txt', 'Smoothed_Egg_DDPG.txt', 0.98, 0.2, 'green', None),
        ]),
        ('pen', 'upper left', True, [
            ('DDPG+HER', 'Pen_HER.txt', 'Smoothed_Pen_HER.txt', 0.99, 0.3, 'blue', 800),
            ('HVDDPG', 'Pen_VIME.txt', 'Smoothed_Pen_VIME.txt', 0.98, 0.5, 'orange', None),
            ('DDPG', 'Pen_DDPG.txt', 'Smoothed_Pen_DDPG.txt', 0.98, 0.5, 'green', None),
        ]),
    ]
    for figure_name, legend_loc, clip, curves in FIGURES:
        plt.figure(figure_name)
        for label, src_file, dst_file, w_main, w_ref, color, limit in curves:
            _add_curve(label, src_file, dst_file, w_main, w_ref, color, clip, limit)
        # fix: the Egg figure previously received no grid/labels/legend at all.
        _finish_figure(legend_loc)
    plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
34acd628169af951f3576d45c718a7a03b92b726 | 6919841828cf32f3e4b0f6f12f6d54be23321888 | /exp/face24.py | db4baf6b7976aeb66a2d63d236bbd38f6c51d276 | [] | no_license | ayanc/rpgan | bea6d283278dcead5fa4c63815a70162d5fe3173 | cb4d4ab226fa78e4d6f6dcae9fa715952345a0bb | refs/heads/master | 2021-01-22T04:23:39.000116 | 2018-06-27T21:43:45 | 2018-06-27T21:43:45 | 92,459,728 | 20 | 6 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | # Generator Parameters
ksz=4  # convolution kernel size
zlen = 100 # Dimensionality of z (latent input to the generator)
f1 = 1024 # Features in first layer of Gen output
# Discriminator Parameters
df = 128 # No. of hidden features (at first layer of D)
# Training set
imsz = 64  # image side length in pixels
bsz = 64  # mini-batch size
lfile='data/faces.txt'  # text file listing the training images
crop=False  # whether to crop images on load -- presumably; confirm against the loader
# Learning parameters
wts_dir='models/face24'  # directory where model weights are written
SAVEFREQ=1e3  # iterations between saves -- NOTE(review): inferred from name; verify in training loop
MAXITER=1e5  # total training iterations
| [
"ayanc@ttic.edu"
] | ayanc@ttic.edu |
2400de35f3a6c6902ae173e097d54b31040a551a | 2cbf3aaad62f4922d827af658fb5dbb7ac651bef | /teledusite/teledu/models/conceptAttribute.py | fc12d964e90614a6ff7813077017d177a3c7fecb | [] | no_license | tctimmeh/teledu | 0266240aa864cd2eed75857e66eaeb8270f44c1a | 04135ffb04f397f29152ca48f868a957b18d504a | refs/heads/master | 2021-01-23T08:52:32.817693 | 2013-10-29T01:34:41 | 2013-10-29T01:34:41 | 2,566,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | from django.db import models
from concept import Concept
from attribute import Attribute
class ConceptAttribute(Attribute):
concept = models.ForeignKey(Concept, related_name = 'attributes')
class Meta:
app_label = 'teledu'
unique_together = (('concept', 'name'))
def __unicode__(self):
return '%s - %s' % (self.concept.name, self.name)
def conceptName(self):
return self.concept.name
def gameSystem(self):
return self.concept.gameSystem
def getAttributeValuesForInstance(self, instance):
from conceptAttributeValue import ConceptAttributeValue
return ConceptAttributeValue.objects.filter(attribute = self, instance = instance)
| [
"tctimmeh@gmail.com"
] | tctimmeh@gmail.com |
0154452e8030cb69acdc0d908d2e28193782d8cc | 1f9f0e49c2dd5104e24a63416a4f773c017793db | /parse_dir.py | eee4110418bbbda9f46243fcea474adbbd76cad9 | [] | no_license | sameeul/duplication_detector | ca6cb957e24e443da92cccea1342d0d4484d8e93 | 3e374ea0274c1983e78e416a623cd05da3f270a5 | refs/heads/master | 2020-09-13T07:33:30.400569 | 2019-11-22T03:19:45 | 2019-11-22T03:19:45 | 222,696,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,137 | py | import argparse
import operator
#set up command line processing
parser = argparse.ArgumentParser(description = "Find duplication in a directory")
parser.add_argument("--name", required = True, type = str, help = "Text file containing file listing")
args = parser. parse_args()
input_file_name=args.name
# now work on finding duplicates
file_dict = {}
status_code = {"release":2, "review":3, "in-process":5}
with open(input_file_name) as fp:
for line in fp:
line = line.strip()
file_name_data = line.split("\\")
if len(file_name_data) == 3:
file_name = file_name_data[-1].lower()
status = file_name_data[1].lower()
if status in status_code:
if file_name in file_dict:
file_dict[file_name] = file_dict[file_name]*status_code[status]
else:
file_dict[file_name] = status_code[status]
#processing done, now print
file_in_two_dir = []
file_in_three_dir = []
for file_name in file_dict:
if file_dict[file_name] > 5 and file_dict[file_name] < 30:
file_in_two_dir.append(file_name)
if file_dict[file_name] == 30:
file_in_three_dir.append(file_name)
print("Total %d files are unique"%(len(file_dict)-len(file_in_two_dir)-len(file_in_three_dir)))
print("Total %d files are duplicate across two directories"%(len(file_in_two_dir)))
print("Total %d files are duplicate across three directories"%(len(file_in_three_dir)))
print("Total %d files are duplicate"%(len(file_in_two_dir)+len(file_in_three_dir)))
duplicate_files = file_in_two_dir+file_in_three_dir
if len(duplicate_files) != 0:
duplicate_files.sort()
print("\nList of duplicate files:\n")
print("Review | Release | In-Process | Filename")
print("----------------------------------------")
for file_name in duplicate_files:
line = ""
if file_dict[file_name] % status_code["review"] == 0:
line = line + " X "
else:
line = line + " "
if file_dict[file_name] % status_code["release"] == 0:
line = line + " X "
else:
line = line + " "
if file_dict[file_name] % status_code["in-process"] == 0:
line = line + " X "
else:
line = line + " "
line = line+file_name
print(line)
#list top 10 duplication series
#optional
series_dict = {}
for file in duplicate_files:
file_comp = file.split("-")
series_start_code = file_comp[0]
if len(file_comp) > 2:
if series_start_code in series_dict:
series_dict[series_start_code] = series_dict[series_start_code]+1
else:
series_dict[series_start_code] = 1
sorted_series_list = sorted(series_dict.items(), key=operator.itemgetter(1), reverse=True)
print("\nMost Duplicated Series:\n")
print("Series Code Number of Duplicates")
print("------------------------------------------")
for data in sorted_series_list[0:10]:
print("%s %d"%(data[0], data[1]))
| [
"noreply@github.com"
] | noreply@github.com |
d3a08515917f144e92f34f6bfbc54f112d6bbd36 | d58d8affb44c98db7af8264ebbda303dc0271bcb | /python/selenium/Browser_ipvt.py | 4f1a908b608a1fce8052fd1163890bd06d324d78 | [] | no_license | jijunjun1112/python | 5946504ffcc6235f57a535dc6a978e0f8e3ba43f | 5753fcf8d1f20d9cdb441cbf44509edb32c5fcec | refs/heads/master | 2021-01-01T04:56:39.374901 | 2016-06-02T11:44:20 | 2016-06-02T11:44:20 | 56,660,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | #coding:utf-8
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains  # ActionChains: mouse-interaction helper class
from selenium.webdriver.common.keys import Keys  # Keys: keyboard-key constants for send_keys
import time
import sys
# Selenium driver wrapper for the IPVideoTalk web client (Python 2 syntax).
# SECURITY NOTE(review): login credentials and a meeting account number are
# hard-coded below — they should be moved to configuration/secrets storage.
class Browser_ipvt(object):
    """This is user class."""
    # Class-level defaults; instances overwrite url in __init__ and
    # browser in openBrowser(). conf_url appears unused.
    url=""
    browser=""
    conf_url=""
    def scheduleconfer(self):
        # Schedule a meeting: open the form, fill theme and account, submit.
        self.browser.find_element_by_id("scheduleMeeting").click()
        time.sleep(1)
        self.browser.find_element_by_id("meetingTheme").send_keys("sunny")
        self.browser.find_element_by_id("proAccount").send_keys("8200433")
        self.browser.find_element_by_xpath(".//*[@class='btn btn-blue meetingSubmit']").click()
        time.sleep(1)
    def __init__(self, url):
        # URL of the login page this instance will drive.
        self.url = url
    def showClassName(self):
        # Debug helper: print the class name.
        print self.__class__.__name__
    def showClassDoc(self):
        # Debug helper: print the class docstring.
        print self.__class__.__doc__
    def quitBrowser(self):
        # Close the browser and all associated windows.
        self.browser.quit()
        print "Now quit the browser"
    def openBrowser(self):
        # Launch Firefox, navigate to the login URL, and maximize the window.
        self.browser= webdriver.Firefox()
        self.browser.get(self.url)
        self.browser.maximize_window()
        time.sleep(1)
        print "Open gvc client web!"
    def login(self):
        # Fill the login form with the (hard-coded) account and submit.
        self.browser.find_element_by_id("uerName").send_keys("jijunjun1112")
        self.browser.find_element_by_id("password").send_keys("Jun13676830606")
        self.browser.find_element_by_xpath(".//*[@id='loginForm']/div[4]/button").click()
        time.sleep(1)
        print "Has already login in as jijunjun1112!"
def main():
    # End-to-end flow: open the browser, log in, schedule a meeting, quit.
    browser_ipvt=Browser_ipvt('http://account.ipvideotalk.com/login/')
    browser_ipvt.openBrowser()
    browser_ipvt.login()
    browser_ipvt.scheduleconfer()
    browser_ipvt.quitBrowser()
if __name__ == '__main__':
main()
| [
"jjji@grandstream.cn"
] | jjji@grandstream.cn |
5e795c4e386973917ef112bcb637b06aa09e031e | 4a31e84a4a6cf5c4cdf4757cca63ed6ff92088bc | /Python/optimization/ga.py | bc5e41cfed7bdb031e3b8adfb4fe881261d36a7f | [] | no_license | aldo-fsm/tests | e91ac45709a7dbffdd7984f1bc799a68d1c86d9f | 9d0317236f459a00af56aaee738b2a46365a7b99 | refs/heads/master | 2021-01-20T08:44:12.225578 | 2018-07-17T17:48:05 | 2018-07-17T17:48:05 | 101,570,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,329 | py | import numpy as np
import re
def optimize(costFunction, chromSize, popSize, selectionRate, mutRate, **kwargs):
    """Infinite generator running a binary genetic algorithm.

    Each iteration evaluates and sorts the population (best/lowest cost
    first), keeps the top selectionRate fraction, breeds children via
    selectPairs/crossover, yields the new population, then mutates it.

    kwargs: 'pairing', 'pairingTounamentSize' (sic — typo preserved for
    compatibility), 'crossover', 'uniformCrossoverProb'.
    """
    kwargs.setdefault('pairing', 'rank_weighting')
    kwargs.setdefault('pairingTounamentSize', 3)
    kwargs.setdefault('crossover', 'single_point')
    kwargs.setdefault('uniformCrossoverProb', 0.5)
    popKeepSize = round(selectionRate*popSize)
    # Random binary population: popSize chromosomes of chromSize bits.
    pop = np.round(np.random.rand(popSize, chromSize))
    numPairs = np.ceil((popSize-popKeepSize)/2)
    generation = 0
    while True:
        print("Generation: {}".format(generation))
        costs = np.array([costFunction(chrom) for chrom in pop])
        # Sort population by ascending cost (minimization).
        sortIndexes = np.argsort(costs)
        costs = costs[sortIndexes]
        pop = pop[sortIndexes]
        for (chrom, cost) in zip(pop, costs):
            # re.sub strips everything but digits, printing the bit string.
            print(" {0} ..... {1}".format(re.sub(r'\D', '', str(chrom)), cost))
        popKeep = pop[:popKeepSize]
        parents1, parents2 = selectPairs(popKeep, costs, numPairs,
                                         kind=kwargs['pairing'],
                                         tournamentSize=kwargs['pairingTounamentSize'])
        # Truncate children so the population size stays constant.
        children = crossover(parents1, parents2,
                             kind=kwargs['crossover'],
                             uniformProb=kwargs['uniformCrossoverProb'])[:popSize-popKeepSize]
        pop = np.concatenate([popKeep,children], axis=0)
        yield pop
        # Mutate in place after yielding; row 0 (the elite) is spared by mutation().
        mutation(pop, np.ceil((popSize-1)*chromSize*mutRate))
        generation+=1
def selectPairs(popKeep, costs, numberPairs, kind='rank_weighting', **kwargs):
    """Select parent pairs from the kept (elite) population.

    Args:
        popKeep: 2-D array of surviving chromosomes, sorted best-first.
        costs: per-chromosome costs; for kind='cost_weighting' this must be
            the *full* population's costs (it indexes costs[popKeepSize]).
        numberPairs: number of parent pairs to produce.
        kind: 'top_to_bottom', 'random', 'cost_weighting',
            'rank_weighting' (default) or 'tournament'.
        **kwargs: 'tournamentSize' (default 3) for kind='tournament'.

    Returns:
        Tuple (parents1, parents2) of chromosome arrays, one row per pair.
    """
    kwargs.setdefault('tournamentSize', 3)
    popKeepSize = len(popKeep)
    numberPairs = int(numberPairs)
    if kind == 'top_to_bottom':
        # Pair neighbours in rank order: (0,1), (2,3), ...
        p1 = np.arange(0, popKeepSize, 2)[:numberPairs]
        p2 = p1 + 1
    elif kind == 'random':
        p1 = np.random.choice(popKeepSize, numberPairs)
        p2 = np.random.choice(popKeepSize, numberPairs)
    elif kind == 'cost_weighting':
        # Subtract the best *excluded* cost so all kept costs are <= 0;
        # dividing by the (negative) sum then gives the best chromosome the
        # largest selection probability (classic GA cost weighting).
        costs = (costs-costs[popKeepSize])[:popKeepSize]
        costsSum = np.sum(costs[:popKeepSize])
        if costsSum == 0:
            # All kept costs identical: weighting undefined, pair randomly.
            return selectPairs(popKeep, costs, numberPairs, kind='random')
        prob = costs/costsSum
        cumProb = np.cumsum(prob)
        random1 = np.random.rand(numberPairs)
        random2 = np.random.rand(numberPairs)
        p1 = [argfirst(lambda n : n > r, cumProb) for r in random1]
        p2 = [argfirst(lambda n : n > r, cumProb) for r in random2]
        for i in range(len(p1)):
            # Avoid self-pairing by re-drawing the second parent.
            if p1[i] == p2[i] :
                p2[i] = np.random.choice(popKeepSize)
    elif kind == 'rank_weighting':
        # Probability proportional to rank: best rank -> highest probability.
        ranks = list(range(popKeepSize))
        ranksSum = np.sum(ranks)
        prob = [(popKeepSize-i-1)/ranksSum for i in ranks]
        cumProb = np.cumsum(prob)
        random1 = np.random.rand(numberPairs)
        random2 = np.random.rand(numberPairs)
        p1 = [argfirst(lambda n : n > r, cumProb) for r in random1]
        p2 = [argfirst(lambda n : n > r, cumProb) for r in random2]
        for i in range(len(p1)):
            if p1[i] == p2[i] :
                p2[i] = np.random.choice(popKeepSize)
    elif kind == 'tournament':
        p1 = []
        p2 = []
        for _ in range(numberPairs):
            group1 = np.random.choice(popKeepSize, kwargs['tournamentSize'])
            group2 = np.random.choice(popKeepSize, kwargs['tournamentSize'])
            # BUG FIX: np.argmin returns the winner's position *within* the
            # tournament group, not its population index. The original
            # appended the raw argmin, which always selected indexes
            # 0..tournamentSize-1 regardless of who won. Map the winner back
            # through the group to get the true population index.
            p1.append(group1[np.argmin(costs[group1])])
            p2.append(group2[np.argmin(costs[group2])])
    return (popKeep[p1], popKeep[p2])
def crossover(parents1, parents2, kind='single_point', **kwargs):
    """Breed children from paired parents; returns a flat list of children.

    Each parent pair produces two complementary children. kind is one of
    'single_point' (default), 'double_point' or 'uniform';
    kwargs['uniformProb'] (default 0.5) is the per-gene swap threshold
    for 'uniform'. Note: callers truncate the returned list to the number
    of children actually needed.
    """
    kwargs.setdefault('uniformProb', 0.5)
    children = []
    if kind == 'single_point':
        for p1, p2 in zip(parents1,parents2):
            # Cut point in 1..len-1 so neither side is empty.
            crossoverPoint = np.random.choice(len(p1)-1)+1
            # pylint: disable=unbalanced-tuple-unpacking
            p1Left, p1Right = np.split(p1, [crossoverPoint])
            p2Left, p2Right = np.split(p2, [crossoverPoint])
            c1 = np.concatenate([p1Left, p2Right])
            c2 = np.concatenate([p2Left, p1Right])
            children += [c1, c2]
    elif kind == 'double_point':
        for p1, p2 in zip(parents1,parents2):
            # Two distinct interior cut points, sorted ascending.
            crossoverPoints = np.sort(np.random.choice(len(p1)-1, 2, replace=False)+1)
            # pylint: disable=unbalanced-tuple-unpacking
            l1, m1, r1 = np.split(p1, crossoverPoints)
            l2, m2, r2 = np.split(p2, crossoverPoints)
            # Swap the middle segments between the parents.
            c1 = np.concatenate([l1, m2, r1])
            c2 = np.concatenate([l2, m1, r2])
            children += [c1, c2]
    elif kind == 'uniform':
        for p1, p2 in zip(parents1, parents2):
            prob = kwargs['uniformProb']
            # aux[i] selects which parent supplies gene i (0 or 1);
            # the second child takes the opposite parent at each gene.
            aux = list(map(lambda x: int(x >= prob), np.random.rand(len(p1))))
            pair = [p1, p2]
            c1 = np.array([pair[aux[i]][i] for i in range(len(p1))])
            c2 = np.array([pair[::-1][aux[i]][i] for i in range(len(p1))])
            children += [c1, c2]
    return children
def mutation(pop, numMut):
popSize = len(pop)
chromSize = len(pop[0])
chrom = np.random.choice(popSize-1, int(numMut))+1
bit = np.random.choice(chromSize, int(numMut))
for i, j in zip(chrom, bit):
pop[i,j] = np.abs(pop[i,j]-1)
def argfirst(condition, iterable):
    """Return the index of the first element for which condition is true.

    Raises StopIteration when no element satisfies the condition,
    matching the original next()-based implementation.
    """
    for index, element in enumerate(iterable):
        if condition(element):
            return index
    raise StopIteration
| [
"aldo.fsmonteiro@gmail.com"
] | aldo.fsmonteiro@gmail.com |
38c9d1b686c768cb8aa82bee7276bc14769b92c1 | b4d1de38688da22e64026b0b46a1e0ae3efa97ed | /server_random_data_generator.py | 8fb1dd5ec17ffda8aa8e0965dc3111af27b0e041 | [] | no_license | misrori/streaming_with_mtcars | a8f57e25f7a5389dc593a1e0e72bf944a7dc0abf | bb459983c7bfd6e648bf46685809e5e7b48c8d36 | refs/heads/master | 2021-01-18T22:40:54.167528 | 2017-04-05T07:36:57 | 2017-04-05T07:36:57 | 87,066,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | #!/home/mihaly/hadoop/anaconda3/bin/python
from random import *
import random
import time
from datetime import datetime
def client_thread(conn, ip, port, MAX_BUFFER_SIZE = 4096):
    """Send one random mtcars.csv row (timestamped) over conn.

    Returns 0 on success, -1 if the send failed (client gone).
    NOTE(review): ip, port and MAX_BUFFER_SIZE are accepted but unused;
    the CSV file is re-opened and fully re-read on every call.
    """
    mtcars = open('mtcars.csv')
    # random.sample(...) returns a one-element list; the replace() chain
    # strips the list syntax ([, ], quotes) and the trailing newline escape.
    my_line = str(random.sample(mtcars.readlines(),1)).replace('[','').replace(']','').replace('\'','').replace('\"','').replace('\\n','')
    mtcars.close()
    # Prefix the row with the current wall-clock time (H:M:S).
    my_st = datetime.now().strftime('%H:%M:%S') +','+ my_line + '\n'
    try:
        conn.send(my_st.encode("utf8"))
        return 0
    # NOTE(review): bare except — swallows *any* error, not just socket
    # failures; a narrower except would be safer.
    except:
        return -1
def start_server():
    """Accept TCP clients on localhost:9000 and stream random rows to each.

    Serves one client at a time (no threading despite client_thread's
    name); streams rows until the client disconnects, then accepts the
    next connection. Runs forever.
    """
    import socket
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # this is for easy starting/killing the app
    soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    print('Socket created')
    soc.bind(("localhost", 9000))
    print('Socket bind complete')
    #Start listening on socket
    soc.listen(10)
    print('Socket now listening')
    while True:
        conn, addr = soc.accept()
        ip, port = str(addr[0]), str(addr[1])
        print('Accepting connection from ' + ip + ':' + port)
        time.sleep(1)
        while True: #conn.__getstate__:
            # client_thread returns -1 once the client stops accepting data.
            if client_thread(conn, ip, port) != 0:
                break
            # Random 0–0.1 s pause between rows to simulate a live stream.
            time.sleep(randint(0,1)*0.1)
            print(".", end='')
    # NOTE(review): unreachable — the accept loop above never exits.
    soc.close()
start_server()
| [
"ormraat.pte@gmail.com"
] | ormraat.pte@gmail.com |
04cb69832818c5318fc8810a89edf011e8167b66 | 29be7382240c4f5ae23a5c06c75b8dda8043800c | /PRO-99/removeFiles.py | 1c67cc9e2e7c73e21edf8f6268542f638458bd93 | [] | no_license | ABHINAV-KUMAR213/PRO-99 | d05e37e91d1305c7a4494475f05bf0d47adc48e2 | f282ed9429b20bc7ab87e066c1ceb5cab1edd734 | refs/heads/main | 2023-06-10T11:52:16.112611 | 2021-06-29T12:52:54 | 2021-06-29T12:52:54 | 381,363,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | import os
import shutil
import time
def main():
    """Delete files/folders under `path` older than `days` days, then print
    a summary of how many folders and files were removed.

    Walk logic: if the tree's root is itself old enough, the whole tree is
    removed at once; otherwise old subfolders and files are removed
    individually. The for/else branch runs when the walk finished without
    the wholesale-removal break (it also covers `path` being a plain file,
    in which case os.walk yields nothing).
    """
    deleted_folders_count = 0
    deleted_files_count = 0

    path = "e:\Coding\Python\Project99"
    days = 30

    # Cutoff timestamp: anything whose ctime is at or before this is "old".
    seconds = time.time() - (days * 24 * 60 * 60)

    if os.path.exists(path):
        for root_folder, folders, files in os.walk(path):
            if seconds >= get_file_or_folder_age(root_folder):
                # Entire tree is old enough: remove it wholesale and stop.
                remove_folder(root_folder)
                deleted_folders_count += 1
                break
            else:
                for folder in folders:
                    folder_path = os.path.join(root_folder, folder)
                    if seconds >= get_file_or_folder_age(folder_path):
                        remove_folder(folder_path)
                        deleted_folders_count += 1
                for file in files:
                    file_path = os.path.join(root_folder, file)
                    if seconds >= get_file_or_folder_age(file_path):
                        remove_file(file_path)
                        deleted_files_count += 1
        else:
            if seconds >= get_file_or_folder_age(path):
                remove_file(path)
                deleted_files_count += 1
    else:
        print(f'"{path}" is not found')
        # BUG FIX: the original incremented deleted_files_count here even
        # though nothing was deleted, inflating the summary by one.

    print(f"Total folders deleted: {deleted_folders_count}")
    print(f"Total files deleted: {deleted_files_count}")
def remove_folder(path):
    """Delete the directory tree at `path` and report success.

    BUG FIX: the original tested ``if not shutil.rmtree(path)``, but
    rmtree always returns None (it signals failure by raising OSError),
    so the "Unable to delete" branch was unreachable dead code. Errors
    still propagate as exceptions from rmtree itself.
    """
    shutil.rmtree(path)
    print(f"{path} is removed successfully")
def remove_file(path):
    """Delete the file at `path` and report success.

    BUG FIX: the original tested ``if not os.remove(path)``, but
    os.remove always returns None (it signals failure by raising
    OSError), so the "Unable to delete" branch was unreachable dead
    code. Errors still propagate as exceptions from os.remove itself.
    """
    os.remove(path)
    print(f"{path} is removed successfully")
def get_file_or_folder_age(path):
    """Return the st_ctime timestamp of `path`.

    Note: despite the name, this is an absolute epoch timestamp (metadata
    change time on Unix, creation time on Windows), not an elapsed age;
    callers compare it against a cutoff timestamp.
    """
    return os.stat(path).st_ctime
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
9d07edf3f66ec3474be91599c596f9476549ed59 | 4f60de93ad66f3ff95a5b86c41293dc95ecc4a2a | /hr/models.py | 09d2094f1645475c50fb3e6fabdabcb064e8e6d0 | [] | no_license | xxoxx/Luer | 87a541f40a12f4aff262d03b9e8e3b4b78a41511 | 24333edd06dc5e52775329db1f86cf862c68c56e | refs/heads/master | 2021-01-20T17:27:13.914273 | 2015-12-09T15:18:35 | 2015-12-09T15:18:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | #coding:utf-8
from django.db import models
from django.contrib.auth.models import User
from django.template.defaultfilters import default
# Create your models here.
class Department(models.Model):
    """A company department with its display name and manager's name."""
    department_name = models.CharField(max_length = 100)
    # Free-text manager name (not a FK to Employee_extra).
    department_manager = models.CharField(max_length = 100)
    def __unicode__(self):
        # Python 2 string representation used by the Django admin.
        return self.department_name
class Employee_extra(models.Model):
    """Extra HR profile attached one-to-one to a Django auth User."""
    user = models.OneToOneField(User)
    name = models.CharField(max_length = 100)
    # SECURITY NOTE(review): stores a password in plain text alongside the
    # auth User (which already hashes passwords) — should not be kept here.
    password = models.CharField(max_length = 100)
    sex = models.CharField(max_length = 100)
    email = models.EmailField()
    department = models.ForeignKey(Department)
    position = models.CharField(max_length = 100)
    base_salary = models.FloatField()
    extra_salary = models.FloatField(default=0)
    hiredate = models.DateField()
    # Profile picture, uploaded under MEDIA_ROOT/upload.
    pic = models.ImageField(upload_to='upload')
    class Meta:
        # Custom role permissions used for view-level access control.
        permissions = (('department_manager','department_manager'),
                       ('personnel_assistant','personnel_assistant'),
                       ('personnel_manager','personnel_manager'),
                       ('admin','admin'),
                       )
class Attendence(models.Model):
    """One attendance record (sic: 'Attendance') for a named employee."""
    attendence_name = models.CharField(max_length = 100)
    # True when the employee clocked in early or left late on this date.
    is_early_or_late = models.BooleanField()
    attendence_date = models.DateField()
class Salary(models.Model):
    """Monthly salary payout for a named employee."""
    name = models.CharField(max_length = 100)
    # Year/month kept as strings rather than a DateField.
    year = models.CharField(max_length = 100)
    month = models.CharField(max_length = 100)
    salary = models.FloatField()
class Announce(models.Model):
    """A dated company announcement with title and body text."""
    title = models.CharField(max_length = 100)
    content = models.TextField()
    date = models.DateField()
    def __unicode__(self):
        # Python 2 string representation used by the Django admin.
        return self.title
| [
"lhlsec@gmail.com"
] | lhlsec@gmail.com |
e8a5e9cff601be65ad6eb8f089f1c1bbc68ad329 | 5c6238e52a3b58cfa56df2b0376fe972d4eda055 | /time/clock_back/clock_back.py | 9e3ea9bad8288a2a23031157d5bae86b63553156 | [] | no_license | yhoazk/LF331_V2 | 25258da02654726a79e645f73616978dedf01064 | 5ca1c2620fbc295cc735f38acf7a6027fc4b9ae2 | refs/heads/master | 2023-06-30T15:18:31.283875 | 2023-06-18T11:28:18 | 2023-06-18T11:28:18 | 109,063,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | #!/usr/bin/env python3
from matplotlib import pyplot as plt
import csv
def main():
    """Load clock samples from /tmp/samples.log and plot them.

    Each CSV row's first column is parsed as an integer sample; the
    first differences are scanned for negative steps (a clock going
    backwards), then the raw samples are plotted.
    """
    # NOTE(review): leftover debug print.
    print("sdfsdf")
    data = []
    with open("/tmp/samples.log") as csv_file:
        samples = csv.reader(csv_file)
        for row in samples:
            data.append(int(row[0]))

    # First differences between consecutive samples.
    data_diff = [j-i for i, j in zip(data[:-1], data[1:])]
    for d in data_diff:
        if d < 0:
            # NOTE(review): debug marker for a backwards clock step.
            print("NEGGGGGG")
    plt.plot(data, marker="x")
    plt.show()
if __name__ == "__main__":
main()
| [
"yhoazk@gmail.com"
] | yhoazk@gmail.com |
6cf4963fd3bf6b82bb24b9ffda645ecbcba72071 | b10163d7036e0bd2bd2a59f1a8a96951ed169c21 | /arvore de natal.py | 6ac49eb9524abea4b33adf4c6ca5d714d0eb8552 | [] | no_license | YK0L0DIY/Python | 4e97e3e180b51e6571d667a3996b04851461fd62 | 1991cdb8669c6e532766231ce63854956ec5b0e8 | refs/heads/master | 2020-03-19T13:59:46.415839 | 2019-02-15T20:27:06 | 2019-02-15T20:27:06 | 136,604,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | def arvore(a):
def arvore(a):
    """Print an ASCII Christmas tree of height `a` plus a two-line trunk."""
    # Top star, centered over the widest row.
    print(" " * (a - 1), "*")
    # Body: each level is two stars wider than the previous one.
    for level in range(1, a):
        print(" " * (a - 1 - level), "*" * (2 * level + 1))
    # Trunk: two single stars aligned under the top star.
    print(" " * (a - 1), "*")
    print(" " * (a - 1), "*")
# Ask the user for the tree height (Portuguese prompt) and draw it.
h=int(input("Indique a altura da arvore: "))
arvore(h)
| [
"noreply@github.com"
] | noreply@github.com |
277eefec07dfbb8604106fa7fb9dbdc5dff111a0 | 4819d5b68ac2708cd9f7e238790568f6fb62bf4a | /shop/ShopSerializers.py | c96c2565256fbd1eb6e73cbd9da8599cb60a0676 | [] | no_license | suyashgithub/django-rest | f8c60efaa95ad74220be6d44c766181614b55adc | bb41c41713983ee95fab5d1a799b96231b817305 | refs/heads/master | 2022-11-29T06:02:04.119874 | 2020-08-13T22:08:21 | 2020-08-13T22:08:21 | 287,384,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | from rest_framework import serializers
from shop.models import Shop
class ShopSerializer(serializers.ModelSerializer):
    """Serializer for Shop instances.

    Exposes the owning user's username as a read-only computed field and
    renders created_at in a fixed human-readable format.
    """
    created_at = serializers.DateTimeField(format="%Y-%m-%d %H:%M:%S", read_only=True)
    username = serializers.SerializerMethodField("get_username")

    class Meta:
        model = Shop
        fields = ['id', 'status', 'name', 'user', 'username', 'created_at']

    def get_username(self, obj):
        """Return the username of the shop's owning user."""
        return obj.user.username

    def create(self, validated_data):
        """
        Create and return a new `Shop` instance, given the validated data.
        """
        # Renamed local from the original `meetings` — a copy/paste leftover
        # from another serializer; the object created is a Shop.
        shop = Shop.objects.create(**validated_data)
        return shop

    def update(self, instance, validated_data):
        """
        Update and return an existing `Shop` instance, given the validated data.
        """
        # Only name and status are writable through this serializer.
        instance.name = validated_data.get('name', instance.name)
        instance.status = validated_data.get('status', instance.status)
        instance.save()
        return instance
"suyash.mishra@thinkmantra.com"
] | suyash.mishra@thinkmantra.com |
1570433f75a24869c25727245eb54167af8b00ab | bdbb20034786685601d0a424f29ea8bd04755419 | /stopWait/client/client.py | 9bcb24ee87990c4551f147c2c65755c851585681 | [] | no_license | s19-nets/udp-file-transfer-mc | 48df5efc60af4b41caf48a06381c45c17e640f4f | 8a5aff2b3142589f1e1ebb4702bdafe1ef0ed4ae | refs/heads/master | 2020-04-27T19:41:42.532207 | 2019-03-09T07:14:55 | 2019-03-09T07:14:55 | 174,629,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,963 | py | from socket import *
# Stop-and-wait UDP file-transfer client.
# Sends the user's GET/PUT command to the server, then either receives a
# file into 'get_file' (GET) or streams a local file out in sequenced,
# acknowledged datagrams (PUT).
serverAddress = ('localhost', 50000)
ack = 0
last = 0
doneNum = 0
seqnum = 0
done = False
retry = False
#Get user input
# NOTE(review): rebinding `input` shadows the builtin for the rest of the script.
input = input("What would you like to do?")
input_list = input.split()
#Create socket and send user input
clientSocket = socket(AF_INET, SOCK_DGRAM)
clientSocket.sendto(input.encode(), serverAddress)
#GET file
if(input_list[0].upper() == "GET"):
    with open('get_file', 'wb') as f:
        print("Receiving:", input_list[1])
        while True and not done:
            #Retrieve seqNum and done number (0 = not done, 1 = done)
            message, serverAddrPort = clientSocket.recvfrom(1000)
            splitMessage = message.decode('utf_8', 'ignore').split()
            seqNum = splitMessage[0]
            doneNum = splitMessage[1]
            if(int(doneNum) == 1):
                done = True
            #skip over the seqNum & doneNum
            # Header is "<seq> <done> " — its length depends on how many
            # digits the sequence number has (1, 2 or 3).
            if(int(seqNum) <10 ):
                text = message.decode('utf_8', 'ignore')[4:]
            elif(int(seqNum) >=10 and int(seqNum) <100):
                text = message.decode('utf_8', 'ignore')[5:]
            else:
                text = message.decode('utf_8', 'ignore')[6:]
            #Check to see if it's retrying
            # A repeated sequence number means the server resent the last
            # datagram; don't advance ack or write the payload again.
            if(int(seqNum) != ack):
                ack= ack+1
                retry = False
            else:
                retry = True
            #send ack num
            sACK = str(ack)
            clientSocket.sendto(sACK.encode(), serverAddress)
            if not message:
                done = True
                break
            #write message to a file if not retrying
            if not retry:
                f.write(text.encode())
    print('Done Receiving')
    # NOTE(review): redundant — the with-block already closed f.
    f.close()
#PUT file
if(input_list[0].upper() == "PUT"):
    print("Sending", (input_list[1]))
    #Open requested file
    file = open(input_list[1].encode(),'rb')
    l = file.read(90);
    while (l):
        #increment seqnum if not retrying
        if(not retry):
            seqnum = seqnum +1
        #prepare and insert into package
        # Datagram layout: "<seq> <done> " header followed by raw payload.
        sSeqnum = str(seqnum) + " "
        doneNum = str(doneNum)
        emptyString = " "
        package = sSeqnum.encode()+ doneNum.encode() + emptyString.encode() + l
        #send to client and receive acknowledgement
        clientSocket.sendto(package, serverAddress)
        ACK, cAddr = clientSocket.recvfrom(100)
        #Check to see if ack number is correct
        # A mismatched ack means the receiver missed this datagram: retry it.
        if(seqnum != int(ACK.decode())):
            retry = True
        #if it is, keep reading file
        if(not retry):
            l = file.read(80)
        #check to see if end of file
        if(len(l) < 80):
            doneNum = 1
    print('Done Sending')
    file.close()
"noreply@github.com"
] | noreply@github.com |
7e625fa71bfa5efc76d0547775d9fb3a2dad6b0b | a1b308d27c922bfa66137151c93b817c7667c4b2 | /src/chatbot/cfg/Chatbot.cfg | db50fd43913f3a9ef49e554ad40a58952d093c60 | [
"MIT"
] | permissive | dergkat/HEAD | 64e81390ffcd04795ef8414376b2d12813e4e42c | 0e1a034dbdc052e5e573787f49c9f36a01459373 | refs/heads/master | 2021-05-01T05:45:17.810469 | 2017-06-07T09:01:19 | 2017-06-07T09:01:19 | 79,799,750 | 0 | 1 | null | 2017-01-23T11:50:33 | 2017-01-23T11:50:33 | null | UTF-8 | Python | false | false | 1,484 | cfg | #!/usr/bin/env python
PACKAGE = 'chatbot'
from dynamic_reconfigure.parameter_generator_catkin import *
# ROS dynamic_reconfigure parameter declaration for the chatbot node.
# gen.add signature: name, type, level, description, default[, min, max].
gen = ParameterGenerator()

gen.add("enable", bool_t, 0, "Enable Chatbot", True)
gen.add("chatbot_url", str_t, 0, "Chatbot Server URL", 'http://localhost:8001')
gen.add("sentiment", bool_t, 0, "Enable Sentiment", True)
gen.add("delay_response", bool_t, 0, "Delay Chatbot Response", False)
gen.add("delay_time", double_t, 0, "Response Delay Time", 5, 0, 10)
gen.add("ignore_indicator", bool_t, 0, "Ignore the indicator from response", False)
gen.add("set_that", str_t, 0, "Set 'that' tag", '')
gen.add("set_context", str_t, 0, "Add Chatbot Context (k=v,k2=v2,...)", '')
gen.add("mute", bool_t, 0, "Mute the Chatbot", False)

# Per-tier response weights (0..1), grouped in the reconfigure GUI.
weights = gen.add_group("Weights", state=True)
weights.add("sophia", double_t, 0, "Weight for Sophia tier", 0.9, 0, 1)
weights.add("cs", double_t, 0, "Weight for ChatScript tier", 1, 0, 1)
weights.add("generic", double_t, 0, "Weight for generic tier", 1, 0, 1)
weights.add("early_pickup", double_t, 0, "Weight for early pickup tier", 0.2, 0, 1)
weights.add("mid_pickup", double_t, 0, "Weight for middle pickup tier", 0.2, 0, 1)
weights.add("ddg", double_t, 0, "Weight for duck duck go tier", 0.3, 0, 1)
weights.add("markov", double_t, 0, "Weight for random sentense generator tier", 0.1, 0, 1)
weights.add("reset", bool_t, 0, "Reset the weight to defaults", False)

# package name, node name, config name
exit(gen.generate(PACKAGE, "chatbot", "Chatbot"))
| [
"dut.hww@gmail.com"
] | dut.hww@gmail.com |
1b5acb3b130ca25e0213d632d2799da91b52eb84 | 3021d12339945053e89f9d9571f8054d86963088 | /scripts/sma-client.py | 205f5fd47aa8db61eb60b6e5fe90ad80c3ead6e0 | [
"Intel",
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | tmakatos/spdk | 9d18a586000fc25f659bb185ebb30b68e0073aa7 | a82d6f009f1d43cb8107ec0637d9e458c319b369 | refs/heads/master | 2022-11-15T06:32:52.922360 | 2022-05-11T16:24:09 | 2022-10-21T16:42:58 | 220,963,857 | 0 | 1 | NOASSERTION | 2020-09-16T20:46:56 | 2019-11-11T11:14:11 | C | UTF-8 | Python | false | false | 2,551 | py | #!/usr/bin/env python3
from argparse import ArgumentParser
import grpc
import google.protobuf.json_format as json_format
import importlib
import json
import logging
import os
import sys
sys.path.append(os.path.dirname(__file__) + '/../python')
import spdk.sma.proto.sma_pb2 as sma_pb2 # noqa
import spdk.sma.proto.sma_pb2_grpc as sma_pb2_grpc # noqa
import spdk.sma.proto.nvmf_tcp_pb2 as nvmf_tcp_pb2 # noqa
import spdk.sma.proto.nvmf_tcp_pb2_grpc as nvmf_tcp_pb2_grpc # noqa
class Client:
    """Thin gRPC client for the StorageManagementAgent service.

    Method names and message types are resolved dynamically from the
    protobuf service descriptor, so any RPC the service defines can be
    invoked by name with a JSON-style params dict.
    """
    def __init__(self, addr, port):
        # Service descriptor used to look up per-method message types.
        self._service = sma_pb2.DESCRIPTOR.services_by_name['StorageManagementAgent']
        self.addr = addr
        self.port = port

    def _get_message_type(self, descriptor):
        """Map a protobuf message descriptor to its generated Python class."""
        return getattr(sma_pb2, descriptor.name)

    def _get_method_types(self, method_name):
        """Return the (request, response) message classes for an RPC name."""
        method = self._service.methods_by_name.get(method_name)
        return (self._get_message_type(method.input_type),
                self._get_message_type(method.output_type))

    def call(self, method, params):
        """Invoke RPC `method` with dict `params`; return the response as a dict.

        Opens a fresh insecure channel per call.
        """
        with grpc.insecure_channel(f'{self.addr}:{self.port}') as channel:
            stub = sma_pb2_grpc.StorageManagementAgentStub(channel)
            func = getattr(stub, method)
            # NOTE(review): `input` shadows the builtin and `output` is unused.
            input, output = self._get_method_types(method)
            response = func(request=json_format.ParseDict(params, input()))
            return json_format.MessageToDict(response,
                                             preserving_proto_field_name=True)
def load_plugins(plugins):
    """Import each named plugin module for its registration side effects.

    :param plugins: iterable of importable module names (dotted paths)
    :raises ModuleNotFoundError: if a plugin module cannot be found
    """
    for plugin in plugins:
        logging.debug(f'Loading external plugin: {plugin}')
        # Imported purely for side effects; the module object itself is
        # unused (the original bound it to an unused local).
        importlib.import_module(plugin)
def parse_argv():
    """Parse the SMA client's command-line options from sys.argv."""
    parser = ArgumentParser(description='Storage Management Agent client')
    # (flags, options) table keeps the two arguments visually parallel.
    option_table = [
        (('--address', '-a'),
         {'default': 'localhost',
          'help': 'IP address of SMA instance to connect to'}),
        (('--port', '-p'),
         {'default': 8080, 'type': int,
          'help': 'Port number of SMA instance to connect to'}),
    ]
    for flags, opts in option_table:
        parser.add_argument(*flags, **opts)
    return parser.parse_args()
def main(args):
    """Read a JSON-RPC-style request from stdin, invoke it, print the reply.

    Expected stdin shape: {"method": "<RpcName>", "params": {...}}.
    NOTE(review): the `args` parameter is accepted but unused —
    parse_argv() reads sys.argv directly.
    """
    argv = parse_argv()
    # Log level and extra plugin modules come from the environment.
    logging.basicConfig(level=os.environ.get('SMA_LOGLEVEL', 'WARNING').upper())
    load_plugins(filter(None, os.environ.get('SMA_PLUGINS', '').split(':')))
    client = Client(argv.address, argv.port)
    request = json.loads(sys.stdin.read())
    result = client.call(request['method'], request.get('params', {}))
    print(json.dumps(result, indent=2))
if __name__ == '__main__':
main(sys.argv[1:])
| [
"tomasz.zawadzki@intel.com"
] | tomasz.zawadzki@intel.com |
736b25d9dd6ccb8c04ea88d667c2c6ec69bf359c | 6aee5e2775a99de940f9deef3d34e9833d44819c | /lesson/day03/test.py | c202ccce8b5f53b0a53bb44e0089f589ac030e8b | [] | no_license | lyl0521/lesson-0426 | c7669767cf5f6bae54765f2246764e1a06acee7a | 07f114af276932c252478d5a739ca88b4bae0fa2 | refs/heads/master | 2020-05-17T17:21:00.876376 | 2019-05-07T01:54:56 | 2019-05-07T01:54:56 | 183,849,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | # all()
# all(): returns True only if every element is truthy (True for empty input).
print(all([0,1,2,3])) # False
print(all([1,2,3])) # True
print(all(['a','b','c',''])) # False
print(all(['a','b','c','d'])) #True
print(all(('a','b','c','d'))) #True
print(all(('a','b','','c'))) #False
print(all((0,1,2,3))) #False
print(all((1,2,3))) #True
print(all([])) # True empty list
print(all(())) # True empty tuple
# any()
# any(): returns False if every element is empty/0/False; True if at least
# one element is truthy.
print(any(['a', 'b', 'c', 'd'])) #True
print(any(['a', 'b', '', 'd'])) #True
print(any([0, '', False])) #False
print(any(('a', 'b', 'c', 'd'))) #True
print(any(('a', 'b', '', 'd'))) #True
print(any((0, '', False))) #False
print(any([])) # False — an empty iterable has no truthy element (original comment was wrong)
print(any(())) # False — same for an empty tuple (original comment was wrong)
# bin()
# bin(): return the binary string representation of an integer (e.g. '0b1010').
print(bin(int(10)))
print(bin(10))
| [
"1247702289@qq.com"
] | 1247702289@qq.com |
406f62c74fe702355e8c72a821dc3983799b0ff2 | 2477ba1cd7e04b641f95b800e188d7525d39ab46 | /Monochrome_sketches/stripe/stripe.pyde | 31e0e239c6a31336735627ca8ac6824354f8348a | [] | no_license | lsm35/ProcessingSketchbook | ec6ddf875caa87f1ccfefefde690c9dd6a0260f8 | d15ae1dcdcc920f583bc911c0a5cf72f58821dd1 | refs/heads/master | 2021-03-31T02:00:24.209133 | 2018-03-17T00:48:40 | 2018-03-17T00:48:40 | 124,572,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | pyde | size(400,400);
# Processing (Python mode) sketch: draw random-gray stripes on a 400x400
# canvas and save the result as stripe.tif.
background(60)

Ncols = width
Nrows = height

# Set up 2D array.
# BUG FIX: the original used [[0]*Ncols]*Nrows, which makes every row an
# alias of the *same* list, so writing myArray[i][j] mutated all rows at
# once. A list comprehension builds one independent row per index.
myArray = [[0]*Ncols for _ in range(Nrows)]

for i in range(0, Ncols, 20):
    for j in range(0, Nrows, 20):
        myArray[i][j] = int(random(255))
        stroke(myArray[i][j])
        # NOTE(review): 1/2 is integer division (0) under Processing's
        # Python 2 semantics, so these rects have zero width — presumably
        # unintended; confirm the intended stripe width.
        rect(i, j, 1/2, j/2)

save("stripe.tif")
"lsm35@cam.ac.uk"
] | lsm35@cam.ac.uk |
f8cff679622d038367e9ba166acddf2c25db3acc | 8126a1246a7fadcba2a57afdec210e6c00dd85ff | /tests/data_objects/constraints/constraints_object_test.py | 556692a6d6f5a40717a7b875d031a5c4615430be | [] | no_license | TF-185/cra-princess-cp1 | 0961f0bb140a89efb427e4e497318a563a564b75 | cbe5dab43983f794a817cd891973f68789ebd3b4 | refs/heads/master | 2023-05-30T10:45:24.635862 | 2019-10-11T19:55:00 | 2019-10-11T19:55:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,433 | py | # """
# constraints_object_test.py
#
# Module to test constraints_object.py.
# Author: Tameem Samawi (tsamawi@cra.com)
# """
#
# import unittest
# from cp1.common.exception_class import ConstraintsObjectInitializationException
# from cp1.data_objects.processing.constraints_object import ConstraintsObject
# from cp1.data_objects.processing.channel import Channel
# from cp1.data_objects.mdl.txop_timeout import TxOpTimeout
# from cp1.data_objects.mdl.bandwidth_types import BandwidthTypes
# from cp1.data_objects.mdl.bandwidth_rate import BandwidthRate
# from cp1.data_objects.mdl.kbps import Kbps
# from cp1.data_objects.mdl.mdl_id import MdlId
# from cp1.data_objects.mdl.frequency import Frequency
# from cp1.data_objects.processing.ta import TA
#
#
# class ConstraintsObjectTest(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.goal_throughput_bulk = BandwidthRate(BandwidthTypes.BULK, Kbps(10))
# cls.goal_throughput_voice = BandwidthRate(BandwidthTypes.VOICE, Kbps(10))
# cls.goal_throughput_safety = BandwidthRate(BandwidthTypes.SAFETY, Kbps(10))
# cls.latency = time(microsecond=50000)
# cls.guard_band = Milliseconds(1000)
# cls.epoch = Milliseconds(100000)
# cls.txop_timeout = TxOpTimeout(255)
#
# channel_frequency = Frequency(4919500000)
# channel_length = Milliseconds(100)
# channel_latency = Milliseconds(50)
# channel_capacity = Kbps(100)
# cls.channels = [Channel(channel_frequency, channel_length, channel_latency, channel_capacity)]
#
# ta_id_ = MdlId('TA1')
# ta_minimum_voice_bandwidth = Kbps(100)
# ta_minimum_safety_bandwidth = Kbps(75)
# ta_scaling_factor = 1
# ta_c = 0.05
# ta_min_value = 65
# ta = TA(ta_id_, ta_minimum_voice_bandwidth, ta_minimum_safety_bandwidth, ta_scaling_factor,
# ta_c)
# cls.candidate_tas = [ta]
#
# def test_valid_constraints_object_init(self):
# constraints_object = ConstraintsObject(
# self.goal_throughput_bulk,
# self.goal_throughput_voice,
# self.goal_throughput_safety,
# self.latency,
# self.guard_band,
# self.epoch,
# self.txop_timeout,
# self.candidate_tas,
# self.channels)
# self.assertEqual(self.goal_throughput_bulk,
# constraints_object.goal_throughput_bulk)
# self.assertEqual(self.goal_throughput_voice,
# constraints_object.goal_throughput_voice)
# self.assertEqual(self.goal_throughput_safety,
# constraints_object.goal_throughput_safety)
# self.assertEqual(self.latency,
# constraints_object.latency)
# self.assertEqual(self.guard_band,
# constraints_object.guard_band)
# self.assertEqual(self.epoch,
# constraints_object.epoch)
#
# tas_equal = True
# for i in range(0, len(self.candidate_tas)):
# if self.candidate_tas[i] != constraints_object.candidate_tas[i]:
# tas_equal = False
# self.assertTrue(tas_equal)
#
# channels_equal = True
# for i in range(0, len(self.channels)):
# if self.channels[i] != constraints_object.channels[i]:
# channels_equal = False
# self.assertTrue(channels_equal)
#
# def test___eq__(self):
# c1 = ConstraintsObject(
# self.goal_throughput_bulk,
# self.goal_throughput_voice,
# self.goal_throughput_safety,
# self.latency,
# self.guard_band,
# self.epoch,
# self.txop_timeout,
# self.candidate_tas,
# self.channels)
# c2 = ConstraintsObject(
# self.goal_throughput_bulk,
# self.goal_throughput_voice,
# self.goal_throughput_safety,
# self.latency,
# self.guard_band,
# self.epoch,
# self.txop_timeout,
# self.candidate_tas,
# self.channels)
# self.assertEqual(c1, c2)
#
# def test_invalid_goal_throughput_bulk(self):
# self.assertRaises(
# ConstraintsObjectInitializationException,
# ConstraintsObject,
# 1,
# self.goal_throughput_voice,
# self.goal_throughput_safety,
# self.latency,
# self.guard_band,
# self.epoch,
# self.txop_timeout,
# self.candidate_tas,
# self.channels)
#
# def test_invalid_goal_throughput_voice(self):
# self.assertRaises(
# ConstraintsObjectInitializationException,
# ConstraintsObject,
# self.goal_throughput_bulk,
# 1,
# self.goal_throughput_safety,
# self.latency,
# self.guard_band,
# self.epoch,
# self.txop_timeout,
# self.candidate_tas,
# self.channels)
#
# def test_invalid_goal_throughput_safety(self):
# self.assertRaises(
# ConstraintsObjectInitializationException,
# ConstraintsObject,
# self.goal_throughput_bulk,
# self.goal_throughput_voice,
# 1,
# self.latency,
# self.guard_band,
# self.epoch,
# self.txop_timeout,
# self.candidate_tas,
# self.channels)
#
# def test_invalid_guard_band_type(self):
# self.assertRaises(
# ConstraintsObjectInitializationException,
# ConstraintsObject,
# self.goal_throughput_bulk,
# self.goal_throughput_voice,
# self.goal_throughput_safety,
# self.latency,
# 1,
# self.epoch,
# self.txop_timeout,
# self.candidate_tas,
# self.channels)
#
# def test_invalid_epoch_type(self):
# self.assertRaises(
# ConstraintsObjectInitializationException,
# ConstraintsObject,
# self.goal_throughput_bulk,
# self.goal_throughput_voice,
# self.goal_throughput_safety,
# self.latency,
# self.guard_band,
# 1,
# self.txop_timeout,
# self.candidate_tas,
# self.channels)
#
# def test_invalid_txop_timeout(self):
# self.assertRaises(
# ConstraintsObjectInitializationException,
# ConstraintsObject,
# self.goal_throughput_bulk,
# self.goal_throughput_voice,
# self.goal_throughput_safety,
# self.latency,
# self.guard_band,
# self.epoch,
# 1,
# self.candidate_tas,
# self.channels)
#
# def test_invalid_candidate_tas_type(self):
# self.assertRaises(
# ConstraintsObjectInitializationException,
# ConstraintsObject,
# self.goal_throughput_bulk,
# self.goal_throughput_voice,
# self.goal_throughput_safety,
# self.latency,
# self.guard_band,
# self.epoch,
# self.txop_timeout,
# 1,
# self.channels)
#
# def test_invalid_channels_type(self):
# self.assertRaises(
# ConstraintsObjectInitializationException,
# ConstraintsObject,
# self.goal_throughput_bulk,
# self.goal_throughput_voice,
# self.goal_throughput_safety,
# self.latency,
# self.guard_band,
# self.epoch,
# self.txop_timeout,
# self.candidate_tas,
# 1)
| [
"tsamawi@cra.com"
] | tsamawi@cra.com |
48dfd03ed0706d7a711f1e304f10e7e7b6b13b01 | 0d59d36b45143e8710d6a19dce41c7a8fc14fc3b | /tasker/libs/client_desktop/event.py | 9d9d31fa183128fc1573f381b098c9c7f4010c83 | [] | no_license | nkitsan/trello_clone | 4d09b8f5bfe1b77ce2d4a5eb6eb49492b229b1fc | c86c83f4fe76323bc558911cb81bbfe234bfe3ce | refs/heads/master | 2020-04-13T12:06:29.129019 | 2018-12-26T15:29:07 | 2018-12-26T15:29:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,251 | py | import requests
import click
from tasker.libs.client_desktop.helper import HOST, date_validation
from tasker.libs.client_desktop.access import read_api
# Root click group; the event subcommands below attach to it via
# @event_operations.command. (Docstring left unchanged because click
# renders it as the CLI help text.)
@click.group()
def event_operations():
    """Here is commands which allow to work with calendar events"""
@event_operations.command(short_help='Add the event to the calendar')
@click.option('--name', default='', help='Name of the event which will be added')
@click.option('--date', required=True, help='Date of event')
def add_event(name, date):
    # Reject dates that don't match the expected "Y-M-D H:M" format.
    if not date_validation(date):
        click.echo('Format of date should be Y-M-D H:M')
        return
    # The stored API key identifies the user; None means not logged in.
    api = read_api()
    if api is None:
        click.echo('Use login --api to register your api key and work further')
        return
    url = HOST + api + '/events'
    data = {'event_name': name, 'event_date': date}
    event_response = requests.post(url=url, data=data).json()
    if 'error' in event_response:
        click.echo(event_response['error'])
        return
    # On success the server returns {event_id: {'name': ..., 'date': ...}}.
    # NOTE(review): ['date'][0:-1] drops the last character of the ISO
    # timestamp before splitting on 'T' — presumably a trailing marker;
    # confirm against the server's date format.
    for event_id in event_response:
        click.echo(event_id + ': ' + event_response[event_id]['name'] + '\ndate: '
                   + ' '.join(event_response[event_id]['date'][0:-1].split('T')))
    return
@event_operations.command(short_help='Change the event to the calendar')
@click.option('--event_id', required=True, type=click.INT, help='ID of the event to change')
@click.option('--name', default=None, help='Name of the event on which will be renewed')
@click.option('--date', default=None, help='Date of event on which will be renewed')
def change_event(event_id, name, date):
    # Update the name and/or date of an existing calendar event.
    # Validate the date locally (consistent with add_event) instead of
    # sending a malformed value to the server.
    if date is not None and not date_validation(date):
        click.echo('Format of date should be Y-M-D H:M')
        return
    api = read_api()
    if api is None:
        click.echo('Use login --api to register your api key and work further')
        return
    event_id = str(event_id)
    url = HOST + api + '/events/' + event_id
    # Only the fields that were actually supplied are sent.
    data = {}
    if name is not None:
        data.update({'event_name': name})
    if date is not None:
        data.update({'event_date': date})
    event_response = requests.put(url=url, data=data).json()
    if 'error' in event_response:
        click.echo(event_response['error'])
        return
    click.echo(event_id + ': ' + event_response[event_id]['name'] + '\ndate: '
               + ' '.join(event_response[event_id]['date'][0:-1].split('T')))
    return
@event_operations.command(short_help='Delete the event from the calendar')
@click.option('--event_id', required=True, type=click.INT, help='ID of the event to delete')
def delete_event(event_id):
    # Remove a single event from the calendar by its id.
    api = read_api()
    if api is None:
        click.echo('Use login --api to register your api key and work further')
        return
    response = requests.delete(url=HOST + api + '/events',
                               data={'event_id': str(event_id)}).json()
    if 'error' in response:
        click.echo(response['error'])
        return
    click.echo('event was deleted')
    return
@event_operations.command(short_help='Show events in the calendar')
def show_events():
    # Print every calendar event for the logged-in user, one per entry.
    api = read_api()
    if api is None:
        click.echo('Use login --api to register your api key and work further')
        return
    response = requests.get(url=HOST + api + '/events').json()
    if 'error' in response:
        click.echo(response['error'])
        return
    # Each entry renders as "<id> <name>\ndate:<timestamp>"; entries are
    # newline-joined (same output as concatenating with trailing '\n'
    # and stripping the last one).
    entries = [
        event_id + ' ' + response[event_id]['name'] + '\ndate:'
        + response[event_id]['date'][0:-1].replace('T', ' ')
        for event_id in response
    ]
    click.echo('\n'.join(entries))
    return
@event_operations.command(short_help='Show the event from the calendar')
@click.option('--event_id', required=True, type=click.INT, help='ID of the event to see')
def show_event(event_id):
    # Print one calendar event identified by event_id.
    api = read_api()
    if api is None:
        click.echo('Use login --api to register your api key and work further')
        return
    event_id = str(event_id)
    response = requests.get(url=HOST + api + '/events/' + event_id).json()
    if 'error' in response:
        click.echo(response['error'])
        return
    event = response[event_id]
    click.echo(event_id + ': ' + event['name'] + '\ndate: '
               + event['date'][0:-1].replace('T', ' '))
    return
| [
"nastik.kitsan@gmail.com"
] | nastik.kitsan@gmail.com |
32a624033c4fcd4b0dab2f56ea427738fac85532 | 0fd5793e78e39adbfe9dcd733ef5e42390b8cc9a | /python3/16_Web_Services/k_Projects/b_twitter/twitter_scrapping.py | e22fe2652cae147f89fc3a8955b3336f6f812e4b | [] | no_license | udhayprakash/PythonMaterial | 3ea282ceb4492d94d401e3bc8bad9bf6e9cfa156 | e72f44e147141ebc9bf9ec126b70a5fcdbfbd076 | refs/heads/develop | 2023-07-08T21:07:33.154577 | 2023-07-03T10:53:25 | 2023-07-03T10:53:25 | 73,196,374 | 8 | 5 | null | 2023-05-26T09:59:17 | 2016-11-08T14:55:51 | Jupyter Notebook | UTF-8 | Python | false | false | 2,215 | py | #!/usr/bin/python
"""
Purpose: Twitter data scrapping
"""
import tweepy
class TwitterLogin:
    """Authenticates against the Twitter API and exposes a tweepy client."""

    def __init__(self):
        # NOTE(review): placeholder credentials committed in source — real
        # keys should come from environment/config, never from the repo.
        consumer_key = "xxxxxxxxxxxxxxxxxxxxx"
        consumer_secret = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
        access_token = "00000-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
        access_token_secret = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
        oauth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        oauth.set_access_token(access_token, access_token_secret)
        # Back off automatically when rate limits are hit.
        self.api = tweepy.API(
            oauth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True
        )

    def credentials_verification(self):
        """Print a short profile summary for the authenticated account."""
        result = vars(self.api.verify_credentials())["_json"]
        print(
            f"""Account
            User : {result['name']}
            Screen Name : {result['screen_name']}
            Location : {result['location']}
            Profile description : {result['description']}
            Account Created at : {result['created_at']}
            Display URL : {result['entities']['url']['urls'][0]['display_url']}
            """
        )
class TwitterScrapping(TwitterLogin):
    """Scraping helper that reuses the authenticated API client from TwitterLogin."""

    def __init__(self):
        super().__init__()
# Example of verifying credentials on the bare login object:
# twtr = TwitterLogin()
# twtr.credentials_verification()
# Log in and print the account summary.
twrt_scrp = TwitterScrapping()
twrt_scrp.credentials_verification()
# Latest Public Timeline
# Take only the most recent tweet from the home timeline.
tweet = twrt_scrp.api.home_timeline()[0]
# Dump the tweet's attributes for inspection.
print(
    f"""
tweet.text : {tweet.text}
tweet.contributors : {tweet.contributors}
tweet.created_at : {tweet.created_at}
tweet.lang : {tweet.lang}
tweet.source : {tweet.source}
tweet.source_url : {tweet.source_url}
tweet.truncated : {tweet.truncated}
tweet.retweet_count : {tweet.retweet_count}
tweet.retweeted : {tweet.retweeted}
tweet.retweet : {tweet.retweet}
tweet.retweets : {tweet.retweets}
tweet.possibly_sensitive : {tweet.possibly_sensitive}
tweet.possibly_sensitive_appealable : {tweet.possibly_sensitive_appealable}
"""
)
| [
"uday3prakash@gmail.com"
] | uday3prakash@gmail.com |
12eca4b3e8ae4bf6f27c07a03bbc58a313b36f5f | d668209e9951d249020765c011a836f193004c01 | /tools/pnnx/tests/test_torch_fft_irfft.py | 8f92dd551a1f5c2f0b5ff9c8894b75b1b122d362 | [
"BSD-3-Clause",
"Zlib",
"BSD-2-Clause"
] | permissive | Tencent/ncnn | d8371746c00439304c279041647362a723330a79 | 14b000d2b739bd0f169a9ccfeb042da06fa0a84a | refs/heads/master | 2023-08-31T14:04:36.635201 | 2023-08-31T04:19:23 | 2023-08-31T04:19:23 | 95,879,426 | 18,818 | 4,491 | NOASSERTION | 2023-09-14T15:44:56 | 2017-06-30T10:55:37 | C++ | UTF-8 | Python | false | false | 1,804 | py | # Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
    """Applies torch.fft.irfft to each input, one per normalization mode."""

    def __init__(self):
        super().__init__()

    def forward(self, x, y, z):
        # One inverse real FFT per input; x/z transform the last dim,
        # y transforms dim 1.
        out_backward = torch.fft.irfft(x, norm="backward")
        out_forward = torch.fft.irfft(y, dim=(1), norm="forward")
        out_ortho = torch.fft.irfft(z, norm="ortho")
        return out_backward, out_forward, out_ortho
def test():
    """Trace Model to TorchScript, convert it with pnnx, and compare outputs.

    Returns True when the pnnx-converted module reproduces the traced
    model's outputs bit-exactly, False otherwise.
    """
    net = Model()
    net.eval()
    # Fixed seed so the reference inputs are reproducible.
    torch.manual_seed(0)
    x = torch.rand(1, 3, 120, 120)
    y = torch.rand(1, 100, 2, 120)
    z = torch.rand(1, 20, 20)
    a = net(x, y, z)
    # export torchscript
    mod = torch.jit.trace(net, (x, y, z))
    mod.save("test_torch_fft_irfft.pt")
    # torchscript to pnnx (requires the pnnx binary built at ../src/pnnx)
    import os
    os.system("../src/pnnx test_torch_fft_irfft.pt inputshape=[1,3,120,120],[1,100,2,120],[1,20,20]")
    # pnnx inference: import the Python module pnnx just generated.
    import test_torch_fft_irfft_pnnx
    b = test_torch_fft_irfft_pnnx.test_inference()
    # Every pair of outputs must match exactly (no tolerance).
    for a0, b0 in zip(a, b):
        if not torch.equal(a0, b0):
            return False
    return True
if __name__ == "__main__":
    # Exit code 0 on success, 1 on mismatch, for CI consumption.
    if test():
        exit(0)
    else:
        exit(1)
| [
"noreply@github.com"
] | noreply@github.com |
fff29da02d95309713cc9a0f7a86f69832ba5220 | 83a506a501561602ad3b259341225ddfbddab160 | /GameServer/matchGames/Match_PK_DouDiZhu/redis_instance.py | 3fe50de16f52f543bb74fc19e6b8dcc7b80828c3 | [] | no_license | daxingyou/SouYouJi_Game | 9dc5f02eb28b910efb229653a8d0bffe425a7911 | 7311a994c9aba15b7234331709975ebc37e8453d | refs/heads/master | 2023-03-28T01:36:48.955107 | 2020-04-05T01:24:17 | 2020-04-05T01:24:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # -*- coding:utf-8 -*-
# !/bin/python
"""
Author: Pipo
Date: $Date$
Revision: $Revision$
Description: Redis
"""
import redis
from configs import CONFIGS
redisdb = None
def getInst(dbNum=CONFIGS['redis']['db']):
    """Return a Redis client bound to database number *dbNum*.

    Note: the default for ``dbNum`` is evaluated once at import time from
    CONFIGS, not on every call.

    NOTE(review): each call creates a brand-new ConnectionPool and rebinds
    the module-level ``redisdb`` to it, so connections are not actually
    shared between calls — confirm whether that is intended.
    """
    global redisdb
    redisdb = redis.ConnectionPool(
        host=CONFIGS['redis']['host'],
        port=CONFIGS['redis']['port'],
        db=dbNum,
        password=CONFIGS['redis']['password']
    )
    redisData = redis.Redis(connection_pool=redisdb)
    return redisData
| [
"ronnyzh@yeah.net"
] | ronnyzh@yeah.net |
6ebf11b3f019ebe0338ba4e09bbe5dcd2b7fbd4f | ec4e153f3bf1b335bc1b31b85e6f9db4a6c4faa9 | /wd_extractor/Document.py | 6ed52824a553446bd88f07562c5ca97fb6fb3529 | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | DuaneNielsen/wd_extractor | 7936ac29ae97972cfe74973108aaad1efa5054b6 | 128a189bacd0cd2d7f1fa598202b9c4e55f48e2f | refs/heads/master | 2021-01-19T14:13:42.441554 | 2017-09-19T02:16:08 | 2017-09-19T02:16:08 | 100,887,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | from .Graminator import Graminator
class Document:
    """A tokenized text document belonging to a corpus.

    Tokens are produced eagerly by the corpus' tokenizer at construction
    time; ``path`` may be None for in-memory documents.
    """

    def __init__(self, corpus, path, grams):
        self.corpus = corpus
        self.grams = grams
        self.graminator = None
        self.path = path
        # Tokenize immediately via the owning corpus' tokenizer.
        self.tokens = corpus.tokenizer.tokens(self)

    def getText(self):
        """Return the raw file contents, or None when no path is set."""
        if self.path is None:
            return None
        # Context manager ensures the handle is closed (the previous
        # implementation leaked it).
        with open(self.path, "r") as handle:
            return handle.read()

    def length(self):
        """Number of tokens in the document."""
        return len(self.tokens)

    def nGrams(self, gramsize):
        """Return a Graminator iterating n-grams of the given size."""
        return Graminator(self, gramsize)

    def hasNext(self, index):
        """Return True when a token exists after position *index*."""
        index += 1
        return (index > 0) and index < len(self.tokens)

    def nextToken(self, index):
        """Return the token following position *index*."""
        return self.tokens[index + 1]

    def hasPrev(self, index):
        """Return True when a token exists before position *index*.

        Fixed off-by-one: the old check required index - 1 > 0, which
        wrongly reported no predecessor for index == 1 even though
        tokens[0] exists.
        """
        index -= 1
        return (index >= 0) and index < len(self.tokens)

    def prevToken(self, index):
        """Return the token preceding position *index*."""
        return self.tokens[index - 1]
| [
"duane.nielsen.rocks@gmail.com"
] | duane.nielsen.rocks@gmail.com |
e8e634391f303d0b6ceac4ace6299bdd2bf13d16 | 92caa71c5a5135603cd875ce56dcd28a7dac7655 | /GHER/common/vec_env/subproc_vec_env.py | 44ac0f946d3087a480df1d7ca5ba9e3acb077a7b | [] | no_license | MrDadaGuy/GHER | 286343d66815971afd5bd47f6ee0e1940b144610 | 8b1b2a2aba6a684db5921d2328c79e7f36aa8eae | refs/heads/master | 2022-03-25T08:48:43.359759 | 2019-07-19T14:59:53 | 2019-07-19T14:59:53 | 173,989,952 | 0 | 0 | null | 2019-03-05T17:21:49 | 2019-03-05T17:21:49 | null | UTF-8 | Python | false | false | 2,859 | py | import numpy as np
from multiprocessing import Process, Pipe
from GHER.common.vec_env import VecEnv, CloudpickleWrapper
def worker(remote, parent_remote, env_fn_wrapper):
    """Child-process loop: build one env and serve commands from the parent.

    Runs until a 'close' command arrives. Supported commands:
    'step' (auto-resets on done), 'reset', 'reset_task', 'close',
    'get_spaces'.
    """
    # Close the parent's pipe end in the child so EOF propagates correctly.
    parent_remote.close()
    # env_fn_wrapper is a CloudpickleWrapper; .x holds the env factory.
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            if done:
                # Auto-reset so the parent always receives a valid
                # next observation after an episode ends.
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            ob = env.reset()
            remote.send(ob)
        elif cmd == 'reset_task':
            ob = env.reset_task()
            remote.send(ob)
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        else:
            raise NotImplementedError
class SubprocVecEnv(VecEnv):
    """Vectorized env running each sub-environment in its own subprocess.

    Communication uses one Pipe per environment; the child end is served
    by ``worker`` above.
    """
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False  # True while an async step is in flight
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        # Wrap each env factory in CloudpickleWrapper so it can be pickled
        # across the process boundary.
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
            for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True # if the main process crashes, we should not cause things to hang
            p.start()
        # Parent closes its copies of the worker-side pipe ends.
        for remote in self.work_remotes:
            remote.close()
        # Query spaces from the first worker; all envs are assumed identical.
        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)

    def step_async(self, actions):
        """Send one action to every worker without blocking."""
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True

    def step_wait(self):
        """Block for all replies; return stacked (obs, rews, dones, infos)."""
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos

    def reset(self):
        """Reset every sub-environment and return the stacked observations."""
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])

    def reset_task(self):
        """Call reset_task() on every sub-environment and stack the results."""
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])

    def close(self):
        """Shut down workers: drain pending replies, send 'close', join all."""
        if self.closed:
            return
        if self.waiting:
            # Drain outstanding step replies so 'close' is not misread.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
| [
"baichenjia@163.com"
] | baichenjia@163.com |
0d3ffa1d191e50eef4a95f5452a805a99887284a | c65028cd7520d6ac4b109eb4fafc470bfafcabfb | /logit.py | 2970708f6eefa3453ef031a0e0a254360824535d | [] | no_license | devsunb/commit-log-exporter | 17ca29eb2ede7cfffb140101fa002bffc6567549 | 5c58b9d5d902ac017a05bf6a9451390f65c5560d | refs/heads/master | 2023-07-26T14:36:12.141223 | 2021-08-31T08:04:56 | 2021-08-31T08:04:56 | 401,622,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | import logging
import re
import subprocess
logger = logging.getLogger('LOGIT')
class Logit:
    """Parses ``git log`` output into a list of commit dictionaries."""

    @staticmethod
    def parse(commit_lines):
        """Turn raw ``git log`` lines into dicts with hash/author/email/date/message."""
        commits = []
        commit = {}
        for line in commit_lines:
            # Blank separators and "Merge:" header lines carry no data.
            if line == '' or line == '\n' or re.match('merge:', line, re.IGNORECASE):
                continue
            if re.match('commit', line, re.IGNORECASE):
                # New commit header: flush the previous commit, if any.
                if len(commit) != 0:
                    commits.append(commit)
                commit = {'hash': re.match('commit (.*)', line, re.IGNORECASE).group(1)}
            elif re.match('Author:', line, re.IGNORECASE):
                author_match = re.compile('Author: (.*) <(.*)>').match(line)
                commit['author'] = author_match.group(1)
                commit['email'] = author_match.group(2)
            elif re.match('Date:', line, re.IGNORECASE):
                date_match = re.compile('Date: (.*)').match(line)
                commit['date'] = date_match.group(1)
            elif re.match(' ', line, re.IGNORECASE):
                # Indented lines are the commit message body.
                if 'message' in commit:
                    commit['message'] += '\n' + line.strip()
                else:
                    commit['message'] = line.strip()
            else:
                logger.error('Unexpected Line: ' + line)
        if len(commit) != 0:
            commits.append(commit)
        return commits

    def log(self, cwd, path):
        """Run ``git log -- <path>`` inside *cwd* and return the parsed commits."""
        captured = []
        with subprocess.Popen(['git', 'log', '--', path], cwd=cwd, stdout=subprocess.PIPE, bufsize=1,
                              universal_newlines=True) as proc:
            for line in proc.stdout:
                captured.append(line)
        return self.parse(''.join(captured).split('\n'))
| [
"devsunb@gmail.com"
] | devsunb@gmail.com |
acd320e9b5670c9b4b981d67159ec8153117268f | 6a020d2afc402b443013a1a65b961d21e868ad8b | /tools/mdtable.py | 92d401420be8a0b8255ba25b904ef3829c2fbe13 | [] | no_license | scientist1642/anagram_benchmarks | fd5645f44e8454596ac3f06768a586819fe12403 | ccbc728544ce86028e0a2d9f8de0490c8ccecc72 | refs/heads/master | 2020-04-27T19:26:06.883530 | 2020-02-27T12:36:32 | 2020-02-27T12:36:32 | 174,617,771 | 4 | 1 | null | 2019-03-09T10:42:27 | 2019-03-08T22:15:14 | Python | UTF-8 | Python | false | false | 3,600 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
def get_matrix_from_csv(stream, separator=";"):
    """Yield one row (list of cell strings) per csv line in *stream*."""
    for raw_line in stream:
        yield [cell.strip("\r\n") for cell in raw_line.split(separator)]
def get_matrix_from_md(stream):
    """Yield one row per markdown table line, skipping the divider line."""
    for line_no, line in enumerate(stream):
        if line_no == 1:
            # line 1 is the |---|---| divider under the header
            continue
        cells = [cell.strip(" \r\n") for cell in line.split("|")]
        # Drop the empty fragments before the first and after the last pipe.
        yield cells[1:-1]
def get_column_store_from_matrix(matrix, escape=False):
    """Transpose row-wise *matrix* into a list of columns, optionally escaping cells."""
    columns = []
    for row in matrix:
        if not columns:
            # First row: start one column per cell.
            columns = [[get_escaped_string(cell, escape)] for cell in row]
        else:
            for col_idx, cell in enumerate(row):
                columns[col_idx].append(get_escaped_string(cell, escape))
    return columns


MARKDOWN_ESCAPE_CHAR = "\\`*_{}[]()#+-.!"


def get_escaped_string(text, escape=True):
    """Return *text* with markdown special characters backslash-escaped."""
    if not escape:
        return text
    # Backslash is first in the set, so existing backslashes are doubled
    # before any new escapes are introduced.
    for special in MARKDOWN_ESCAPE_CHAR:
        text = text.replace(special, "\\" + special)
    return text


def get_formatted_string(text, width, formatted=True):
    """Return *text* space-padded to *width* with surrounding spaces, or as-is."""
    if not formatted:
        return text
    return " " + text + " " + (" " * (width - len(text)))


def get_md_table(column_store, formatted=True):
    """Render a column-store as a markdown table (header, divider, data rows)."""
    widths = [max(len(value) for value in column) for column in column_store]
    rendered = ""
    for row_idx in range(len(column_store[0])):
        cells = (column[row_idx] for column in column_store)
        rendered += "|" + "|".join(
            get_formatted_string(cell, width, formatted)
            for cell, width in zip(cells, widths)
        ) + "|\n"
        if row_idx == 0:
            # divider row directly under the header
            rendered += "|" + "|".join(
                "-" * (width + 2 if formatted else 3) for width in widths
            ) + "|\n"
    return rendered
def get_csv_table(column_store, separator=";"):
    """Render a column-store as csv text, one line per row."""
    row_count = len(column_store[0])
    rows = (
        separator.join(column[row_idx] for column in column_store)
        for row_idx in range(row_count)
    )
    return "".join(row + "\n" for row in rows)
def main(args):
    """Parse CLI flags, read a table from stdin, write the converted table to stdout.

    Flags: -mini (no padding), -in:{csv,md}, -out:{csv,md},
    -separator:<char|tab>, -escape (markdown-escape cell contents).
    """
    # Defaults: formatted (padded) output, csv in, markdown out, ';' separator.
    formatted = True
    escape = False
    input_type = "csv"
    output_type = "md"
    separator = ";"
    for arg in args:
        if arg == "-mini":
            formatted = False
        elif arg.startswith("-in:"):
            input_type = arg[4:].lower()
        elif arg.startswith("-out:"):
            output_type = arg[5:].lower()
        elif arg.startswith("-separator:"):
            # len("-separator:") == 11
            separator = arg[11:]
        elif arg.startswith("-escape"):
            escape = True
    # 'tab' is a convenience alias for a literal tab character.
    if separator == "tab":
        separator = "\t"
    if input_type == "csv":
        matrix = get_matrix_from_csv(sys.stdin, separator)
    elif input_type == "md":
        matrix = get_matrix_from_md(sys.stdin)
    else:
        raise Exception("Invalid input type: %s (csv or md expected)" % input_type)
    if output_type == "csv":
        # Escaping only applies to markdown output, hence not passed here.
        table = get_csv_table(get_column_store_from_matrix(matrix), separator)
    elif output_type == "md":
        table = get_md_table(get_column_store_from_matrix(matrix, escape), formatted)
    else:
        raise Exception("Invalid output type: %s (csv or md expected)" % output_type)
    sys.stdout.write(table)
    sys.stdout.flush()
if __name__ == "__main__":
    main(sys.argv[1:])
| [
"z.isakadze@gmail.com"
] | z.isakadze@gmail.com |
95e9f1d292ccffad970294b2b502147429f71198 | 23b5337bf410415b7b150e3ad60cafc1578a0441 | /07-User-Authentication/01-Flask-Login/myproject/__init__.py | 54b954d72924a39c7987de9eda326bbc04bd3512 | [] | no_license | VerdantFox/flask_course | b8de13ad312c14229f0c3bc2af70e8609a3b00fb | 47b167b54bc580734fa69fc1a2d7e724adfb9610 | refs/heads/master | 2021-09-10T05:01:47.385859 | 2020-02-24T21:07:05 | 2020-02-24T21:07:05 | 241,973,705 | 0 | 0 | null | 2021-09-08T01:40:59 | 2020-02-20T19:40:42 | Python | UTF-8 | Python | false | false | 761 | py | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
# Create a login manager object
login_manager = LoginManager()
app = Flask(__name__)
# Often people will also separate these into a separate config.py file
app.config["SECRET_KEY"] = "mysecretkey"
basedir = os.path.abspath(os.path.dirname(__file__))
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + os.path.join(
basedir, "data.sqlite"
)
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
Migrate(app, db)
# We can now pass in our app to the login manager
login_manager.init_app(app)
# Tell users what view to go to when they need to login.
login_manager.login_view = "login"
| [
"verdantfoxx@gmail.com"
] | verdantfoxx@gmail.com |
40944c5fac9d6218fbb3fccd6144a95baa133e07 | 8bef4e20c773ebd6ccb6af0cb81eb788db920cb0 | /Routes/addNote.py | bd5398cafada0dd39b1819418c86ad88086ec1f0 | [] | no_license | animoffa/PhytonProj | 44207cc3f52826c136b8ea2d204f443f0e604fd7 | 1e62c94af510631fb9ad3b46f6dd882687d183ee | refs/heads/master | 2023-01-28T00:22:56.258766 | 2020-12-09T14:41:15 | 2020-12-09T14:41:15 | 319,985,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,203 | py | from flask.blueprints import Blueprint
from flask import render_template
from flask import request
from models.lectгurers import Lect
from models.subjects import Subj
from models.intervals import Intervals
from models.groups import Group
from managers.DatabaseManager import DatabaseManager
from extensions import db
# Shared helper that wraps all database writes for this blueprint.
db_manager = DatabaseManager(db)
# Blueprint registered under the 'addNote' name with its own assets.
addNote = Blueprint('addNote', __name__,
                    template_folder='templates',
                    static_folder='static')
@addNote.route('/addNote')
def index4():
    """Render the add-note form with all lookup lists for the dropdowns."""
    context = {
        'lecturers': Lect.query.all(),
        'groups': Group.query.all(),
        'intervals': Intervals.query.all(),
        'subjects': Subj.query.all(),
    }
    return render_template('addNote.html', **context)
@addNote.route('/addNote', methods=['post', 'get'])
def addN():
    """Handle the add-note form.

    On POST, validates that every field was supplied, resolves the selected
    lecturer/group/subject/interval to their ids, and persists a schedule
    row. Always re-renders the form with a status message.
    """
    message = ""  # default so a plain GET does not hit an undefined name below
    if request.method == 'POST':
        interval = request.form.get('interval')
        chet = request.form.get('chet')
        day = request.form.get('day')
        subject = request.form.get('subject')
        lecturer = request.form.get('lecturer')
        group = request.form.get('group')
        if interval and subject and lecturer and group and chet and day:
            message = "Note added successfully"
            # Lecturer is rendered as "<last_name> <name> <surname>"; split it
            # back into its parts to look up the row.
            # NOTE(review): raises IndexError if the value has fewer than
            # three words — confirm the form always sends the full triple.
            parts = lecturer.split()
            name = parts[1]
            l_name = parts[0]
            s_name = parts[2]
            lecturer_id = db.session.query(Lect.id).filter(Lect.name == name, Lect.last_name == l_name, Lect.surname == s_name).first()[0]
            group_id = db.session.query(Group.id).filter(Group.name == group).first()[0]
            subject_id = db.session.query(Subj.id).filter(Subj.subject_name == subject).first()[0]
            interval_id = db.session.query(Intervals.id).filter(Intervals.interval == interval).first()[0]
            db_manager.add_schedule(day=day, chet=chet, group_id=group_id, interval_id=interval_id, subject_id=subject_id, lecturer_id=lecturer_id)
        else:
            message = "error"
    return render_template('addNote.html', message=message, prob=" ", lecturers=Lect.query.all(), groups=Group.query.all(), intervals=Intervals.query.all(), subjects=Subj.query.all())
"animoffa1352@gmail.com"
] | animoffa1352@gmail.com |
0c0c96329a80a01c75b885a1de44d8ad5ef2138d | 64965365bfeb800c9b4facae7d707f752c922d9c | /args_aula.py | 9d4788c4db0121d903f357ba664099c15d84c3df | [] | no_license | RafaelNGP/Curso-Python | 585b535f1383931286d897fe2745a1a976e3a3b8 | 0300566578b176071abe7083b222d4fa8a338e90 | refs/heads/master | 2020-12-03T17:33:35.907478 | 2020-06-26T18:33:27 | 2020-06-26T18:33:27 | 231,409,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,942 | py | """
Entendendo o *args
- Eh um parametro, como outro qualquer. Isso significa que voce podera chamar de qualquer
coisa, desde que comece com *
por ex:
*xis
mas por convencao, todos utilizam *args para defini-li.
O QUE EH O ARGS??
O parametro *args utilizado em uma funcao, coloca os valores extras informados como entrada
em uma tupla, entao desde ja lembre-se que as tuplas sao imutaveis.
"""
def soma_todos_os_numeros(num1, num2, num3):
    """Return the sum of exactly three values."""
    total = num1 + num2
    total += num3
    return total
print(soma_todos_os_numeros(4, 6, 9))
# Understanding *args
def soma_todos_os_atributos(*args):
    """Sum every positional argument; returns 0 when called with none."""
    total = 0
    for value in args:
        total += value
    return total
# *args accepts any number of positional arguments, including none.
print(soma_todos_os_atributos())
print(soma_todos_os_atributos(8))
print(soma_todos_os_atributos(9, 8))
print(soma_todos_os_atributos(9, 5, 3))
print(soma_todos_os_atributos(7, 8, 2, 9))
# Mixing ints with a str would raise TypeError inside sum():
# print(soma_todos_os_atributos(8, 6, "AHAL"))
def cadastro_usuario(*args):
    # Interactively collects user data from stdin and prints a confirmation.
    # The extra *args are only echoed at the end.
    user_name = input("Qual o nome que sera cadastrado? ")
    user_email = input("Informe seu email: ")
    user_age = int(input("Qual sua idade? "))
    account_type = input("Que tipo de contavai abrir? (User/ADM) ")
    age_label = "Maior de idade" if user_age >= 18 else "Menor de idade"
    print(f'\n{account_type} {user_name} cadastrado com sucesso!\n'
          f'{age_label}\n'
          f'Email para contato: {user_email}')
    print(args)
# print(cadastro_usuario(nome, email, idade, tipo_conta, "Periodo de Testes", 3.14))
def verifica_info(*args):
    """Greet when both 'Geek' and 'University' were passed, else print unknown."""
    recognized = 'Geek' in args and 'University' in args
    if recognized:
        return print("Seja bem vindo, Geek!")
    return print("Nao tenho certeza de quem eh voce")
# verifica_info()
# verifica_info("Geek", "University")
# verifica_info("University", 3.14, 'Geek')
# O * serve para que informemos ao python que estamos passando como argumento
# uma colecao de dados. Desta forma, ele sabera que vai precisar desempacotar os dados antes.
# Unpacking the list with * passes each element as a separate argument.
numeros = [1, 2, 3, 4, 5, 6, 7]
print(soma_todos_os_atributos(*numeros))
| [
"rafaelferreira.dev3@gmail.com"
] | rafaelferreira.dev3@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.