| blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success |
|---|---|---|---|---|---|---|---|---|---|---|---|
| 55a9ae4fcb8601a935aad537fd08d7ea2d4a6011 | Python | Quer-io/Quer.io | /tests/ml/test_expressionnode.py | UTF-8 | 1,119 | 3.015625 | 3 | ["MIT"] | permissive |
import unittest
from parameterized import parameterized
from querio.ml.expression.feature import Feature


class TestExpressionNode(unittest.TestCase):
    @parameterized.expand([
        ('Simple true', Feature('age') > 30, 'age', 40, True),
        ('Simple false', Feature('age') < 30, 'age', 40, False),
        ('Simple equal', Feature('age') == 30, 'age', 30, True),
        ('Simple not equal', Feature('age') == 30, 'age', 40, False),
        ('Simple limit', Feature('age') < 30, 'age', 30, False),
        ('And true', (
            (Feature('age') > 30) & (Feature('age') < 50)
        ), 'age', 40, True),
        ('And false', (
            (Feature('age') > 30) & (Feature('age') < 50)
        ), 'age', 20, False),
        ('Or true', (
            (Feature('age') > 30) | (Feature('age') < 20)
        ), 'age', 40, True),
        ('Or false', (
            (Feature('age') > 30) | (Feature('age') < 20)
        ), 'age', 25, False),
    ])
    def test_match(self, name, expression, feature, value, is_match):
        match = expression.match(feature, value)
        self.assertEqual(match, is_match)
| true |
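A minimal sketch of the querio.ml expression API these tests exercise (same import as the test above; the values mirror the parameterized cases):

```python
from querio.ml.expression.feature import Feature

expr = (Feature('age') > 30) & (Feature('age') < 50)
print(expr.match('age', 40))  # True: both clauses hold
print(expr.match('age', 20))  # False: fails the lower bound
```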
| a520103dd3a88e8a7e80077fa29719754173693e | Python | mathieu-lemay/aoc_2018 | /12.py | UTF-8 | 3,227 | 2.921875 | 3 | ["MIT"] | permissive |
#! /usr/bin/env python
import os.path
from time import time


class Pattern:
    def __init__(self, in_, out):
        self.in_ = in_
        self.out = out


def fix_array(offset, arr):
    if "#" not in arr:
        return offset, arr
    # Fix start
    s = 0
    for i in range(len(arr)):
        if arr[i] == "#":
            s = i
            break
    if s < 3:
        x = 3 - s
        offset -= x
        arr = ["."] * x + arr
    elif s > 3:
        x = s - 3
        offset += x
        arr = arr[x:]
    # Fix end
    s = 0
    for i in range(len(arr)):
        if arr[-(i + 1)] == "#":
            s = i
            break
    if s < 3:
        x = 3 - s
        arr = arr + ["."] * x
    elif s > 3:
        x = s - 3
        arr = arr[:-x]
    return offset, arr


def sum_plants(arr, offset):
    return sum(i + offset for i, c in enumerate(arr) if c == "#")


def main():
    patterns = []
    generations = 20
    offset = 0
    with open(os.path.join("input", "12.txt")) as f:
        l1 = f.readline()
        og_array = [c for c in l1 if c in (".", "#")]
        _ = f.readline()
        for l in f:
            in_, out = l.split(" => ")
            out = out[0]
            patterns.append(Pattern(in_, out))
    arr = og_array[:]
    for gen in range(generations):
        offset, arr = fix_array(offset, arr)
        arr_new = []
        for i in range(0, len(arr)):
            if i < 2 or i > len(arr) - 2:
                arr_new.append(".")
                continue
            cur = "".join(arr[i - 2 : i + 3])
            for p in patterns:
                if cur == p.in_:
                    arr_new.append(p.out)
                    break
            else:
                arr_new.append(".")
        arr = arr_new
    s = sum_plants(arr, offset)
    print("Part 1: %d" % s)
    arr = og_array[:]
    offset = 0
    prev_cksum = 0
    c = 0
    offset_delta = 0
    prev_offset = 0
    last_gen = 0
    generations = 50000000000
    for gen in range(generations):
        offset, arr = fix_array(offset, arr)
        arr_new = []
        for i in range(0, len(arr)):
            if i < 2 or i > len(arr) - 2:
                arr_new.append(".")
                continue
            cur = "".join(arr[i - 2 : i + 3])
            for p in patterns:
                if cur == p.in_:
                    arr_new.append(p.out)
                    break
            else:
                arr_new.append(".")
        cksum = sum(i if c == "#" else 0 for i, c in enumerate(arr))
        if cksum == prev_cksum and offset - prev_offset == offset_delta:
            c += 1
            if c == 100:
                last_gen = gen + 1
                arr = arr_new
                print("Stopped at gen %d offset is %d" % (last_gen, offset))
                break
        else:
            c = 0
        arr = arr_new
        prev_cksum = cksum
        offset_delta = offset - prev_offset
        prev_offset = offset
    s = sum_plants(arr, offset)
    nb = len([c for c in arr if c == "#"])
    s = (generations - last_gen) * nb + s
    print("Part 2: %d" % s)


if __name__ == "__main__":
    ts = time()
    main()
    ts = time() - ts
    print("Done in %.3fms" % (ts * 1000))
| true |
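A hedged note on the part-2 shortcut above: once the pattern only translates (checksum unchanged for 100 generations, constant offset delta), each further generation adds the plant count times the per-generation shift to the index sum. The script takes that shift as effectively 1, which holds for typical puzzle inputs; the numbers below are hypothetical:

```python
nb = 25                       # plants alive once the pattern stabilises (hypothetical)
s_stable = 1_234              # sum_plants(...) at the generation where we stopped
last_gen = 150                # generation at which stability was detected
generations = 50_000_000_000
answer = s_stable + (generations - last_gen) * nb
```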
| 539ad78b0b990c5d80143d5ff9d4488a2f5c8964 | Python | syzdemonhunter/Coding_Exercises | /Leetcode/170.py | UTF-8 | 885 | 3.921875 | 4 | [] | no_license |
# https://leetcode.com/problems/two-sum-iii-data-structure-design/
class TwoSum:
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.dic = {}

    # time: O(1)
    def add(self, number: int) -> None:
        """
        Add the number to an internal data structure.
        """
        self.dic[number] = self.dic.get(number, 0) + 1

    # time: O(n)
    def find(self, value: int) -> bool:
        """
        Find if there exists any pair of numbers whose sum is equal to the value.
        """
        for i in self.dic.keys():
            j = value - i
            if (i == j and self.dic.get(i) > 1) or (i != j and j in self.dic):
                return True
        return False


# Your TwoSum object will be instantiated and called as such:
# obj = TwoSum()
# obj.add(number)
# param_2 = obj.find(value)
| true |
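A quick usage check for the TwoSum class above, following the commented driver code:

```python
obj = TwoSum()
obj.add(1)
obj.add(3)
obj.add(5)
print(obj.find(4))  # True: 1 + 3
print(obj.find(7))  # False: no stored pair sums to 7
```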
| 88adbfe8649912c9ceeee308ff261fe053a7ca10 | Python | ealataur/rcute-ai | /rcute_ai/tts_espeak.py | UTF-8 | 4,986 | 2.609375 | 3 | [] | no_license |
# modified from github.com/gooofy/py-espeak-ng
import re
import subprocess
import tempfile
from . import util
from pyttsx3.voice import Voice


def lang_detect(txt):
    return 'zh' if re.findall(r'[\u4e00-\u9fff]+', txt) else 'en'


class TTS:
    """text to speech on Linux"""

    def __init__(self):
        self.default_settings = {'b': 1}
        """ voice/volume/pitch/speed etc. See `espeak <http://espeak.sourceforge.net/commands.html>`_ command options section"""
        self._cmd_param_map = {'voice': 'v', 'lang': 'v',
                               'volume': 'a',
                               'capitals': 'k',
                               'line_length': 'l',
                               'pitch': 'p',
                               'speed': 's',
                               'word_gap': 'g'}

    def _normalize_cmd_param(self, txt, options):
        op = {self._cmd_param_map.get(k, k): str(v) for k, v in self.default_settings.items()}
        op.update({self._cmd_param_map.get(k, k): str(v) for k, v in options.items()})
        if not op.get('v'):
            op['v'] = lang_detect(txt)
        gd = op.pop('gender', None)
        if gd:
            op['v'] = op['v'].split('+')[0] + '+' + gd.lower() + ('1' if len(gd) == 1 else '')
        return {('-' if len(k) == 1 else '--') + k: v for k, v in op.items()}

    def _exe(self, cmd, sync=False):
        # logging.debug('espeak cmd: ' + ' '.join(cmd))
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        res = iter(p.stdout.readline, b'')
        if not sync:
            p.stdout.close()
            if p.stderr:
                p.stderr.close()
            if p.stdin:
                p.stdin.close()
            return res
        res2 = []
        for line in res:
            res2.append(line)
        p.stdout.close()
        if p.stderr:
            p.stderr.close()
        if p.stdin:
            p.stdin.close()
        p.wait()
        return res2

    def say(self, txt, **options):
        """speak text

        :param txt: text to be said
        :type txt: str
        :param options: if not set, :data:`default_settings` is used.

            * voice/lang: if not set, English is the default unless Chinese characters are detected in :data:`txt`
            * volume
            * pitch
            * speed
            * word_gap

            See `espeak <http://espeak.sourceforge.net/commands.html>`_ command options section
        :type options: optional
        """
        op = self._normalize_cmd_param(txt, options)
        cmd = ['espeak', txt.encode('utf8')]
        cmd.extend(sum(op.items(), ()))
        return self._exe(cmd, sync=False)

    def tts_wav(self, txt, file=None, **options):
        """return tts wav or save it to file

        :param txt: text to be said
        :type txt: str
        :param file: path for tts wav data to be saved at, defaults to None
        :type file: str, optional
        :param options: if not set, :data:`default_settings` is used.

            * voice/lang: if not set, English is the default unless Chinese characters are detected in :data:`txt`
            * volume
            * pitch
            * speed
            * word_gap

            See `espeak <http://espeak.sourceforge.net/commands.html>`_ command options section
        :type options: optional
        :return: wav data if :data:`file` is not specified
        :rtype: bytes or None
        """
        # if fmt == 'xs':
        #     txt = '[[' + txt + ']]'
        # elif fmt != 'txt':
        #     raise Exception('unknown format: %s' % fmt)
        with (open(file, 'w') if file else tempfile.NamedTemporaryFile()) as f:
            op = self._normalize_cmd_param(txt, options)
            cmd = ['espeak', txt.encode('utf8'), '-w', f.name]
            cmd.extend(sum(op.items(), ()))
            self._exe(cmd, sync=True)
            if file:
                return
            f.seek(0)
            return f.read()

    @property
    def voices(self):
        """return installed voices"""
        res = self._exe('espeak --voices'.split(), sync=True)
        voices = []
        gd = {'M': 'male', 'F': 'female'}
        for i, v in enumerate(res[1:]):
            parts = v.decode('utf8').split()
            if len(parts) < 5:
                continue
            age_parts = parts[2].split('/')
            voice = Voice(id=i,
                          # 'pty' : parts[0],
                          languages=[parts[1]],
                          age=None if len(age_parts) == 1 else age_parts[-2],
                          gender=gd.get(age_parts[-1], age_parts[-1]),
                          name=parts[3],
                          # 'file' : parts[4],
                          )
            # logging.debug('espeakng: voices: parts= %s %s -> %s' % (len(parts), repr(parts), repr(voice)))
            voices.append(voice)
        return voices
| true |
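A minimal sketch of driving the TTS wrapper above, assuming the espeak binary is installed and the rcute_ai package layout shown in this row:

```python
from rcute_ai.tts_espeak import TTS

tts = TTS()
tts.say('hello world', speed=140, pitch=60)  # asynchronous: espeak runs in the background
wav_bytes = tts.tts_wav('你好世界')           # Chinese voice auto-selected via lang_detect()
```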
| 40e8b41307a504d8f40e870935245d291e41a3eb | Python | unaguil/hyperion-ns2 | /experiments/interpolators/InterpolatorLoader.py | UTF-8 | 597 | 2.59375 | 3 | ["Apache-2.0"] | permissive |
from interpolators.LinearInterpolator import LinearInterpolator
from interpolators.IntegerInterpolator import IntegerInterpolator
from interpolators.SetInterpolator import SetInterpolator


def loadInterpolator(entry):
    interpolator = entry.getAttribute("interpolator")
    if interpolator == 'LinearInterpolator':
        return LinearInterpolator(entry)
    if interpolator == 'IntegerInterpolator':
        return IntegerInterpolator(entry)
    if interpolator == 'SetInterpolator':
        return SetInterpolator(entry)
    raise Exception('Unknown interpolator %s' % interpolator)
| true |
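A sketch of how loadInterpolator might be called: it only needs an object exposing getAttribute(), e.g. an xml.dom.minidom element (assumes the hyperion-ns2 interpolators package is importable):

```python
from xml.dom.minidom import parseString
from interpolators.InterpolatorLoader import loadInterpolator

entry = parseString('<param interpolator="LinearInterpolator"/>').documentElement
interp = loadInterpolator(entry)  # returns a LinearInterpolator built from the entry
```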
| df97d8b2c03941f97c24772f7c3fe50b4190e900 | Python | aliyyahna/opencv-exercise | /chapter 4/hstack.py | UTF-8 | 189 | 2.703125 | 3 | [] | no_license |
import cv2
import numpy as np
citraA = cv2.imread('./img/baboon.png')
citraB = cv2.imread('./img/lena.bmp')
hasil = np.hstack((citraA, citraB))
cv2.imshow('Hasil', hasil)
cv2.waitKey(0)
| true |
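An aside on the np.hstack call above: both images must share height, channel count, and dtype, since hstack concatenates along axis 1. A file-free shape check:

```python
import numpy as np

a = np.zeros((4, 2, 3), dtype=np.uint8)  # stand-in for one image
b = np.ones((4, 5, 3), dtype=np.uint8)   # same height, different width
print(np.hstack((a, b)).shape)           # (4, 7, 3)
```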
| 6f126dc92819ccf4c6c73b6cddfc5119adbce810 | Python | esix/competitive-programming | /acmp/page-03/0125/main.py | UTF-8 | 226 | 2.78125 | 3 | [] | no_license |
n = int(input())
a = [list(map(int, input().split(' ')[:n])) for i in range(n)]
input()
b = list(map(int, input().split(' ')[:n]))
r = 0
for i in range(n):
    r += sum([b[i] != b[j] for j in range(i, n) if a[i][j]])
print(r)
| true |
| 585054623a0ab223508ccb6727d44c4107d96e16 | Python | kristjanleifur4/kristjan | /test.py | UTF-8 | 219 | 3.6875 | 4 | [] | no_license |
first = int(input("First: "))
step = int(input("Step: "))
the_sum = 0
i = 0
while the_sum <= 100:
    i += step
    the_sum = first + i
    print(i, end=" ")
else:
    print()
print("Sum of series:", the_sum)
| true |
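A worked trace with hypothetical inputs first=2, step=3: the_sum takes 5, 8, ..., 98, 101, so the loop prints "3 6 9 ... 99" and then "Sum of series: 101" (the while/else branch runs the lone print() once the condition fails).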
| 53772c008e8d0ce0aeb2b1dc7f0019885cfd5765 | Python | thinh2904/Chuong4 | /Bai12.3.py | UTF-8 | 728 | 3.9375 | 4 | [] | no_license |
import random
import numpy as np
import string

# Create the uppercase alphabet
a = string.ascii_uppercase
# Create the lowercase alphabet
b = string.ascii_lowercase
# Randomize the number of list elements, from 50 to 100
n = random.randrange(50, 101)
# Create a list of n zero-valued placeholders (each slot will hold a dict)
listdict = list(np.zeros(n))


# Function to generate a name
def name():
    k = random.choice(a)
    for i in range(random.randrange(2, 8)):
        k += random.choice(b)
    return k


# Function to generate an age
def age():
    age = random.randrange(0, 100)
    return age


# Assign name and age values into the list
for i in range(len(listdict)):
    listdict[i] = {'name': name(), 'age': age()}
print(listdict)
| true |
| 5da9e0ed201d9b197f074e45d4ca1a7b986a17cd | Python | dr-rodriguez/SIMPLE | /scripts/ingests/ingest_utils.py | UTF-8 | 63,958 | 2.703125 | 3 | ["BSD-3-Clause"] | permissive |
"""
Utils functions for use in ingests
"""
from astroquery.simbad import Simbad
from astropy.coordinates import SkyCoord
import astropy.units as u
from astroquery.gaia import Gaia
from typing import List, Union, Optional
import numpy as np
import numpy.ma as ma
import pandas as pd
from sqlalchemy import func, null
from astropy.io import fits
import dateutil
import re
import requests
from scripts.ingests.utils import *
logger = logging.getLogger('SIMPLE')
# NAMES
def ingest_names(db, source, other_name):
'''
This function ingests an other name into the Names table
Parameters
----------
db: astrodbkit2.astrodb.Database
Database object created by astrodbkit2
source: str
Name of source as it appears in sources table
other_name: str
Name of the source different than that found in source table
Returns
-------
None
'''
names_data = [{'source': source, 'other_name': other_name}]
try:
with db.engine.connect() as conn:
conn.execute(db.Names.insert().values(names_data))
conn.commit()
logger.info(f" Name added to database: {names_data}\n")
except sqlalchemy.exc.IntegrityError as e:
msg = f"Could not add {names_data} to database. Name is likely a duplicate."
logger.warning(msg)
raise SimpleError(msg + '\n' + str(e) + '\n')
# SOURCES
def ingest_sources(db, sources, references=None, ras=None, decs=None, comments=None, epochs=None,
                   equinoxes=None, other_references=None, raise_error=True, search_db=True):
    """
    Script to ingest sources
    TODO: better support references=None

    Parameters
    ----------
    db: astrodbkit2.astrodb.Database
        Database object created by astrodbkit2
    sources: list[str]
        Names of sources
    references: str or list[strings]
        Discovery references of sources
    ras: list[floats], optional
        Right ascensions of sources. Decimal degrees.
    decs: list[floats], optional
        Declinations of sources. Decimal degrees.
    comments: list[strings], optional
        Comments
    epochs: str or list[str], optional
        Epochs of coordinates
    equinoxes: str or list[string], optional
        Equinoxes of coordinates
    other_references: str or list[strings]
    raise_error: bool, optional
        True (default): Raise an error if a source cannot be ingested
        False: Log a warning but skip sources which cannot be ingested
    search_db: bool, optional
        True (default): Search database to see if source is already ingested
        False: Ingest source without searching the database

    Returns
    -------
    None
    """
    # TODO: add example

    # SETUP INPUTS
    if ras is None and decs is None:
        coords = False
    else:
        coords = True

    if isinstance(sources, str):
        n_sources = 1
    else:
        n_sources = len(sources)

    # Convert single element input values into lists
    input_values = [sources, references, ras, decs, epochs, equinoxes, comments, other_references]
    for i, input_value in enumerate(input_values):
        if input_value is None:
            input_values[i] = [None] * n_sources
        elif isinstance(input_value, (str, float)):
            input_values[i] = [input_value] * n_sources
    sources, references, ras, decs, epochs, equinoxes, comments, other_references = input_values

    n_added = 0
    n_existing = 0
    n_names = 0
    n_alt_names = 0
    n_skipped = 0
    n_multiples = 0

    if n_sources > 1:
        logger.info(f"Trying to add {n_sources} sources")

    # Loop over each source and decide to ingest, skip, or add alt name
    for i, source in enumerate(sources):
        # Find out if source is already in database or not
        if coords and search_db:
            name_matches = find_source_in_db(db, source, ra=ras[i], dec=decs[i])
        elif search_db:
            name_matches = find_source_in_db(db, source)
        elif not search_db:
            name_matches = []
        else:
            name_matches = None
            ra = None
            dec = None

        if len(name_matches) == 1 and search_db:  # Source is already in database
            n_existing += 1
            msg1 = f"{i}: Skipping {source}. Already in database as {name_matches[0]}. \n "
            logger.debug(msg1)
            # Figure out if ingest name is an alternate name and add
            db_matches = db.search_object(source, output_table='Sources', fuzzy_search=False)
            if len(db_matches) == 0:
                # add other name to names table
                ingest_names(db, name_matches[0], source)
                n_alt_names += 1
            continue
        elif len(name_matches) > 1 and search_db:  # Multiple source matches in the database
            n_multiples += 1
            msg1 = f"{i} Skipping {source} "
            msg = f"{i} More than one match for {source}\n {name_matches}\n"
            logger.warning(msg1 + msg)
            if raise_error:
                raise SimpleError(msg)
            else:
                continue
        elif len(name_matches) == 0 or not search_db:  # No match in the database, INGEST!
            if coords:  # Coordinates were provided as input
                ra = ras[i]
                dec = decs[i]
                epoch = None if ma.is_masked(epochs[i]) else epochs[i]
                equinox = None if ma.is_masked(equinoxes[i]) else equinoxes[i]
            else:  # Try to get coordinates from SIMBAD
                simbad_result_table = Simbad.query_object(source)
                if simbad_result_table is None:
                    n_skipped += 1
                    ra = None
                    dec = None
                    msg = f"{i}: Skipping: {source}. Coordinates are needed and could not be retrieved from SIMBAD. \n"
                    logger.warning(msg)
                    if raise_error:
                        raise SimpleError(msg)
                    else:
                        continue
                elif len(simbad_result_table) == 1:
                    simbad_coords = simbad_result_table['RA'][0] + ' ' + simbad_result_table['DEC'][0]
                    simbad_skycoord = SkyCoord(simbad_coords, unit=(u.hourangle, u.deg))
                    ra = simbad_skycoord.to_string(style='decimal').split()[0]
                    dec = simbad_skycoord.to_string(style='decimal').split()[1]
                    epoch = '2000'  # Default coordinates from SIMBAD are epoch 2000.
                    equinox = 'J2000'  # Default frame from SIMBAD is ICRS and J2000.
                    msg = f"Coordinates retrieved from SIMBAD {ra}, {dec}"
                    logger.debug(msg)
                else:
                    n_skipped += 1
                    ra = None
                    dec = None
                    msg = f"{i}: Skipping: {source}. Coordinates are needed and could not be retrieved from SIMBAD. \n"
                    logger.warning(msg)
                    if raise_error:
                        raise SimpleError(msg)
                    else:
                        continue
            logger.debug(f"{i}: Ingesting {source}. Not already in database. ")
        else:
            msg = f"{i}: unexpected condition encountered ingesting {source}"
            logger.error(msg)
            raise SimpleError(msg)

        # Construct data to be added
        source_data = [{'source': source,
                        'ra': ra,
                        'dec': dec,
                        'reference': references[i],
                        'epoch': epoch,
                        'equinox': equinox,
                        'other_references': other_references[i],
                        'comments': None if ma.is_masked(comments[i]) else comments[i]}]
        names_data = [{'source': source,
                       'other_name': source}]

        # Try to add the source to the database
        try:
            with db.engine.connect() as conn:
                conn.execute(db.Sources.insert().values(source_data))
                conn.commit()
            n_added += 1
            msg = f"Added {str(source_data)}"
            logger.debug(msg)
        except sqlalchemy.exc.IntegrityError:
            if ma.is_masked(source_data[0]['reference']):  # check if reference is blank
                msg = f"{i}: Skipping: {source}. Discovery reference is blank. \n"
                msg2 = f"\n {str(source_data)}\n"
                logger.warning(msg)
                logger.debug(msg2)
                n_skipped += 1
                if raise_error:
                    raise SimpleError(msg + msg2)
                else:
                    continue
            elif db.query(db.Publications).filter(db.Publications.c.publication == references[i]).count() == 0:
                # check if reference is in Publications table
                msg = f"{i}: Skipping: {source}. Discovery reference {references[i]} is not in Publications table. \n" \
                      f"(Add it with add_publication function.) \n "
                msg2 = f"\n {str(source_data)}\n"
                logger.warning(msg)
                logger.debug(msg2)
                n_skipped += 1
                if raise_error:
                    raise SimpleError(msg + msg2)
                else:
                    continue
            else:
                msg = f"{i}: Skipping: {source}. Not sure why."
                msg2 = f"\n {str(source_data)} "
                logger.warning(msg)
                logger.debug(msg2)
                n_skipped += 1
                if raise_error:
                    raise SimpleError(msg + msg2)
                else:
                    continue

        # Try to add the source name to the Names table
        try:
            ingest_names(db, source, source)
            n_names += 1
        except sqlalchemy.exc.IntegrityError:
            msg = f"{i}: Could not add {names_data} to database"
            logger.warning(msg)
            if raise_error:
                raise SimpleError(msg)
            else:
                continue

    if n_sources > 1:
        logger.info(f"Sources added to database: {n_added}")
        logger.info(f"Names added to database: {n_names} \n")
        logger.info(f"Sources already in database: {n_existing}")
        logger.info(f"Alt Names added to database: {n_alt_names}")
        logger.info(f"Sources NOT added to database because multiple matches: {n_multiples}")
        logger.info(f"Sources NOT added to database: {n_skipped} \n")

    if n_added != n_names:
        msg = "Number added should equal names added."
        raise SimpleError(msg)
    if n_added + n_existing + n_multiples + n_skipped != n_sources:
        msg = "Number added + Number skipped doesn't add up to total sources"
        raise SimpleError(msg)

    return
# SURVEY DATA
def find_survey_name_in_simbad(sources, desig_prefix, source_id_index=None):
    """
    Function to extract source designations from SIMBAD

    Parameters
    ----------
    sources: astropy.table.Table
        Sources names to search for in SIMBAD
    desig_prefix
        prefix to search for in list of identifiers
    source_id_index
        After a designation is split, this index indicates source id suffix.
        For example, source_id_index = 2 to extract suffix from "Gaia DR2" designations.
        source_id_index = 1 to extract suffix from "2MASS" designations.

    Returns
    -------
    Astropy table
    """
    n_sources = len(sources)

    Simbad.reset_votable_fields()
    Simbad.add_votable_fields('typed_id')  # keep search term in result table
    Simbad.add_votable_fields('ids')  # add all SIMBAD identifiers as an output column
    logger.info("simbad query started")
    result_table = Simbad.query_objects(sources['source'])
    logger.info("simbad query ended")

    ind = result_table['SCRIPT_NUMBER_ID'] > 0  # find indexes which contain results
    simbad_ids = result_table['TYPED_ID', 'IDS'][ind]

    db_names = []
    simbad_designations = []
    source_ids = []
    for row in simbad_ids:
        db_name = row['TYPED_ID']
        ids = row['IDS'].split('|')
        designation = [i for i in ids if desig_prefix in i]
        if designation:
            logger.debug(f'{db_name}, {designation[0]}')
            db_names.append(db_name)
            if len(designation) == 1:
                simbad_designations.append(designation[0])
            else:
                simbad_designations.append(designation[0])
                logger.warning(f'more than one designation matched, {designation}')
            if source_id_index is not None:
                source_id = designation[0].split()[source_id_index]
                source_ids.append(int(source_id))  # convert to int since long in Gaia

    n_matches = len(db_names)
    logger.info(f"Found {n_matches} {desig_prefix} sources for {n_sources} sources")

    if source_id_index is not None:
        result_table = Table([db_names, simbad_designations, source_ids],
                             names=('db_names', 'designation', 'source_id'))
    else:
        result_table = Table([db_names, simbad_designations],
                             names=('db_names', 'designation'))
    return result_table
# SPECTRAL TYPES
def ingest_spectral_types(db, sources, spectral_types, references, regimes, spectral_type_error=None,
                          comments=None):
    """
    Script to ingest spectral types

    Parameters
    ----------
    db: astrodbkit2.astrodb.Database
        Database object created by astrodbkit2
    sources: str or list[str]
        Names of sources
    spectral_types: str or list[strings]
        Spectral Types of sources
    spectral_type_error: str or list[strings], optional
        Spectral Type Errors of sources
    regimes: str or list[str]
        List or string
    comments: list[strings], optional
        Comments
    references: str or list[strings]
        Reference of the Spectral Type

    Returns
    -------
    None
    """
    n_sources = len(sources)

    # Convert single element input value to list
    input_values = [sources, spectral_types, spectral_type_error, regimes, comments, references]
    for i, input_value in enumerate(input_values):
        if input_value is None:
            input_values[i] = [None] * n_sources
        elif isinstance(input_value, str):
            input_values[i] = [input_value] * n_sources
    sources, spectral_types, spectral_type_error, regimes, comments, references = input_values

    n_added = 0
    n_skipped = 0

    logger.info(f"Trying to add {n_sources} spectral types")

    for i, source in enumerate(sources):
        db_name = find_source_in_db(db, source)
        # Spectral Type data is in the database
        if len(db_name) != 1:
            msg = f"No unique source match for {source} in the database " \
                  f"(with SpT: {spectral_types[i]} from {references[i]})"
            raise SimpleError(msg)
        else:
            db_name = db_name[0]

        adopted = None
        source_spt_data = db.query(db.SpectralTypes).filter(db.SpectralTypes.c.source == db_name).table()

        if source_spt_data is None or len(source_spt_data) == 0:
            adopted = True  # first spectral type for this source becomes the adopted one
            logger.debug("No Spectral Type data for this source in the database")
        elif len(source_spt_data) > 0:
            # Spectral Type Data already exists
            dupe_ind = source_spt_data['reference'] == references[i]
            if sum(dupe_ind):
                logger.debug(f"Duplicate measurement\n, {source_spt_data[dupe_ind]}")
            else:
                logger.debug("Another Spectral Type exists,")
                if logger.level == 10:
                    source_spt_data.pprint_all()

            adopted_ind = source_spt_data['adopted'] == 1
            if sum(adopted_ind):
                old_adopted = source_spt_data[adopted_ind]
                if spectral_type_error[i] < min(source_spt_data['spectral_type_error']):
                    adopted = True
                    if old_adopted:
                        with db.engine.connect() as conn:
                            conn.execute(
                                db.SpectralTypes.
                                update().
                                where(and_(db.SpectralTypes.c.source == old_adopted['source'][0],
                                           db.SpectralTypes.c.reference == old_adopted['reference'][0])).
                                values(adopted=False)
                            )
                            conn.commit()
                        # check that adopted flag is successfully changed
                        old_adopted_data = db.query(db.SpectralTypes).filter(
                            and_(db.SpectralTypes.c.source == old_adopted['source'][0],
                                 db.SpectralTypes.c.reference == old_adopted['reference'][0])).table()
                        logger.debug("Old adopted measurement unset")
                        if logger.level == 10:
                            old_adopted_data.pprint_all()
                logger.debug(f"The new spectral type's adopted flag is: {adopted}")
        else:
            msg = "Unexpected state"
            logger.error(msg)
            raise RuntimeError

        # Convert the spectral type string to code
        spectral_type_code = convert_spt_string_to_code(spectral_types[i])[0]
        msg = f"Converted {spectral_types[i]} to {spectral_type_code}"
        logger.debug(msg)

        # Construct the data to be added
        spt_data = [{'source': db_name,
                     'spectral_type_string': spectral_types[i],
                     'spectral_type_code': spectral_type_code,
                     'spectral_type_error': spectral_type_error[i],
                     'regime': regimes[i],
                     'adopted': adopted,
                     'comments': comments[i],
                     'reference': references[i]}]

        # Check if the entry already exists; if so: skip adding it
        check = db.query(db.SpectralTypes.c.source).filter(and_(db.SpectralTypes.c.source == db_name,
                                                                db.SpectralTypes.c.regime == regimes[i],
                                                                db.SpectralTypes.c.reference == references[i])).count()
        if check == 1:
            n_skipped += 1
            logger.info(f'Spectral type for {db_name} already in the database: skipping insert '
                        f'{spt_data}')
            continue

        logger.debug(f"Trying to insert {spt_data} into Spectral Types table ")
        try:
            with db.engine.connect() as conn:
                conn.execute(db.SpectralTypes.insert().values(spt_data))
                conn.commit()
            n_added += 1
            msg = f"Added {str(spt_data)}"
            logger.debug(msg)
        except sqlalchemy.exc.IntegrityError as e:
            if db.query(db.Publications).filter(db.Publications.c.reference == references[i]).count() == 0:
                msg = "The publication does not exist in the database"
                msg1 = "Add it with ingest_publication function."
                logger.debug(msg + msg1)
                raise SimpleError(msg)
            elif "NOT NULL constraint failed: SpectralTypes.regime" in str(e):
                msg = f"The regime was not provided for {source}"
                logger.error(msg)
                raise SimpleError(msg)
            else:
                msg = "Other error\n"
                logger.error(msg)
                raise SimpleError(msg)

    msg = f"Spectral types added: {n_added} \n" \
          f"Spectral Types skipped: {n_skipped}"
    logger.info(msg)
def convert_spt_string_to_code(spectral_types):
    """
    normal tests: M0, M5.5, L0, L3.5, T0, T3, T4.5, Y0, Y5, Y9.
    weird TESTS: sdM4, ≥Y4, T5pec, L2:, L0blue, Lpec, >L9, >M10, >L, T, Y
    digits are needed in current implementation.

    :param spectral_types:
    :return:
    """
    if isinstance(spectral_types, str):
        spectral_types = [spectral_types]

    spectral_type_codes = []
    for spt in spectral_types:
        logger.debug(f"Trying to convert: `{spt}`")
        spt_code = np.nan
        if spt == "":
            spectral_type_codes.append(spt_code)
            logger.debug("Appended NAN")
            continue
        if spt == "null":
            spt_code = 0
            spectral_type_codes.append(spt_code)
            logger.debug("Appended Null")
            continue

        # identify main spectral class, loop over any prefix text to identify MLTY
        for i, item in enumerate(spt):
            if item == 'M':
                spt_code = 60
                break
            elif item == 'L':
                spt_code = 70
                break
            elif item == 'T':
                spt_code = 80
                break
            elif item == 'Y':
                spt_code = 90
                break
        else:  # only runs if the loop found no MLTY class (no break)
            i = 0

        # find integer or decimal subclass and add to spt_code
        if re.search(r'\d*\.?\d+', spt[i + 1:]) is not None:
            spt_code += float(re.findall(r'\d*\.?\d+', spt[i + 1:])[0])
        spectral_type_codes.append(spt_code)
    return spectral_type_codes


def convert_spt_code_to_string_to_code(spectral_codes, decimals=1):
    """
    Convert spectral type codes to string values

    Parameters
    ----------
    spectral_codes : list[float]
        List of spectral type codes

    Returns
    -------
    spectral_types : list[str]
        List of spectral types
    """
    if isinstance(spectral_codes, float):
        spectral_codes = [spectral_codes]

    spectral_types = []
    for spt in spectral_codes:
        spt_type = ''

        # Identify major type
        if 60 <= spt < 70:
            spt_type = 'M'
        elif 70 <= spt < 80:
            spt_type = 'L'
        elif 80 <= spt < 90:
            spt_type = 'T'
        elif 90 <= spt < 100:
            spt_type = 'Y'

        # Numeric part of type
        format = f'.{decimals}f'
        spt_type = f'{spt_type}{spt % 10:{format}}'
        logger.debug(f"Converting: {spt} -> {spt_type}")

        spectral_types.append(spt_type)
    return spectral_types
# PARALLAXES
def ingest_parallaxes(db, sources, plxs, plx_errs, plx_refs, comments=None):
    """
    Parameters
    ----------
    db: astrodbkit2.astrodb.Database
        Database object
    sources: str or list[str]
        list of source names
    plxs: float or list[float]
        list of parallaxes corresponding to the sources
    plx_errs: float or list[float]
        list of parallax uncertainties
    plx_refs: str or list[str]
        list of references for the parallax data
    comments: Optional[Union[List[str], str]]

    Examples
    ----------
    > ingest_parallaxes(db, my_sources, my_plx, my_plx_unc, my_plx_refs)
    """
    if isinstance(sources, str):
        n_sources = 1
        sources = [sources]
    else:
        n_sources = len(sources)

    # Convert single element input value to list
    if isinstance(plx_refs, str):
        plx_refs = [plx_refs] * n_sources
    if isinstance(comments, str):
        comments = [comments] * n_sources
    elif comments is None:
        comments = [None] * n_sources

    input_float_values = [plxs, plx_errs]
    for i, input_value in enumerate(input_float_values):
        if isinstance(input_value, float):
            input_value = [input_value] * n_sources
        input_float_values[i] = input_value
    plxs, plx_errs = input_float_values

    n_added = 0

    for i, source in enumerate(sources):  # loop through sources with parallax data to ingest
        db_name = find_source_in_db(db, source)

        if len(db_name) != 1:
            msg = f"No unique source match for {source} in the database"
            raise SimpleError(msg)
        else:
            db_name = db_name[0]

        # Search for existing parallax data and determine if this is the best
        # If no previous measurement exists, set the new one to the Adopted measurement
        adopted = None
        source_plx_data: Table = db.query(db.Parallaxes).filter(db.Parallaxes.c.source == db_name).table()

        if source_plx_data is None or len(source_plx_data) == 0:
            # if there are no other measurements in the database, set new data Adopted = True
            adopted = True
            # old_adopted = None  # not used
            logger.debug("No other measurement")
        elif len(source_plx_data) > 0:  # Parallax data already exists
            # check for duplicate measurement
            dupe_ind = source_plx_data['reference'] == plx_refs[i]
            if sum(dupe_ind):
                logger.debug(f"Duplicate measurement\n, {source_plx_data[dupe_ind]}")
                continue
            else:
                logger.debug("!!! Another parallax measurement exists,")
                if logger.level == 10:
                    source_plx_data.pprint_all()

            # check for previous adopted measurement and find new adopted
            adopted_ind = source_plx_data['adopted'] == 1
            if sum(adopted_ind):
                old_adopted = source_plx_data[adopted_ind]
                # if errors of new data are less than other measurements, set Adopted = True.
                if plx_errs[i] < min(source_plx_data['parallax_error']):
                    adopted = True

                    # unset old adopted
                    if old_adopted:
                        with db.engine.connect() as conn:
                            conn.execute(
                                db.Parallaxes.
                                update().
                                where(and_(db.Parallaxes.c.source == old_adopted['source'][0],
                                           db.Parallaxes.c.reference == old_adopted['reference'][0])).
                                values(adopted=False)
                            )
                            conn.commit()
                        # check that adopted flag is successfully changed
                        old_adopted_data = db.query(db.Parallaxes).filter(
                            and_(db.Parallaxes.c.source == old_adopted['source'][0],
                                 db.Parallaxes.c.reference == old_adopted['reference'][0])).table()
                        logger.debug("Old adopted measurement unset")
                        if logger.level == 10:
                            old_adopted_data.pprint_all()
                else:
                    adopted = False
                logger.debug(f"The new measurement's adopted flag is: {adopted}")
        else:
            msg = 'Unexpected state'
            logger.error(msg)
            raise RuntimeError(msg)

        # Construct data to be added
        parallax_data = [{'source': db_name,
                          'parallax': plxs[i],
                          'parallax_error': plx_errs[i],
                          'reference': plx_refs[i],
                          'adopted': adopted,
                          'comments': comments[i]}]

        logger.debug(f"{parallax_data}")

        try:
            with db.engine.connect() as conn:
                conn.execute(db.Parallaxes.insert().values(parallax_data))
                conn.commit()
            n_added += 1
            logger.info(f"Parallax added to database: \n "
                        f"{parallax_data}")
        except sqlalchemy.exc.IntegrityError:
            msg = "The source may not exist in Sources table.\n" \
                  "The parallax reference may not exist in Publications table. " \
                  "Add it with add_publication function. \n" \
                  "The parallax measurement may be a duplicate."
            logger.error(msg)
            raise SimpleError(msg)

    logger.info(f"Total Parallaxes added to database: {n_added} \n")
    return
# PROPER MOTIONS
def ingest_proper_motions(db, sources, pm_ras, pm_ra_errs, pm_decs, pm_dec_errs, pm_references):
    """
    Parameters
    ----------
    db: astrodbkit2.astrodb.Database
        Database object
    sources: list[str]
        list of source names
    pm_ras: list[float]
        list of proper motions in right ascension (RA)
    pm_ra_errs: list[float]
        list of uncertainties in proper motion RA
    pm_decs: list[float]
        list of proper motions in declination (dec)
    pm_dec_errs: list[float]
        list of uncertainties in proper motion dec
    pm_references: str or list[str]
        Reference or list of references for the proper motion measurements

    Examples
    ----------
    > ingest_proper_motions(db, my_sources, my_pm_ra, my_pm_ra_unc, my_pm_dec, my_pm_dec_unc, my_pm_refs,
                            verbose = True)
    """
    n_sources = len(sources)

    # Convert single element input value to list
    if isinstance(pm_references, str):
        pm_references = [pm_references] * len(sources)

    input_float_values = [pm_ras, pm_ra_errs, pm_decs, pm_dec_errs]
    for i, input_value in enumerate(input_float_values):
        if isinstance(input_value, float):
            input_value = [input_value] * n_sources
        input_float_values[i] = input_value
    pm_ras, pm_ra_errs, pm_decs, pm_dec_errs = input_float_values

    n_added = 0

    for i, source in enumerate(sources):
        db_name = find_source_in_db(db, source)

        if len(db_name) != 1:
            msg = f"No unique source match for {source} in the database"
            raise SimpleError(msg)
        else:
            db_name = db_name[0]

        # Search for existing proper motion data and determine if this is the best
        # If no previous measurement exists, set the new one to the Adopted measurement
        # adopted = None  # not used
        source_pm_data = db.query(db.ProperMotions).filter(db.ProperMotions.c.source == db_name).table()

        if source_pm_data is None or len(source_pm_data) == 0:
            # if there are no other measurements in the database, set new data Adopted = True
            adopted = True
        elif len(source_pm_data) > 0:
            # check to see if other measurement is a duplicate of the new data
            dupe_ind = source_pm_data['reference'] == pm_references[i]
            if sum(dupe_ind):
                logger.debug(f"Duplicate measurement\n, {source_pm_data}")
                continue

            # check for previous adopted measurement
            adopted_ind = source_pm_data['adopted'] == 1
            if sum(adopted_ind):
                old_adopted = source_pm_data[adopted_ind]
            else:
                old_adopted = None

            # if errors of new data are less than other measurements, set Adopted = True.
            if pm_ra_errs[i] < min(source_pm_data['mu_ra_error']) and pm_dec_errs[i] < min(
                    source_pm_data['mu_dec_error']):
                adopted = True

                # unset old adopted if it exists
                if old_adopted:
                    with db.engine.connect() as conn:
                        conn.execute(
                            db.ProperMotions.
                            update().
                            where(and_(db.ProperMotions.c.source == old_adopted['source'][0],
                                       db.ProperMotions.c.reference == old_adopted['reference'][0])).
                            values(adopted=False)
                        )
                        conn.commit()
            else:
                adopted = False

            # if no previous adopted measurement, set adopted to the measurement with the smallest errors
            if not adopted and not old_adopted and \
                    min(source_pm_data['mu_ra_error']) < pm_ra_errs[i] and \
                    min(source_pm_data['mu_dec_error']) < pm_dec_errs[i]:
                adopted_pm = db.ProperMotions.update().where(and_(db.ProperMotions.c.source == db_name,
                                                                  db.ProperMotions.c.mu_ra_error == min(
                                                                      source_pm_data['mu_ra_error']),
                                                                  db.ProperMotions.c.mu_dec_error == min(
                                                                      source_pm_data['mu_dec_error']))). \
                    values(adopted=True)
                with db.engine.connect() as conn:
                    conn.execute(adopted_pm)
                    conn.commit()

            logger.debug("!!! Another Proper motion exists")
            if logger.level == 10:
                source_pm_data.pprint_all()
        else:
            msg = 'Unexpected state'
            logger.error(msg)
            raise RuntimeError(msg)

        # Construct data to be added
        pm_data = [{'source': db_name,
                    'mu_ra': pm_ras[i],
                    'mu_ra_error': pm_ra_errs[i],
                    'mu_dec': pm_decs[i],
                    'mu_dec_error': pm_dec_errs[i],
                    'adopted': adopted,
                    'reference': pm_references[i]}]
        logger.debug(f'Proper motion data to add: {pm_data}')

        try:
            with db.engine.connect() as conn:
                conn.execute(db.ProperMotions.insert().values(pm_data))
                conn.commit()
            n_added += 1
        except sqlalchemy.exc.IntegrityError:
            msg = "The source may not exist in Sources table.\n" \
                  "The proper motion reference may not exist in Publications table. " \
                  "Add it with add_publication function. \n" \
                  "The proper motion measurement may be a duplicate."
            logger.error(msg)
            raise SimpleError(msg)

        updated_source_pm_data = db.query(db.ProperMotions).filter(db.ProperMotions.c.source == db_name).table()
        logger.info('Updated proper motion data:')
        if logger.level == 20:  # Info = 20, Debug = 10
            updated_source_pm_data.pprint_all()

    return
# PHOTOMETRY
def ingest_photometry(db, sources, bands, magnitudes, magnitude_errors, reference, ucds=None,
                      telescope=None, instrument=None, epoch=None, comments=None, raise_error=True):
    """
    TODO: Write Docstring

    Parameters
    ----------
    db: astrodbkit2.astrodb.Database
    sources: list[str]
    bands: str or list[str]
    magnitudes: list[float]
    magnitude_errors: list[float]
    reference: str or list[str]
    ucds: str or list[str], optional
    telescope: str or list[str]
    instrument: str or list[str]
    epoch: list[float], optional
    comments: list[str], optional
    raise_error: bool, optional
        True (default): Raise an error if a source cannot be ingested
        False: Log a warning but skip sources which cannot be ingested

    Returns
    -------
    """
    if isinstance(sources, str):
        n_sources = 1
        sources = [sources]
    else:
        n_sources = len(sources)

    # Convert single element input values into lists
    input_values = [bands, reference, telescope, instrument, ucds]
    for i, input_value in enumerate(input_values):
        if isinstance(input_value, str):
            input_value = [input_value] * n_sources
        elif input_value is None:
            input_value = [None] * n_sources
        input_values[i] = input_value
    bands, reference, telescope, instrument, ucds = input_values

    input_float_values = [magnitudes, magnitude_errors]
    for i, input_value in enumerate(input_float_values):
        if isinstance(input_value, float):
            input_value = [input_value] * n_sources
        input_float_values[i] = input_value
    magnitudes, magnitude_errors = input_float_values

    if n_sources != len(magnitudes) or n_sources != len(magnitude_errors):
        msg = f"N Sources: {len(sources)}, " \
              f"N Magnitudes: {len(magnitudes)}, N Mag errors: {len(magnitude_errors)}," \
              f"\nSources, magnitudes, and magnitude error lists should all be the same length"
        logger.error(msg)
        raise RuntimeError(msg)

    if n_sources != len(reference) or n_sources != len(telescope) or n_sources != len(bands):
        msg = "All lists should be the same length"
        logger.error(msg)
        raise RuntimeError(msg)

    n_added = 0

    for i, source in enumerate(sources):
        db_name = find_source_in_db(db, source)

        if len(db_name) != 1:
            msg = f"No unique source match for {source} in the database"
            raise SimpleError(msg)
        else:
            db_name = db_name[0]

        # if the uncertainty is masked, don't ingest anything
        if isinstance(magnitude_errors[i], np.ma.core.MaskedConstant):
            mag_error = None
        else:
            mag_error = str(magnitude_errors[i])

        # Construct data to be added
        photometry_data = [{'source': db_name,
                            'band': bands[i],
                            'ucd': ucds[i],
                            'magnitude': str(magnitudes[i]),  # Convert to string to maintain significant digits
                            'magnitude_error': mag_error,
                            'telescope': telescope[i],
                            'instrument': instrument[i],
                            'epoch': epoch,
                            'comments': comments,
                            'reference': reference[i]}]
        logger.debug(f'Photometry data: {photometry_data}')

        try:
            with db.engine.connect() as conn:
                conn.execute(db.Photometry.insert().values(photometry_data))
                conn.commit()
            n_added += 1
            logger.info(f"Photometry measurement added: \n"
                        f"{photometry_data}")
        except sqlalchemy.exc.IntegrityError as e:
            if 'UNIQUE constraint failed:' in str(e):
                msg = "The measurement may be a duplicate."
                if raise_error:
                    logger.error(msg)
                    raise SimpleError(msg)
                else:
                    logger.warning(msg)
                    continue
            else:
                msg = "The source may not exist in Sources table.\n" \
                      "The reference may not exist in the Publications table. " \
                      "Add it with add_publication function."
                logger.error(msg)
                raise SimpleError(msg)

    logger.info(f"Total photometry measurements added to database: {n_added} \n")
    return
# SPECTRA
def ingest_spectra(db, sources, spectra, regimes, telescopes, instruments, modes, obs_dates, references,
                   original_spectra=None, wavelength_units=None, flux_units=None, wavelength_order=None,
                   comments=None, other_references=None, raise_error=True):
    """
    Parameters
    ----------
    db: astrodbkit2.astrodb.Database
    sources: list[str]
        List of source names
    spectra: list[str]
        List of filenames corresponding to spectra files
    regimes: str or list[str]
        List or string
    telescopes: str or list[str]
        List or string
    instruments: str or list[str]
        List or string
    modes: str or list[str]
        List or string
    obs_dates: str or datetime
        List of strings or datetime objects
    references: list[str]
        List or string
    original_spectra: list[str]
        List of filenames corresponding to original spectra files
    wavelength_units: str or list[str] or Quantity, optional
        List or string
    flux_units: str or list[str] or Quantity, optional
        List or string
    wavelength_order: list[int], optional
    comments: list[str], optional
        List of strings
    other_references: list[str], optional
        List of strings
    raise_error: bool
    """
    # Convert single value input values to lists
    if isinstance(sources, str):
        sources = [sources]
    if isinstance(spectra, str):
        spectra = [spectra]
    input_values = [regimes, telescopes, instruments, modes, obs_dates, wavelength_order, wavelength_units,
                    flux_units, references, comments, other_references]
    for i, input_value in enumerate(input_values):
        if isinstance(input_value, str):
            input_values[i] = [input_value] * len(sources)
        elif isinstance(input_value, type(None)):
            input_values[i] = [None] * len(sources)
    regimes, telescopes, instruments, modes, obs_dates, wavelength_order, wavelength_units, flux_units, \
        references, comments, other_references = input_values

    n_spectra = len(spectra)
    n_skipped = 0
    n_dupes = 0
    n_missing_instrument = 0
    n_added = 0
    n_blank = 0

    msg = f'Trying to add {n_spectra} spectra'
    logger.info(msg)

    for i, source in enumerate(sources):
        # TODO: check that spectrum can be read by astrodbkit

        # Get source name as it appears in the database
        db_name = find_source_in_db(db, source)

        if len(db_name) != 1:
            msg = f"No unique source match for {source} in the database"
            raise SimpleError(msg)
        else:
            db_name = db_name[0]

        # Check if spectrum file is accessible
        # First check for internet
        internet = check_internet_connection()
        if internet:
            request_response = requests.head(spectra[i])
            status_code = request_response.status_code  # The website is up if the status code is 200
            if status_code != 200:
                n_skipped += 1
                msg = "The spectrum location does not appear to be valid: \n" \
                      f'spectrum: {spectra[i]} \n' \
                      f'status code: {status_code}'
                logger.error(msg)
                if raise_error:
                    raise SimpleError(msg)
                else:
                    continue
            else:
                msg = f"The spectrum location appears up: {spectra[i]}"
                logger.debug(msg)
            if original_spectra:
                request_response1 = requests.head(original_spectra[i])
                status_code1 = request_response1.status_code
                if status_code1 != 200:
                    n_skipped += 1
                    msg = "The spectrum location does not appear to be valid: \n" \
                          f'spectrum: {original_spectra[i]} \n' \
                          f'status code: {status_code1}'
                    logger.error(msg)
                    if raise_error:
                        raise SimpleError(msg)
                    else:
                        continue
                else:
                    msg = f"The spectrum location appears up: {original_spectra[i]}"
                    logger.debug(msg)
        else:
            msg = "No internet connection. Internet is needed to check spectrum files."
            raise SimpleError(msg)

        # Find what spectra already exist in database for this source
        source_spec_data = db.query(db.Spectra).filter(db.Spectra.c.source == db_name).table()

        # SKIP if observation date is blank
        # TODO: try to populate obs date from meta data in spectrum file
        if ma.is_masked(obs_dates[i]) or obs_dates[i] == '':
            obs_date = None
            missing_obs_msg = f"Skipping spectrum with missing observation date: {source} \n"
            missing_row_spe = f"{source, obs_dates[i], references[i]} \n"
            logger.info(missing_obs_msg)
            logger.debug(missing_row_spe)
            n_blank += 1
            continue
        else:
            try:
                obs_date = pd.to_datetime(obs_dates[i])  # TODO: Another method that doesn't require pandas?
            except ValueError:
                n_skipped += 1
                if raise_error:
                    msg = f"{source}: Can't convert obs date to Date Time object: {obs_dates[i]}"
                    logger.error(msg)
                    raise SimpleError
            except dateutil.parser._parser.ParserError:
                n_skipped += 1
                if raise_error:
                    msg = f"{source}: Can't convert obs date to Date Time object: {obs_dates[i]}"
                    logger.error(msg)
                    raise SimpleError
                else:
                    msg = f"Skipping {source} Can't convert obs date to Date Time object: {obs_dates[i]}"
                    logger.warning(msg)
                    continue

        # TODO: make it possible to ingest units and order
        row_data = [{'source': db_name,
                     'spectrum': spectra[i],
                     'original_spectrum': None,  # if ma.is_masked(original_spectra[i]) or isinstance(original_spectra, None)
                                                 # else original_spectra[i],
                     'local_spectrum': None,  # if ma.is_masked(local_spectra[i]) else local_spectra[i],
                     'regime': regimes[i],
                     'telescope': telescopes[i],
                     'instrument': None if ma.is_masked(instruments[i]) else instruments[i],
                     'mode': None if ma.is_masked(modes[i]) else modes[i],
                     'observation_date': obs_date,
                     'wavelength_units': None if ma.is_masked(wavelength_units[i]) else wavelength_units[i],
                     'flux_units': None if ma.is_masked(flux_units[i]) else flux_units[i],
                     'wavelength_order': None if ma.is_masked(wavelength_order[i]) else wavelength_order[i],
                     'comments': None if ma.is_masked(comments[i]) else comments[i],
                     'reference': references[i],
                     'other_references': None if ma.is_masked(other_references[i]) else other_references[i]}]
        logger.debug(row_data)

        try:
            with db.engine.connect() as conn:
                conn.execute(db.Spectra.insert().values(row_data))
                conn.commit()
            n_added += 1
        except sqlalchemy.exc.IntegrityError as e:
            if "CHECK constraint failed: regime" in str(e):
                msg = f"Regime provided is not in schema: {regimes[i]}"
                logger.error(msg)
                if raise_error:
                    raise SimpleError(msg)
                else:
                    continue
            if db.query(db.Publications).filter(db.Publications.c.publication == references[i]).count() == 0:
                msg = f"Spectrum for {source} could not be added to the database because the reference " \
                      f"{references[i]} is not in Publications table. \n" \
                      f"(Add it with ingest_publication function.) \n "
                logger.warning(msg)
                if raise_error:
                    raise SimpleError(msg)
                else:
                    continue

            # check telescope, instrument, mode exists
            telescope = db.query(db.Telescopes).filter(db.Telescopes.c.name == row_data[0]['telescope']).table()
            instrument = db.query(db.Instruments).filter(db.Instruments.c.name == row_data[0]['instrument']).table()
            mode = db.query(db.Modes).filter(db.Modes.c.name == row_data[0]['mode']).table()

            if len(source_spec_data) > 0:  # Spectra data already exists
                # check for duplicate measurement
                ref_dupe_ind = source_spec_data['reference'] == references[i]
                date_dupe_ind = source_spec_data['observation_date'] == obs_date
                instrument_dupe_ind = source_spec_data['instrument'] == instruments[i]
                mode_dupe_ind = source_spec_data['mode'] == modes[i]
                if sum(ref_dupe_ind) and sum(date_dupe_ind) and sum(instrument_dupe_ind) and sum(mode_dupe_ind):
                    msg = f"Skipping suspected duplicate measurement\n{source}\n"
                    msg2 = f"{source_spec_data[ref_dupe_ind]['source', 'instrument', 'mode', 'observation_date', 'reference']}"
                    msg3 = f"{instruments[i], modes[i], obs_date, references[i], spectra[i]} \n"
                    logger.warning(msg)
                    logger.debug(msg2 + msg3 + str(e))
                    n_dupes += 1
                    if raise_error:
                        raise SimpleError
                    else:
                        continue  # Skip duplicate measurement
                # else:
                #     msg = f'Spectrum could not be added to the database (other data exist): \n ' \
                #           f"{source, instruments[i], modes[i], obs_date, references[i], spectra[i]} \n"
                #     msg2 = f"Existing Data: \n "
                #     # f"{source_spec_data[ref_dupe_ind]['source', 'instrument', 'mode', 'observation_date', 'reference', 'spectrum']}"
                #     msg3 = f"Data not able to add: \n {row_data} \n "
                #     logger.warning(msg + msg2)
                #     source_spec_data[ref_dupe_ind][
                #         'source', 'instrument', 'mode', 'observation_date', 'reference', 'spectrum'].pprint_all()
                #     logger.debug(msg3)
                #     n_skipped += 1
                #     continue
            if len(instrument) == 0 or len(mode) == 0 or len(telescope) == 0:
                msg = f'Spectrum for {source} could not be added to the database. \n' \
                      f' Telescope, Instrument, and/or Mode need to be added to the appropriate table. \n' \
                      f" Trying to find telescope: {row_data[0]['telescope']}, instrument: {row_data[0]['instrument']}, " \
                      f" mode: {row_data[0]['mode']} \n" \
                      f" Telescope: {telescope}, Instrument: {instrument}, Mode: {mode} \n"
                logger.error(msg)
                n_missing_instrument += 1
                if raise_error:
                    raise SimpleError
                else:
                    continue
            else:
                msg = f'Spectrum for {source} could not be added to the database for unknown reason: \n {row_data} \n '
                logger.error(msg)
                raise SimpleError(msg)

    msg = f"SPECTRA ADDED: {n_added} \n" \
          f"    Spectra with blank obs_date: {n_blank} \n" \
          f"    Suspected duplicates skipped: {n_dupes}\n" \
          f"    Missing Telescope/Instrument/Mode: {n_missing_instrument} \n" \
          f"    Spectra skipped for unknown reason: {n_skipped} \n"
    if n_spectra == 1:
        logger.info(f"Added {source} : \n"
                    f"{row_data}")
    else:
        logger.info(msg)

    if n_added + n_dupes + n_blank + n_skipped + n_missing_instrument != n_spectra:
        msg = "Numbers don't add up: "
        logger.error(msg)
        raise SimpleError(msg)

    spec_count = db.query(Spectra.regime, func.count(Spectra.regime)).group_by(Spectra.regime).all()

    spec_ref_count = db.query(Spectra.reference, func.count(Spectra.reference)). \
        group_by(Spectra.reference).order_by(func.count(Spectra.reference).desc()).limit(20).all()

    telescope_spec_count = db.query(Spectra.telescope, func.count(Spectra.telescope)). \
        group_by(Spectra.telescope).order_by(func.count(Spectra.telescope).desc()).limit(20).all()

    # logger.info(f'Spectra in the database: \n {spec_count} \n {spec_ref_count} \n {telescope_spec_count}')

    return
def ingest_instrument(db, telescope=None, instrument=None, mode=None):
    """
    Script to ingest instrumentation
    TODO: Add option to ingest references for the telescope and instruments

    Parameters
    ----------
    db: astrodbkit2.astrodb.Database
        Database object created by astrodbkit2
    telescope: str
    instrument: str
    mode: str

    Returns
    -------
    None
    """
    # Make sure enough inputs are provided
    if telescope is None and (instrument is None or mode is None):
        msg = "Telescope, Instrument, and Mode must be provided"
        logger.error(msg)
        raise SimpleError(msg)

    msg_search = f'Searching for {telescope}, {instrument}, {mode} in database'
    logger.info(msg_search)

    # Search for the inputs in the database
    telescope_db = db.query(db.Telescopes).filter(db.Telescopes.c.telescope == telescope).table()
    mode_db = db.query(db.Instruments).filter(and_(db.Instruments.c.mode == mode,
                                                   db.Instruments.c.instrument == instrument,
                                                   db.Instruments.c.telescope == telescope)).table()

    if len(telescope_db) == 1 and len(mode_db) == 1:
        msg_found = f'{telescope}, {instrument}, and {mode} are already in the database.'
        logger.info(msg_found)
        return

    # Ingest telescope entry if not already present
    if telescope is not None and len(telescope_db) == 0:
        telescope_add = [{'telescope': telescope}]
        try:
            with db.engine.connect() as conn:
                conn.execute(db.Telescopes.insert().values(telescope_add))
                conn.commit()
            msg_telescope = f'{telescope} was successfully ingested in the database'
            logger.info(msg_telescope)
        except sqlalchemy.exc.IntegrityError as e:  # pylint: disable=invalid-name
            msg = 'Telescope could not be ingested'
            logger.error(msg)
            raise SimpleError(msg + '\n' + str(e))

    # Ingest instrument+mode (requires telescope) if not already present
    if telescope is not None and instrument is not None and mode is not None and len(mode_db) == 0:
        instrument_add = [{'instrument': instrument,
                           'mode': mode,
                           'telescope': telescope}]
        try:
            with db.engine.connect() as conn:
                conn.execute(db.Instruments.insert().values(instrument_add))
                conn.commit()
            msg_instrument = f'{instrument} was successfully ingested in the database.'
            logger.info(msg_instrument)
        except sqlalchemy.exc.IntegrityError as e:  # pylint: disable=invalid-name
            msg = 'Instrument/Mode could not be ingested'
            logger.error(msg)
            raise SimpleError(msg + '\n' + str(e))

    return
def get_gaiadr3(gaia_id, verbose=True):
    """
    Currently setup just to query one source
    TODO: add some debug and info messages

    Parameters
    ----------
    gaia_id: str or int
    verbose

    Returns
    -------
    Table of Gaia data
    """
    gaia_query_string = f"SELECT " \
                        f"parallax, parallax_error, " \
                        f"pmra, pmra_error, pmdec, pmdec_error, " \
                        f"phot_g_mean_flux, phot_g_mean_flux_error, phot_g_mean_mag, " \
                        f"phot_rp_mean_flux, phot_rp_mean_flux_error, phot_rp_mean_mag " \
                        f"FROM gaiadr3.gaia_source WHERE " \
                        f"gaiadr3.gaia_source.source_id = '{gaia_id}'"
    job_gaia_query = Gaia.launch_job(gaia_query_string, verbose=verbose)

    gaia_data = job_gaia_query.get_results()

    return gaia_data


def ingest_gaia_photometry(db, sources, gaia_data, ref):
    # TODO write some tests
    unmasked_gphot = np.logical_not(gaia_data['phot_g_mean_mag'].mask).nonzero()
    gaia_g_phot = gaia_data[unmasked_gphot]['phot_g_mean_flux', 'phot_g_mean_flux_error',
                                            'phot_g_mean_mag']
    unmasked_rpphot = np.logical_not(gaia_data['phot_rp_mean_mag'].mask).nonzero()
    gaia_rp_phot = gaia_data[unmasked_rpphot]['phot_rp_mean_flux', 'phot_rp_mean_flux_error',
                                              'phot_rp_mean_mag']

    # e_Gmag = abs(-2.5/ln(10)*e_FG/FG) from Vizier Note 37 on Gaia DR2 (I/345/gaia2)
    gaia_g_phot['g_unc'] = np.abs(
        -2.5 / np.log(10) * gaia_g_phot['phot_g_mean_flux_error'] / gaia_g_phot['phot_g_mean_flux'])
    gaia_rp_phot['rp_unc'] = np.abs(
        -2.5 / np.log(10) * gaia_rp_phot['phot_rp_mean_flux_error'] / gaia_rp_phot['phot_rp_mean_flux'])

    if ref == 'GaiaDR2':
        g_band_name = 'GAIA2.G'
        rp_band_name = 'GAIA2.Grp'
    elif ref == 'GaiaEDR3' or ref == 'GaiaDR3':
        g_band_name = 'GAIA3.G'
        rp_band_name = 'GAIA3.Grp'
    else:
        raise Exception

    ingest_photometry(db, sources, g_band_name, gaia_g_phot['phot_g_mean_mag'], gaia_g_phot['g_unc'],
                      ref, ucds='em.opt', telescope='Gaia', instrument='Gaia')

    ingest_photometry(db, sources, rp_band_name, gaia_rp_phot['phot_rp_mean_mag'],
                      gaia_rp_phot['rp_unc'], ref, ucds='em.opt.R', telescope='Gaia', instrument='Gaia')

    return


def ingest_gaia_parallaxes(db, sources, gaia_data, ref):
    # TODO write some tests
    unmasked_pi = np.logical_not(gaia_data['parallax'].mask).nonzero()
    gaia_parallaxes = gaia_data[unmasked_pi]['parallax', 'parallax_error']

    ingest_parallaxes(db, sources, gaia_parallaxes['parallax'],
                      gaia_parallaxes['parallax_error'], ref)


def ingest_gaia_pms(db, sources, gaia_data, ref):
    # TODO write some tests
    unmasked_pms = np.logical_not(gaia_data['pmra'].mask).nonzero()
    pms = gaia_data[unmasked_pms]['pmra', 'pmra_error', 'pmdec', 'pmdec_error']

    refs = [ref] * len(pms)

    ingest_proper_motions(db, sources,
                          pms['pmra'], pms['pmra_error'],
                          pms['pmdec'], pms['pmdec_error'],
                          refs)
def ingest_spectrum_from_fits(db, source, spectrum_fits_file):
    """
    Ingests spectrum using data found in the header

    Parameters
    ----------
    db
    source
    spectrum_fits_file
    """
    header = fits.getheader(spectrum_fits_file)
    regime = header['SPECBAND']
    if regime == 'opt':
        regime = 'optical'
    telescope = header['TELESCOP']
    instrument = header['INSTRUME']
    try:
        mode = header['MODE']
    except KeyError:
        mode = None
    obs_date = header['DATE-OBS']
    doi = header['REFERENC']
    data_header = fits.getheader(spectrum_fits_file, 1)
    w_unit = data_header['TUNIT1']
    flux_unit = data_header['TUNIT2']

    reference_match = db.query(db.Publications.c.publication).filter(db.Publications.c.doi == doi).table()
    reference = reference_match['publication'][0]

    ingest_spectra(db, source, spectrum_fits_file, regime, telescope, instrument, None, obs_date, reference,
                   wavelength_units=w_unit, flux_units=flux_unit)
#COMPANION RELATIONSHIP
def ingest_companion_relationships(db, source, companion_name, relationship,
projected_separation_arcsec = None, projected_separation_error = None,
comment = None, ref = None, other_companion_names = None):
"""
This function ingests a single row in to the CompanionRelationship table
Parameters
----------
db: astrodbkit2.astrodb.Database
Database object created by astrodbkit2
source: str
Name of source as it appears in sources table
relationship: str
relationship is of the souce to its companion
should be one of the following: Child, Sibling, Parent, or Unresolved Parent
see note
companion_name: str
SIMBAD resovable name of companion object
projected_separation_arcsec: float (optional)
Projected separtaion should be recorded in arc sec
projected_separation_error: float (optional)
Projected separtaion should be recorded in arc sec
references: str (optional)
Discovery references of sources
comments: str (optional)
Comments
other_companion_names: comma separated names (optional)
other names used to identify the companion
ex: 'HD 89744, NLTT 24128, GJ 9326'
Returns
-------
None
Note: Relationships are constrained to one of the following:
- *Child*: The source is lower mass/fainter than the companion
- *Sibling*: The source is similar to the companion
- *Parent*: The source is higher mass/brighter than the companion
- *Unresolved Parent*: The source is the unresolved, combined light source of an unresolved
multiple system which includes the companion
"""
    # check the relationship entered
    possible_relationships = ['Child', 'Sibling', 'Parent', 'Unresolved Parent', None]
    # check capitalization
    if relationship.title() != relationship:
        logger.info(f"Relationship capitalization changed from {relationship} to {relationship.title()}")
        relationship = relationship.title()
if relationship not in possible_relationships:
msg = f"Relationship given for {source}, {companion_name}: {relationship} NOT one of the constrained relationships \n {possible_relationships}"
logger.error(msg)
raise SimpleError(msg)
    # source cannot be the same as the companion
if source == companion_name:
msg = f"{source}: Source cannot be the same as companion name"
logger.error(msg)
raise SimpleError(msg)
    if projected_separation_arcsec is not None and projected_separation_arcsec < 0:
        msg = f"Projected separation: {projected_separation_arcsec}, cannot be negative"
        logger.error(msg)
        raise SimpleError(msg)
    if projected_separation_error is not None and projected_separation_error < 0:
        msg = f"Projected separation error: {projected_separation_error}, cannot be negative"
        logger.error(msg)
        raise SimpleError(msg)
# check other names
## make sure companion name is included in the list
    if other_companion_names is None:
other_companion_names = companion_name
else:
companion_name_list = other_companion_names.split(', ')
if companion_name not in companion_name_list:
companion_name_list.append(companion_name)
other_companion_names = (', ').join(companion_name_list)
try:
with db.engine.connect() as conn:
conn.execute(db.CompanionRelationships.insert().values(
{'source': source,
'companion_name': companion_name,
'projected_separation_arcsec':projected_separation_arcsec,
'projected_separation_error':projected_separation_error,
'relationship':relationship,
'reference': ref,
'comments': comment,
'other_companion_names': other_companion_names}))
conn.commit()
logger.info(f"ComapnionRelationship added: ",
[source, companion_name, relationship, projected_separation_arcsec, \
projected_separation_error, comment, ref])
except sqlalchemy.exc.IntegrityError as e:
if 'UNIQUE constraint failed:' in str(e):
msg = "The companion may be a duplicate."
logger.error(msg)
raise SimpleError(msg)
else:
msg = ("Make sure all require parameters are provided. \\"
"Other possible errors: source may not exist in Sources table \\" \
"or the reference may not exist in the Publications table. " )
logger.error(msg)
raise SimpleError(msg)
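# Minimal usage sketch (all names below are hypothetical placeholders, not
# records from a real database):
#   ingest_companion_relationships(db, 'Example Source', 'Example Companion',
#                                  'Sibling', projected_separation_arcsec=1.5,
#                                  projected_separation_error=0.2,
#                                  ref='Example Ref')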
| true
|
58efba2052299360dca036b514709ebb99e6a875
|
Python
|
zyp19/leetcode1
|
/秋招提前批/民生银行/2.py
|
UTF-8
| 335
| 3.4375
| 3
|
[] |
no_license
|
import sys
num = 0
print("Menu Function Test")
print("1:Count Lines")
print("Q:Quit")
t = input()
if t == "1":
    # count the number of non-empty lines on stdin
    for line in sys.stdin:  # sys.stdin.readline() would iterate over one line's characters
        if not line.strip():
            continue
        num += 1
    print(num)
elif t == "Q":
    print("Quit")
else:
    print("Wrong input. Please re-choose.")
| true
|
bc13912d3f11f2c5df40c27276544892ad1835ab
|
Python
|
Alymostafa/OS--linux--Process-Manger
|
/os project/project.txt
|
UTF-8
| 998
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import os
import sys
myhost = os.uname()[1]
z=1
while z:
print ("A. List all the processes in the system.")
print ("B. List all the processes grouped by user.")
print ("C. Display process ID of all the processes.")
print ("D. Run/stop a specific process.")
print ("E. Send specific signals to specific process.")
print ("0. Exit")
print("Please Enter Your Command")
x = input()
    if x == 'a' or x == 'A':
        os.system('top')
    elif x == 'b' or x == 'B':
        user = input('type username:\n')
        os.system('ps -u ' + user)
    elif x == 'C' or x == 'c':
        os.system('pgrep -u ' + myhost + ' -l')
    elif x == 'D' or x == 'd':
        proc = input('type process name:\n')
        os.system('pkill -9 ' + proc)
    elif x == 'E' or x == 'e':
        print('Choose the number of the signal:')
        c = open("signal.txt", "r")
        cont = c.read()
        c.close()
        print(cont)
        sig = input()
        pro = input('type process name:\n')
        os.system('pkill -' + sig + ' ' + pro)
    elif x == '0':
        sys.exit()
    else:
        print("Invalid Choice\n")
| true
|
e21c9dee11e21d0141aefa42a06617ac844ac23e
|
Python
|
stroxler/tdxutil
|
/tdxutil/exceptions.py
|
UTF-8
| 1,002
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
"""
Tools to make working with exceptions easier.
"""
def try_with_context(error_context, f, *args, **kwargs):
"""
Non-lazy version of `try_with_lazy_context`. Everything
is the same except `error_context` should be a string.
"""
return try_with_lazy_context(lambda: error_context,
f, *args, **kwargs)
def try_with_lazy_context(error_context, f, *args, **kwargs):
"""
Call an arbitrary function with arbitrary args / kwargs, wrapping
in an exception handler that attaches a prefix to the exception
message and then raises (the original stack trace is preserved).
The `error_context` argument should be a lambda taking no
arguments and returning a message which gets prepended to
any errors.
"""
try:
return f(*args, **kwargs)
except Exception as e:
msg = error_context()
        first = e.args[0] if e.args else ''
        e.args = ("%s:\n%s" % (msg, first),) + tuple(e.args[1:])
raise
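# Usage sketch (the config path below is illustrative):
#   def load(path):
#       with open(path) as f:
#           return f.read()
#
#   # Any error raised by load() is re-raised with the prefix attached:
#   try_with_context("Failed to load config", load, "/tmp/config.json")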
| true
|
89309fa0695cfc6e3786ae9a667dbed221d85b3d
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03700/s963066209.py
|
UTF-8
| 757
| 3.140625
| 3
|
[] |
no_license
|
import sys,math
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
n,a,b = map(int,readline().split())
h = [int(readline()) for i in range(n)]
def is_ok(arg):
chk = 0
for i in h:
chk += max(0,math.ceil((-arg*b+i)/(a-b)))
return chk <= arg
def bisect_ok(ng, ok):
    '''
    Given initial values ng and ok, return the smallest (or largest) ok
    that satisfies is_ok.
    First define is_ok, then call with ng = (minimum value - 1) and
    ok = (maximum value + 1) to find the minimum; swap ng and ok to
    find the maximum instead.
    '''
while (abs(ok - ng) > 1):
mid = (ok + ng) // 2
if is_ok(mid):
ok = mid
else:
ng = mid
return ok
print(bisect_ok(0,10**9))
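# Note: bisect_ok is a generic binary search over the answer space. For
# example, with is_ok(m) defined as m * m >= 50, bisect_ok(0, 100) would
# return 8, the smallest m whose square reaches 50.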
| true
|
a9f500c64764f7d8fa2d0df70cbafd0c8eb0e521
|
Python
|
IgnatIvanov/Generating-Randomness_JetBrains_Academy
|
/Generating Randomness/task/predictor/predictor.py
|
UTF-8
| 2,703
| 3.84375
| 4
|
[] |
no_license
|
import numpy as np
data = ''
while True:
print('Print a random string containing 0 or 1:', sep='\n')
user_in = str(input())
for digit in user_in:
if digit == '0' or digit == '1':
data += digit
if len(data) > 99:
break
else:
print('Current data length is {}, {} symbols left'.format(len(data), 100 - len(data)))
print()
print('Final data string:', data, sep='\n')
print()
zeros = dict()
ones = dict()
# sums = dict()
for pointer in range(0, len(data) - 3):
if data[pointer + 3] == '0':
triad = data[pointer] + data[pointer + 1] + data[pointer + 2]
zeros.setdefault(triad, 0)
zeros[triad] += 1
elif data[pointer + 3] == '1':
triad = data[pointer] + data[pointer + 1] + data[pointer + 2]
ones.setdefault(triad, 0)
ones[triad] += 1
max_triad = ''
max_sum = 0
for x in range(0, 2):
for y in range(0, 2):
for z in range(0, 2):
triad = str(x) + str(y) + str(z)
zeros.setdefault(triad, 0)
ones.setdefault(triad, 0)
# print('{}{}{}: {},{}'.format(x, y, z, zeros.get(triad), ones.get(triad)))
current_sum = zeros.get(triad) + ones.get(triad)
            if current_sum > max_sum:
                max_sum = current_sum
                max_triad = triad
print(r'''You have $1000. Every time the system successfully predicts your next press, you lose $1.
Otherwise, you earn $1. Print "enough" to leave the game. Let's go!
''')
capital = 1000
while True:
print('Print a random string containing 0 or 1:')
test_str = str(input())
skip_flag = False
if test_str == 'enough': # Exiting the game
print('Game over!')
break
for letter in test_str:
if letter != '0' and letter != '1':
skip_flag = True
break
if skip_flag:
continue
predicted_str = ''
predicted_str += max_triad
for i in range(2, len(test_str) - 1):
triad = test_str[i - 2] + test_str[i - 1] + test_str[i]
next_bit = ''
if zeros.get(triad) >= ones.get(triad):
next_bit = '0'
else:
next_bit = '1'
predicted_str += next_bit
print('prediction', predicted_str, sep='\n')
correct_n = 0
for i in range(3, len(test_str)):
if test_str[i] == predicted_str[i]:
correct_n += 1
accuracy = correct_n / (len(test_str) - 3) * 100
accuracy = int(accuracy * 100) / 100
print('Computer guessed right {} out of {} symbols ({} %)'.format(correct_n, len(test_str) - 3, accuracy))
capital -= correct_n
capital += len(test_str) - 3 - correct_n
print('Your capital is now ${}'.format(capital))
print()
| true
|
f955a9526fd3354818eb909ec9e2b4fc0edc18f2
|
Python
|
Alibaba-Gemini-Lab/tf-encrypted
|
/examples/logistic/common.py
|
UTF-8
| 6,086
| 3.140625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
"""Provide classes to perform private training and private prediction with
logistic regression"""
import tensorflow as tf
import tf_encrypted as tfe
class LogisticRegression:
"""Contains methods to build and train logistic regression."""
def __init__(self, num_features):
self.w = tfe.define_private_variable(
tf.random_uniform([num_features, 1], -0.01, 0.01)
)
self.w_masked = tfe.mask(self.w)
self.b = tfe.define_private_variable(tf.zeros([1]))
self.b_masked = tfe.mask(self.b)
@property
def weights(self):
return self.w, self.b
def forward(self, x):
with tf.name_scope("forward"):
out = tfe.matmul(x, self.w_masked) + self.b_masked
y = tfe.sigmoid(out)
return y
def backward(self, x, dy, learning_rate=0.01):
batch_size = x.shape.as_list()[0]
with tf.name_scope("backward"):
dw = tfe.matmul(tfe.transpose(x), dy) / batch_size
db = tfe.reduce_sum(dy, axis=0) / batch_size
assign_ops = [
tfe.assign(self.w, self.w - dw * learning_rate),
tfe.assign(self.b, self.b - db * learning_rate),
]
return assign_ops
def loss_grad(self, y, y_hat):
with tf.name_scope("loss-grad"):
dy = y_hat - y
return dy
def fit_batch(self, x, y):
with tf.name_scope("fit-batch"):
y_hat = self.forward(x)
dy = self.loss_grad(y, y_hat)
fit_batch_op = self.backward(x, dy)
return fit_batch_op
def fit(self, sess, x, y, num_batches):
fit_batch_op = self.fit_batch(x, y)
for batch in range(num_batches):
print("Batch {0: >4d}".format(batch))
sess.run(fit_batch_op, tag="fit-batch")
def evaluate(self, sess, x, y, data_owner):
"""Return the accuracy"""
def print_accuracy(y_hat, y) -> tf.Operation:
with tf.name_scope("print-accuracy"):
correct_prediction = tf.equal(tf.round(y_hat), y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print_op = tf.print(
"Accuracy on {}:".format(data_owner.player_name), accuracy
)
return print_op
with tf.name_scope("evaluate"):
y_hat = self.forward(x)
print_accuracy_op = tfe.define_output(
data_owner.player_name, [y_hat, y], print_accuracy
)
sess.run(print_accuracy_op, tag="evaluate")
class DataOwner:
"""Contains code meant to be executed by a data owner Player."""
def __init__(
self, player_name, num_features, training_set_size, test_set_size, batch_size
):
self.player_name = player_name
self.num_features = num_features
self.training_set_size = training_set_size
self.test_set_size = test_set_size
self.batch_size = batch_size
self.train_initializer = None
self.test_initializer = None
@property
def initializer(self):
return tf.group(self.train_initializer, self.test_initializer)
@tfe.local_computation
def provide_training_data(self):
"""Preprocess training dataset
Return single batch of training dataset
"""
def norm(x, y):
return tf.cast(x, tf.float32), tf.expand_dims(y, 0)
x_raw = tf.random.uniform(
minval=-0.5, maxval=0.5, shape=[self.training_set_size, self.num_features]
)
y_raw = tf.cast(tf.reduce_mean(x_raw, axis=1) > 0, dtype=tf.float32)
train_set = (
tf.data.Dataset.from_tensor_slices((x_raw, y_raw))
.map(norm)
.repeat()
.shuffle(buffer_size=self.batch_size)
.batch(self.batch_size)
)
train_set_iterator = train_set.make_initializable_iterator()
self.train_initializer = train_set_iterator.initializer
x, y = train_set_iterator.get_next()
x = tf.reshape(x, [self.batch_size, self.num_features])
y = tf.reshape(y, [self.batch_size, 1])
return x, y
@tfe.local_computation
def provide_testing_data(self):
"""Preprocess testing dataset
Return single batch of testing dataset
"""
def norm(x, y):
return tf.cast(x, tf.float32), tf.expand_dims(y, 0)
x_raw = tf.random.uniform(
minval=-0.5, maxval=0.5, shape=[self.test_set_size, self.num_features]
)
y_raw = tf.cast(tf.reduce_mean(x_raw, axis=1) > 0, dtype=tf.float32)
test_set = (
tf.data.Dataset.from_tensor_slices((x_raw, y_raw))
.map(norm)
.batch(self.test_set_size)
)
test_set_iterator = test_set.make_initializable_iterator()
self.test_initializer = test_set_iterator.initializer
x, y = test_set_iterator.get_next()
x = tf.reshape(x, [self.test_set_size, self.num_features])
y = tf.reshape(y, [self.test_set_size, 1])
return x, y
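    # NOTE: `field_types` is never set in this example; `field_num` below
    # appears to be leftover from another code path and would raise
    # AttributeError if called.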
@property
def field_num(self):
return len(self.field_types)
class ModelOwner:
"""Contains code meant to be executed by a model owner Player."""
def __init__(self, player_name):
self.player_name = player_name
@tfe.local_computation
def receive_weights(self, *weights):
return tf.print("Weights on {}:".format(self.player_name), weights)
class PredictionClient:
"""Contains methods meant to be executed by a prediction client."""
def __init__(self, player_name, num_features):
self.player_name = player_name
self.num_features = num_features
@tfe.local_computation
def provide_input(self):
return tf.random.uniform(
minval=-0.5, maxval=0.5, dtype=tf.float32, shape=[1, self.num_features]
)
@tfe.local_computation
def receive_output(self, result):
return tf.print("Result on {}:".format(self.player_name), result)
| true
|
9543430deaca7d9104e5553dbd28fc6dd69f8d37
|
Python
|
rileyjmurray/cvxpy
|
/cvxpy/reductions/solvers/conic_solvers/copt_conif.py
|
UTF-8
| 12,853
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""
This file is the CVXPY conic extension of the Cardinal Optimizer
"""
import numpy as np
import scipy.sparse as sp
import cvxpy.settings as s
from cvxpy.constraints import PSD, SOC
from cvxpy.reductions.solution import Solution, failure_solution
from cvxpy.reductions.solvers import utilities
from cvxpy.reductions.solvers.conic_solvers.conic_solver import (
ConicSolver,
dims_to_solver_dict,
)
def tri_to_full(lower_tri, n):
"""
    Expand an n*(n+1)//2-element lower-triangular vector into a full symmetric
    matrix, returned flattened in column-major order.
Parameters
----------
lower_tri : numpy.ndarray
A NumPy array representing the lower triangular part of the
matrix, stacked in column-major order.
n : int
The number of rows (columns) in the full square matrix.
Returns
-------
    numpy.ndarray
        A 1-dimensional ndarray of length n*n: the scaled symmetric expansion
        of the lower-triangular array, flattened in column-major order.
"""
full = np.zeros((n, n))
full[np.triu_indices(n)] = lower_tri
full += full.T
full[np.diag_indices(n)] /= 2.0
return np.reshape(full, n*n, order="F")
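# Hand-checked example: tri_to_full(np.array([1., 2., 3.]), 2) fills the
# triangle, symmetrizes, halves the doubled diagonal, and returns the
# column-major flattening array([1., 2., 2., 3.]).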
class COPT(ConicSolver):
"""
An interface for the COPT solver.
"""
# Solver capabilities
MIP_CAPABLE = True
SUPPORTED_CONSTRAINTS = ConicSolver.SUPPORTED_CONSTRAINTS + [SOC, PSD]
REQUIRES_CONSTR = True
# Only supports MI LPs
MI_SUPPORTED_CONSTRAINTS = ConicSolver.SUPPORTED_CONSTRAINTS
# Map between COPT status and CVXPY status
STATUS_MAP = {
1: s.OPTIMAL, # optimal
2: s.INFEASIBLE, # infeasible
3: s.UNBOUNDED, # unbounded
4: s.INF_OR_UNB, # infeasible or unbounded
5: s.SOLVER_ERROR, # numerical
6: s.USER_LIMIT, # node limit
7: s.OPTIMAL_INACCURATE, # imprecise
8: s.USER_LIMIT, # time out
9: s.SOLVER_ERROR, # unfinished
10: s.USER_LIMIT # interrupted
}
def name(self):
"""
The name of solver.
"""
return 'COPT'
def import_solver(self):
"""
Imports the solver.
"""
import coptpy # noqa F401
def accepts(self, problem):
"""
Can COPT solve the problem?
"""
if not problem.objective.args[0].is_affine():
return False
for constr in problem.constraints:
if type(constr) not in self.SUPPORTED_CONSTRAINTS:
return False
for arg in constr.args:
if not arg.is_affine():
return False
return True
@staticmethod
def psd_format_mat(constr):
"""
Return a linear operator to multiply by PSD constraint coefficients.
Special cases PSD constraints, as COPT expects constraints to be
imposed on solely the lower triangular part of the variable matrix.
"""
rows = cols = constr.expr.shape[0]
entries = rows * (cols + 1)//2
row_arr = np.arange(0, entries)
lower_diag_indices = np.tril_indices(rows)
col_arr = np.sort(np.ravel_multi_index(lower_diag_indices,
(rows, cols),
order='F'))
val_arr = np.zeros((rows, cols))
val_arr[lower_diag_indices] = 1.0
np.fill_diagonal(val_arr, 1.0)
val_arr = np.ravel(val_arr, order='F')
val_arr = val_arr[np.nonzero(val_arr)]
shape = (entries, rows*cols)
scaled_lower_tri = sp.csc_matrix((val_arr, (row_arr, col_arr)), shape)
idx = np.arange(rows * cols)
val_symm = 0.5 * np.ones(2 * rows * cols)
K = idx.reshape((rows, cols))
row_symm = np.append(idx, np.ravel(K, order='F'))
col_symm = np.append(idx, np.ravel(K.T, order='F'))
symm_matrix = sp.csc_matrix((val_symm, (row_symm, col_symm)))
return scaled_lower_tri @ symm_matrix
@staticmethod
def extract_dual_value(result_vec, offset, constraint):
"""
Extracts the dual value for constraint starting at offset.
Special cases PSD constraints, as per the COPT specification.
"""
if isinstance(constraint, PSD):
dim = constraint.shape[0]
lower_tri_dim = dim * (dim + 1) // 2
new_offset = offset + lower_tri_dim
lower_tri = result_vec[offset:new_offset]
full = tri_to_full(lower_tri, dim)
return full, new_offset
else:
return utilities.extract_dual_value(result_vec, offset, constraint)
def apply(self, problem):
"""
Returns a new problem and data for inverting the new solution.
Returns
-------
tuple
(dict of arguments needed for the solver, inverse data)
"""
data, inv_data = super(COPT, self).apply(problem)
variables = problem.x
data[s.BOOL_IDX] = [int(t[0]) for t in variables.boolean_idx]
data[s.INT_IDX] = [int(t[0]) for t in variables.integer_idx]
inv_data['is_mip'] = data[s.BOOL_IDX] or data[s.INT_IDX]
return data, inv_data
def invert(self, solution, inverse_data):
"""
Returns the solution to the original problem given the inverse_data.
"""
status = solution[s.STATUS]
attr = {s.SOLVE_TIME: solution[s.SOLVE_TIME],
s.NUM_ITERS: solution[s.NUM_ITERS],
s.EXTRA_STATS: solution['model']}
primal_vars = None
dual_vars = None
if status in s.SOLUTION_PRESENT:
opt_val = solution[s.VALUE] + inverse_data[s.OFFSET]
primal_vars = {inverse_data[COPT.VAR_ID]: solution[s.PRIMAL]}
if not inverse_data['is_mip']:
eq_dual = utilities.get_dual_values(
solution[s.EQ_DUAL],
self.extract_dual_value,
inverse_data[COPT.EQ_CONSTR])
leq_dual = utilities.get_dual_values(
solution[s.INEQ_DUAL],
self.extract_dual_value,
inverse_data[COPT.NEQ_CONSTR])
eq_dual.update(leq_dual)
dual_vars = eq_dual
return Solution(status, opt_val, primal_vars, dual_vars, attr)
else:
return failure_solution(status, attr)
def solve_via_data(self, data, warm_start: bool, verbose: bool, solver_opts, solver_cache=None):
"""
Returns the result of the call to the solver.
Parameters
----------
data : dict
Data used by the solver.
warm_start : bool
Not used.
verbose : bool
Should the solver print output?
solver_opts : dict
Additional arguments for the solver.
solver_cache: None
None
Returns
-------
tuple
(status, optimal value, primal, equality dual, inequality dual)
"""
import coptpy as copt
# Create COPT environment and model
envconfig = copt.EnvrConfig()
if not verbose:
envconfig.set('nobanner', '1')
env = copt.Envr(envconfig)
model = env.createModel()
# Pass through verbosity
model.setParam(copt.COPT.Param.Logging, verbose)
# Get the dimension data
dims = dims_to_solver_dict(data[s.DIMS])
# Treat cone problem with PSD part specially
rowmap = None
if dims[s.PSD_DIM]:
# Build cone problem data
c = data[s.C]
A = data[s.A]
b = data[s.B]
# Solve the dualized problem
rowmap = model.loadConeMatrix(-b, A.transpose().tocsc(), -c, dims)
model.objsense = copt.COPT.MAXIMIZE
else:
# Build problem data
n = data[s.C].shape[0]
c = data[s.C]
A = data[s.A]
lhs = np.copy(data[s.B])
lhs[range(dims[s.EQ_DIM], dims[s.EQ_DIM] + dims[s.LEQ_DIM])] = -copt.COPT.INFINITY
rhs = np.copy(data[s.B])
lb = np.full(n, -copt.COPT.INFINITY)
ub = np.full(n, +copt.COPT.INFINITY)
vtype = None
if data[s.BOOL_IDX] or data[s.INT_IDX]:
vtype = np.array([copt.COPT.CONTINUOUS] * n)
if data[s.BOOL_IDX]:
vtype[data[s.BOOL_IDX]] = copt.COPT.BINARY
lb[data[s.BOOL_IDX]] = 0
ub[data[s.BOOL_IDX]] = 1
if data[s.INT_IDX]:
vtype[data[s.INT_IDX]] = copt.COPT.INTEGER
# Build cone data
ncone = 0
nconedim = 0
if dims[s.SOC_DIM]:
ncone = len(dims[s.SOC_DIM])
nconedim = sum(dims[s.SOC_DIM])
nlinrow = dims[s.EQ_DIM] + dims[s.LEQ_DIM]
nlincol = A.shape[1]
diag = sp.spdiags(np.ones(nconedim), -nlinrow, A.shape[0], nconedim)
A = sp.csc_matrix(sp.hstack([A, diag]))
c = np.append(c, np.zeros(nconedim))
lb = np.append(lb, -copt.COPT.INFINITY * np.ones(nconedim))
ub = np.append(ub, +copt.COPT.INFINITY * np.ones(nconedim))
lb[nlincol] = 0.0
if len(dims[s.SOC_DIM]) > 1:
for dim in dims[s.SOC_DIM][:-1]:
nlincol += dim
lb[nlincol] = 0.0
if data[s.BOOL_IDX] or data[s.INT_IDX]:
vtype = np.append(vtype, [copt.COPT.CONTINUOUS] * nconedim)
# Load matrix data
model.loadMatrix(c, A, lhs, rhs, lb, ub, vtype)
# Load cone data
if dims[s.SOC_DIM]:
model.loadCone(ncone, None, dims[s.SOC_DIM],
range(A.shape[1] - nconedim, A.shape[1]))
# Set parameters
for key, value in solver_opts.items():
model.setParam(key, value)
solution = {}
try:
model.solve()
# Reoptimize if INF_OR_UNBD, to get definitive answer.
if model.status == copt.COPT.INF_OR_UNB and solver_opts.get('reoptimize', True):
model.setParam(copt.COPT.Param.Presolve, 0)
model.solve()
if dims[s.PSD_DIM]:
if model.haslpsol:
solution[s.VALUE] = model.objval
# Recover the primal solution
nrow = len(c)
duals = model.getDuals()
psdduals = model.getPsdDuals()
y = np.zeros(nrow)
for i in range(nrow):
if rowmap[i] < 0:
y[i] = -psdduals[-rowmap[i] - 1]
else:
y[i] = -duals[rowmap[i] - 1]
solution[s.PRIMAL] = y
# Recover the dual solution
solution['y'] = np.hstack((model.getValues(), model.getPsdValues()))
solution[s.EQ_DUAL] = solution['y'][0:dims[s.EQ_DIM]]
solution[s.INEQ_DUAL] = solution['y'][dims[s.EQ_DIM]:]
else:
if model.haslpsol or model.hasmipsol:
solution[s.VALUE] = model.objval
solution[s.PRIMAL] = np.array(model.getValues())
# Get dual values of linear constraints if not MIP
if not (data[s.BOOL_IDX] or data[s.INT_IDX]) and model.haslpsol:
solution['y'] = -np.array(model.getDuals())
solution[s.EQ_DUAL] = solution['y'][0:dims[s.EQ_DIM]]
solution[s.INEQ_DUAL] = solution['y'][dims[s.EQ_DIM]:]
except Exception:
pass
solution[s.SOLVE_TIME] = model.solvingtime
solution[s.NUM_ITERS] = model.barrieriter + model.simplexiter
if dims[s.PSD_DIM]:
if model.status == copt.COPT.INFEASIBLE:
solution[s.STATUS] = s.UNBOUNDED
elif model.status == copt.COPT.UNBOUNDED:
solution[s.STATUS] = s.INFEASIBLE
else:
solution[s.STATUS] = self.STATUS_MAP.get(model.status, s.SOLVER_ERROR)
else:
solution[s.STATUS] = self.STATUS_MAP.get(model.status, s.SOLVER_ERROR)
if solution[s.STATUS] == s.USER_LIMIT and model.hasmipsol:
solution[s.STATUS] = s.OPTIMAL_INACCURATE
if solution[s.STATUS] == s.USER_LIMIT and not model.hasmipsol:
solution[s.STATUS] = s.INFEASIBLE_INACCURATE
solution['model'] = model
return solution
| true
|
21fc8d786d56cd1ad6886086d16bba490c1f89dd
|
Python
|
demi52/mandy
|
/BI_6.0.7_WebUI_AUTOTOOLS_003/BI_6.0.7_WebUI_AUTOTOOLS_03/BI_6.0.7_WebUI_AUTOTOOLS_03/addtestcase/_addtestall_func.py
|
UTF-8
| 2,720
| 2.734375
| 3
|
[] |
no_license
|
#author='鲁旭'
"""
默认执行test_case 目录下的所有用例,可根据配置过滤
"""
import os
import re
import importlib
import unittest
from config.conf import Suite as s
def case_list(case_dir=s.case_dir, suite_dir=s.suite_dir):
"""
获取待执行的目录下的所有测试用例脚本
:param casedir: 测试用例所在目录
:param suite_dir: 测试套件所在的目录
:return: 返回所有测试用例脚本名列表
"""
pat = r"%s.+?py" % suite_dir
root_path = re.compile(pat).sub("", os.path.realpath(__file__))
case_path = "%s%s" % (root_path, case_dir)
test_case_modle = ""
for dirnow, dirs, files in os.walk(case_path):
for file in files:
if file.endswith("py") and file != "__init__.py":
test_case_modle += "\n%s/%s" % (dirnow, file)
test_case_modle = re.compile(r"\\|/").sub(".", test_case_modle)
for i in s.remove_dirs:
i = re.compile(r"\\|/").sub("\.", i)
if i not in ("", "*") and i != ".":
test_case_modle=re.compile(r".+?%s\..*?%s\..*?py" % (case_dir, i)).sub("", test_case_modle)
if i == "C3":
test_case_modle = re.compile(r".+?%s\..*?%s.*?py" % (case_dir, i) ).sub("", test_case_modle)
pat2 = r"%s.+?(?=\.py)" % (case_dir)
test_case_modle_list = re.findall(pat2, test_case_modle)
# print(test_case_modle_list)
return test_case_modle_list
def suite(**kwargs):
"""
添加用例目录树下,所有用例
:param casedir: 用例目录
:param suite_dir:当前文件的上级目录
:return:
"""
suites = unittest.TestSuite()
test_case_script_list = case_list(**kwargs)
    # collect all test-case script files
if test_case_script_list:
        for test_case_name in test_case_script_list:
            module_name = importlib.import_module(test_case_name)
            test_class_list = [c for c in dir(module_name) if c.startswith("Test")]
            # collect all test classes in the current test script
            if test_class_list:
                for test_class_name in test_class_list:
                    test_func_list = [f for f in dir(eval("module_name.%s" % test_class_name)) if f.startswith("test_")]
                    # collect all test functions of this class in the current module
                    if test_func_list:
                        for test_func_name in test_func_list:
                            # add the test function
                            suites.addTest(eval("module_name.%s('%s')" % (test_class_name, test_func_name)))
return suites
if __name__ == "__main__":
    sui = suite()
    for case in sui:
        print(case)
    print(len(list(sui)))
| true
|
80b8b52eaef17deb0565aca80d66751eaa45e27e
|
Python
|
ryf1123/cpp-Compiler-for-Pascal-by-Python
|
/not_available/一些资料/ComPasc-master/project/src/ThreeAddrCode.py
|
UTF-8
| 4,225
| 3.078125
| 3
|
[] |
no_license
|
import os
import sys
# import SymTable as SymTab # Is it required ?
class ThreeAddrCode:
'''
Class holding the three address code, links with symbol table
'''
def __init__(self,symTab):
'''
args:
symTable: symbol table constructed after parsing
'''
self.code = []
self.jump_list = ["JMP","JL","JG","JGE","JLE","JNE","JE","JZ"]
self.binary_list = ["+","-","*","/","MOD","OR","AND","SHL","SHR","CMP"]
self.operator_list = ["UNARY","=","LOADREF","STOREREF","CALL","LABEL","PARAM","RETURN","RETRUNVAL","PRINT","SCAN"]
# This is for stack handling of all local variables of a function
self.tempToOffset = {}
self.symTab = symTab
def mapOffset(self):
#print self.symTab.localVals
for scope in self.symTab.table.keys():
            offset = 0  # starting offset for this scope's locals (grows negative below)
scope_entry = self.symTab.table[scope]
func_name = scope_entry['Name']
self.tempToOffset[func_name] = {}
mapDick = self.tempToOffset[func_name]
width = 0
#print "Scope:",scope
# First adding the local variables
for var in scope_entry['Ident'].keys():
varEntry = self.symTab.Lookup(var, 'Ident')
if func_name != 'main':
if varEntry.parameter == False:
#print "Var in mapping, offset: ",var, offset
# First fetch the variables from the scope
mapDick[var] = offset
                        # Now update the offset
                        offset = offset - self.symTab.width(varEntry.typ, var)
varEntry.offset = offset
width = width + self.symTab.width(varEntry.typ, var)
#print "var : ", var, " , offset : ", str(offset)
# Now handling the temporaries.
for temp in self.symTab.localVals[func_name]:
#print "Temp in mapping, offset: ",temp, offset
objectVar = temp.split("_")
if len(objectVar) == 2:
# This local variable corresponds to an object variable
objName = objectVar[0]
varName = objectVar[1]
objEntry = self.symTab.Lookup(func_name + "_" + objName, 'Ident')
objOffset = objEntry.offset
for param in objEntry.params:
if param[0] == varName:
offset = objOffset + param[3]
mapDick[temp] = offset
break
offset = objOffset
continue
offset = offset - 4 # temporaries are size 4
mapDick[temp] = offset
width = width + 4
# This is for keeping the stack size for a local function
scope_entry['width'] = width
#print self.tempToOffset
def emit(self,op,lhs,op1,op2):
'''
Writes the proper 3AC code: removes strings from symbol table entries
'''
self.code.append([op,lhs,op1,op2])
def addlineNumbers(self):
for i,code in enumerate(self.code):
#print (code)
op, lhs, op1, op2 = code
self.code[i] = [str(i+1)] + code
def display_code(self):
'''
For pretty printing the 3AC code stored here
        WARNING: Still not complete yet. self.code won't work. Has objects referring to the symbol table
The point of this to finally emit all the code generated, in the way desired.
'''
for i, code in enumerate(self.code):
# print "In 3ADR, display: ",code
LineNumber, op, lhs, op1, op2 = code
if type(lhs) != type(""):
lhs = lhs.name
if type(op1) != type(""):
op1 = op1.name
if type(op2) != type(""):
op2 = op2.name
print ("#" + LineNumber + ", " + op + ", " + lhs + ", " + op1 + ", " + op2)
| true
|
b51a02f191698a1ce511c3cfc10ff87d7e9a2c78
|
Python
|
vnaveen0/practice_python
|
/String/reverseString.py
|
UTF-8
| 420
| 3.25
| 3
|
[] |
no_license
|
class Solution(object):
def reverseString(self, s):
"""
:type s: List[str]
:rtype: None Do not return anything, modify s in-place instead.
"""
        L = len(s)
        mid = L // 2  # integer division; L / 2 is a float in Python 3
        for idx in range(mid):
            # swap values
            s[idx], s[L - 1 - idx] = s[L - 1 - idx], s[idx]
return s
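# Example: Solution().reverseString(list("hello")) -> ['o', 'l', 'l', 'e', 'h']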
| true
|
93a54baa33b7185171211be61652baa0581beaa9
|
Python
|
christopherohit/Guess-Number
|
/Intro.py
|
UTF-8
| 2,461
| 3.734375
| 4
|
[] |
no_license
|
import sys
import Menu
def Intro():
while True:
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("~ //=== __ __ ____ ___ ___ || ==== ~")
print("~ || === || || ||__ ||__ ||__ || || ~")
print("~ || || || || || || || || || ~")
print("~ \\\_|| \\\__// ||__ __|| __|| || || ~")
print(" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ")
print("\n About this game:")
print("After a stressful and tiring working time, you need to rest")
print("because you are too tired but can't sleep because of")
print("the stress in your body. So you need to find a form of entertainment")
print("that helps you relieve stress, focus your mind and develop your predictive ability.")
print("Well this is the game created to help you in that. Briefly about the game:")
print("The game includes many different modes with increasing levels and the ability")
print("to solve problems also decreases, but the rules of play are unchanged.")
print("Specifically, you will have n turns corresponding to the difficulty you have chosen and")
print("with your super brain you can deploy levels of algorithms to solve the given problem")
print("within a given limit or use spiritual elements such as grandparents, guardian ancestors, prophesies of the universe,")
print("space-time machine, holder of time, decider of numbers, saint of")
print("loops, destroyer of Pythagoras, lord of only In general, you can use any method to guess the")
print("number you need to find in a certain number of turns, you win or you lose, don't say much Ok")
print("\n")
        Agreed = input("Do you agree with us? (y/n) ")
if Agreed not in ("Y" , "N" , "y" , "n"):
print("Invalid submit. Please check again !!!")
continue
elif Agreed == "Y" or Agreed == "y":
Menu.Menu()
elif Agreed == "N" or Agreed == "n":
sys.exit()
def Continue():
print ("Do you want continue game ? (y/n)")
select = input()
if select == "y" or select == "Y":
return Menu.Menu()
elif select == "n" or select == "N":
return -1
else:
print ("Invalid answer")
return Continue()
| true
|
509ef67931916af047250456cc83d9b0f6b0a4e9
|
Python
|
pym7857/CodingTestStudy
|
/2020 KaKao Blind Recruitment/pang/괄호변환.py
|
UTF-8
| 986
| 3.28125
| 3
|
[] |
no_license
|
def split(p):
if p=='':
return ''
else:
count=0
for i,n in enumerate(p):
if n==")":
count-=1
else:
count+=1
if count==0:
break
return p[:i+1],p[i+1:]
def checkTrue(u):
count=0
for i in u:
        if i=='(':  # every opening bracket must be matched by a closing one later
count+=1
else:
count-=1
if count < 0:
return False
return True
def makeTrue(s):
    try:
        u, v = split(s)
    except ValueError:
        # split('') returns '' (a single value), so the unpacking above fails
        return ''
answer=''
if checkTrue(u):
answer+=u
answer+=makeTrue(v)
return answer
else:
answer+='('
answer+=makeTrue(v)
answer+=')'
u=u[1:-1]
for i in u:
if i=='(':
answer+=')'
else:
answer+='('
return answer
def solution(p):
answer = makeTrue(p)
return answer
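# Hand-checked example: solution(")(") returns "()": u = ")(" fails checkTrue,
# so it is wrapped as '(' + makeTrue('') + ')' and the emptied core u[1:-1]
# contributes nothing.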
| true
|
5f8b46af6ec7c3f5eea16474b2826b1d73b5e6e5
|
Python
|
brunoisy/kaggle_quora
|
/model_2.py
|
UTF-8
| 992
| 2.734375
| 3
|
[] |
no_license
|
import ktrain
import pandas as pd
from sklearn.model_selection import train_test_split
MODEL_NAME = 'distilbert-base-uncased'
TRAINING_DATA_FILE = "data/train.csv"
max_qst_length = 100 # max number of words in a question to use
###
# data preparation
train = pd.read_csv(TRAINING_DATA_FILE)[:1000]
ids = train['qid'].values
X = train['question_text'].values
y = train['target'].values
print("accuracy baseline : ", 1 - round(sum(y) / len(y), 3), "% of questions are sincere")
ids_train, ids_test, X_train, X_test, y_train, y_test = train_test_split(ids, X, y, test_size=0.2, random_state=2020)
del X, y # save RAM
transformer = ktrain.text.Transformer(MODEL_NAME, maxlen=max_qst_length, class_names=[0, 1])
data_train = transformer.preprocess_train(X_train, y_train)
data_test = transformer.preprocess_test(X_test, y_test)
model = transformer.get_classifier()
learner = ktrain.get_learner(model, train_data=data_train, val_data=data_test, batch_size=6)
learner.fit_onecycle(5e-5, 4)
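# A possible next step (sketch; check the ktrain docs for the exact API):
#   predictor = ktrain.get_predictor(learner.model, preproc=transformer)
#   predictor.predict(["Is this an example question?"])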
| true
|
467d6cbb5ff27446b5f91d0a73080c19fd97ef0c
|
Python
|
Erotemic/netharn
|
/netharn/layers/attention.py
|
UTF-8
| 6,542
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""
References:
https://arxiv.org/pdf/1809.02983.pdf - Dual Attention Network for Scene Segmentation
https://raw.githubusercontent.com/heykeetae/Self-Attention-GAN/master/sagan_models.py
"""
import torch
from torch import nn
class SelfAttention(nn.Module):
"""
Self Attention Layer
References:
"""
def __init__(self, in_channels):
super(SelfAttention, self).__init__()
        self.in_channels = in_channels
self.query_conv = nn.Conv2d(in_channels=in_channels,
out_channels=in_channels // 8,
kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_channels,
out_channels=in_channels // 8, kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_channels,
out_channels=in_channels, kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
"""
Args :
x (Tensor): input feature maps (B x C x W x H)
Returns :
out : self attention value + input feature
attention: B x N x N (N is Width*Height)
"""
B, C, W, H = x.shape
N = W * H
        proj_query = self.query_conv(x).view(B, -1, N).permute(0, 2, 1)  # B x N x (C // 8)
        proj_key = self.key_conv(x).view(B, -1, N)  # B x (C // 8) x N
        energy = torch.bmm(proj_query, proj_key)  # B x N x N
        attention = self.softmax(energy)  # B x N x N
        proj_value = self.value_conv(x).view(B, -1, N)  # B x C x N
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(B, C, W, H)
out = self.gamma * out + x
return out, attention
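# Usage sketch (in_channels must be >= 8 so that in_channels // 8 >= 1):
#   layer = SelfAttention(64)
#   out, attn = layer(torch.rand(2, 64, 16, 16))
#   # out: (2, 64, 16, 16), attn: (2, 256, 256)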
class ChannelAttention(nn.Module):
"""
Channel attention module
The channel attention module selectively emphasizes interdependent channel
maps by integrating associated features among all channel map.
Uses the uncentered scatter matrix (i.e. M @ M.T) to compute a unnormalized
correlation-like matrix between channels.
I think M @ M.T is an "uncentered scatter matrix"
https://stats.stackexchange.com/questions/164997/relationship-between-gram-and-covariance-matrices
not sure if this is the right term
References:
https://arxiv.org/pdf/1809.02983.pdf - Dual Attention Network for Scene Segmentation
https://github.com/junfu1115/DANet/blob/master/encoding/nn/attention.py
Notes:
Different from the position attention module, we directly calculate
the channel attention map from the original features.
Noted that we do not employ convolution layers to embed features
        before computing relationships of two channels, since it can maintain
relationship between different channel maps. In addition, different
from recent works [Zhang CVPR 2018 Context encoding for semantic
segmentation] which explores channel relationships by a global pooling
or encoding layer, we exploit spatial information at all corresponding
positions to model channel correlations
Ignore:
>>> # Simple example to demonstrate why a multiplicative parameter
>>> # at zero might or might not deviate to decrease the loss
>>> x = torch.randn(10)
>>> x[0] = -1000
>>> p = nn.Parameter(torch.zeros(1) + 1e-1)
>>> optim = torch.optim.SGD([p], lr=1e-1)
>>> for i in range(10):
>>> loss = (x * (p ** 2)).sum()
>>> loss.backward()
>>> print('loss = {!r}'.format(loss))
>>> print('p.data = {!r}'.format(p.data))
>>> print('p.grad = {!r}'.format(p.grad))
>>> optim.step()
>>> optim.zero_grad()
>>> # at zero might or might not deviate to decrease the loss
>>> x = torch.randn(2)
>>> x[0] = -1000
>>> p = nn.Parameter(torch.zeros(1))
>>> optim = torch.optim.SGD([p], lr=1e-1)
>>> for i in range(10):
>>> loss = (x * p.clamp(0, None)).sum()
>>> loss.backward()
>>> print('loss = {!r}'.format(loss))
>>> print('p.data = {!r}'.format(p.data))
>>> print('p.grad = {!r}'.format(p.grad))
>>> optim.step()
>>> optim.zero_grad()
Ignore:
>>> B, C, H, W = 1, 3, 5, 7
>>> inputs = torch.rand(B, C, H, W)
>>> inputs = torch.arange(B * C * H * W).view(B, C, H, W).float()
>>> self = ChannelAttention(C)
>>> optim = torch.optim.SGD(self.parameters(), lr=1e-8)
>>> for i in range(10):
>>> out = self(inputs)
>>> loss = (out.sum() ** 2)
>>> print('self.gamma = {!r}'.format(self.gamma))
>>> print('loss = {!r}'.format(loss))
>>> loss.backward()
>>> optim.step()
>>> optim.zero_grad()
"""
def __init__(self, in_channels, attend_elsewhere=True):
super(ChannelAttention, self).__init__()
self.in_channels = in_channels
        # hack to rectify the definition in the paper with the implementation
self.attend_elsewhere = attend_elsewhere
# scale parameter (beta from paper)
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, inputs):
"""
Args:
inputs (Tensor): input feature maps (B, C, H, W)
Returns:
out (Tensor): attention value + input feature
attention: (B, C, C)
Example:
>>> B, C, H, W = 1, 3, 5, 7
>>> inputs = torch.rand(B, C, H, W)
>>> self = ChannelAttention(C)
"""
B, C, H, W = inputs.shape
# Flatten spatial dims
proj_query = inputs.view(B, C, -1) # A
proj_key = inputs.view(B, C, -1).permute(0, 2, 1) # A.T
proj_value = inputs.view(B, C, -1) # A
energy = torch.bmm(proj_query, proj_key) # A @ A.T
if self.attend_elsewhere:
# Why the subtraction here?
diag = torch.max(energy, dim=1, keepdim=True)[0].expand_as(energy)
energy_new = diag - energy
attention = energy_new.softmax(dim=1)
else:
attention = energy.softmax(dim=1)
out = torch.bmm(attention, proj_value)
out = out.view(B, C, H, W)
residual = self.gamma * out
out = residual + inputs
return out
| true
|
9816894b2e45fa4e949701ce38003a0059d6c127
|
Python
|
mprasu/Sample-Projects
|
/pythonprojects/Pyhton poc's/retail poc/trans.py
|
UTF-8
| 1,652
| 2.875
| 3
|
[] |
no_license
|
import MySQLdb
import csv
db=MySQLdb.connect("localhost","root","root","charan")
cursor=db.cursor()
fo=None
fo=open("trans.csv")
trans=csv.reader(fo)
data=list(trans)
sql="DROP TABLE IF EXISTS transactions"
cursor.execute(sql)
sql="create table transactions(id int,chain int,dept int,category int,company int,brand int,dt char(20),productsize float,productmeasure char(20),purchasequantity int,purchaseamount float)"
cursor.execute(sql)
for rec in data:
sql="INSERT INTO transactions(id,chain,dept,category,company,brand,dt,productsize,productmeasure,purchasequantity,purchaseamount)VALUES('%d','%d','%d','%d','%d','%d','%s','%f','%s','%d','%f')"%(int(rec[0]),int(rec[1]),int(rec[2]),int(rec[3]),int(rec[4]),int(rec[5]),str(rec[6]),float(rec[7]),str(rec[8]),int(rec[9]),float(rec[10]))
cursor.execute(sql)
db.commit()
def top_2_customers():
sql="select id,sum(purchaseamount)as custspendings from transactions group by id order by custspendings desc limit 2"
cursor.execute(sql)
res=cursor.fetchall()
print res
def top_2_brands():
sql="select brand,sum(purchaseamount)as custspendings from transactions group by brand order by custspendings desc limit 2"
cursor.execute(sql)
res=cursor.fetchall()
print res
def chain_wise_sales():
sql="select chain,sum(purchaseamount),sum(purchasequantity) from transactions group by chain"
cursor.execute(sql)
res=cursor.fetchall()
print res
print("enter \n 1 for top two customers\n 2 for top two brands\n 3 for chain wise sales")
s=raw_input("enter choice ")
if s=="1":
top_2_customers()
elif s=="2":
top_2_brands()
elif s=="3":
chain_wise_sales()
else:
print("enter a valid choice")
db.close()
| true
|
c5607288a678c183e4c0893c9d1795248737b279
|
Python
|
sound-round/python-project-lvl2
|
/gendiff/file_parser.py
|
UTF-8
| 385
| 2.921875
| 3
|
[] |
no_license
|
import json
import yaml
INPUT_FORMATS = ['json', 'yaml', 'yml']
def parse(file_data, format):
if format == 'json':
return json.load(file_data)
if format in ['yaml', 'yml']:
return yaml.load(file_data, Loader=yaml.FullLoader)
raise ValueError(
f'Unknown input format: {format}. '
f'Choose from {", ".join(INPUT_FORMATS)}.'
)
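# Usage sketch (the fixture path below is illustrative):
#   with open('tests/fixtures/file1.json') as f:
#       data = parse(f, 'json')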
| true
|
58d3a85bcf64a4e2f4d37f704f2bc3d29bd1b40d
|
Python
|
Kederly84/PyhonBasics
|
/HomeWork/HomeWork1.py
|
UTF-8
| 1,222
| 3.984375
| 4
|
[] |
no_license
|
# Task 1
# Ask the user for a duration in seconds
duration = int(input('Enter a time in seconds'))
days = duration // 86400  # 86400 seconds per day: compute the number of days
hours = (duration - days * 86400) // 3600  # 3600 seconds per hour: compute the hours
minutes = (duration - days * 86400 - hours * 3600) // 60  # compute the minutes
seconds = duration - days * 86400 - hours * 3600 - minutes * 60  # compute the remaining seconds
# Print the result depending on the value the user entered,
# without redundant components
if duration >= 86400:
    print('You entered', days, 'd', hours, 'h', minutes, 'min', seconds, 's')
elif 3600 <= duration < 86400:
    print('You entered', hours, 'h', minutes, 'min', seconds, 's')
elif 60 <= duration < 3600:
    print('You entered', minutes, 'min', seconds, 's')
else:
    print('You entered', duration, 's')
| true
|
9fc8337ed3a160826752311bc90da11d4b957d65
|
Python
|
Joldnine/know-your-house
|
/back-end/apis/prediction/api.py
|
UTF-8
| 2,235
| 2.703125
| 3
|
[] |
no_license
|
import os
import pickle
import json
import sklearn  # noqa: F401 -- not used directly, but a dependency of the pickled models
import numpy  # noqa: F401
import scipy  # noqa: F401
class Prices(object):
def predict_price(self, town_area, flat_type, time_step, floor_area_sqm, age, floor, mrt_distance, num_mall, num_mrt,
num_school):
pkl_filename = "model.pkl"
des_filename = town_area + ' ' + flat_type + '.pkl'
directory = "./pkl dataset"
        for file in os.listdir(directory):
            if file == des_filename:
                pkl_filename = file
                break
if pkl_filename == des_filename:
with open(directory + "/" + pkl_filename, 'rb') as file:
pickle_model = pickle.load(file)
Xtest = [[time_step, floor_area_sqm, age, floor, mrt_distance, num_mall, num_mrt, num_school]]
#score = pickle_model.score(Xtest, Ytest)
#print("Test score: {0:.2f} %".format(100 * score))
Ypredict = pickle_model.predict(Xtest)
return Ypredict[0][0]
else:
return 0
def post(event, context):
# event = json.loads(event['body']) # comment this line if it is in aws
town_area = event['town_area']
flat_type = event['flat_type']
time_step = 0
floor_area_sqm = event['area_sqm']
age = event['age']
floor = event['floor']
mrt_distance = event['mrt_distance']
num_mall = event['num_mall']
num_mrt = event['num_mrt']
num_school = event['num_school']
predict = Prices().predict_price(town_area, flat_type, time_step, floor_area_sqm, age, floor, mrt_distance,
num_mall, num_mrt, num_school)
print(predict)
return {
"body": json.dumps({
"price": predict
}),
"statusCode": 200
}
if __name__ == "__main__":
town_areas = "ANG MO KIO"
flat_types = "3 ROOM"
time_step = 0
floor_area_sqm = 122.0
age = 19
floor = 3.0
mrt_distance = 0.7
num_mall = 3
num_mrt = 2
num_school = 2
predict = Prices().predict_price(town_areas,flat_types,time_step,floor_area_sqm,age,floor,mrt_distance,num_mall,num_mrt,num_school)
print(predict)
| true
|
91ff6041f816c04425b46634c3bfd0d13041cdbe
|
Python
|
StarBrand/CC5114-Tareas
|
/tarea1/scripts/network_on_iris.py
|
UTF-8
| 3,416
| 2.953125
| 3
|
[] |
no_license
|
"""network_on_iris.py: show performance of a neural network on iris dataset"""
import matplotlib.pyplot as plt
import numpy as np
import logging
from argparse import ArgumentParser
from random import seed
from neural_network import NeuralNetwork, NormalizedNetwork
from useful.math_functions import sigmoid, tanh
from useful.preprocess_dataset import import_data, one_hot_encoding
from useful.results import StandardTrainer, KFoldTrainer
from useful.results import confusion_matrix, accuracy, precision, recall, f1_score, show_matrix
FIG_SIZE = (20 * 2, 20)
TITLE_SIZE = 40
FONT_SIZE = 25
TRAIN_SIZE = 0.7
LR = 0.01
N = int(1e4)
np.random.seed(2)
seed(2)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
parser = ArgumentParser()
parser.add_argument("-n", "--normalize", default=False, action="store_true")
parser.add_argument("-x", "--cross_validation", type=int)
args = parser.parse_args()
# Initialize network
network = NeuralNetwork(4, [6], 3, [tanh, sigmoid], LR)
filename = "network"
type_net = "Neural"
k_fold = ""
if args.normalize:
network = NormalizedNetwork(4, [6], 3, [tanh, sigmoid], LR)
type_net = "Normalized"
filename = type_net.lower()
# iris dataset
dataset = import_data("../../data/iris.data")
labels, encoding = one_hot_encoding(dataset[-1])
classes = list(encoding.keys())
dataset = dataset[0:-1]
# Define Trainer
trainer = StandardTrainer(dataset, labels.T, TRAIN_SIZE)
k = 1
if args.cross_validation is not None:
k = args.cross_validation
k_fold = "_{}fold".format(k)
trainer = KFoldTrainer(k, 2, dataset, labels.T)
fig = plt.figure(figsize=FIG_SIZE)
fig.subplots_adjust(wspace=0.3)
ax = fig.add_subplot(121)
ax2 = ax.twinx()
ax3 = fig.add_subplot(122)
lines = []
c_m = np.array([])
iteration = ""
for i in range(k):
trained, (learn, costs) = trainer.train(network, epochs=N, repeat=True)
prediction = trainer.evaluate(trained)
if c_m.shape != (0, ):
c_m = c_m + confusion_matrix(prediction, trainer.get_labels())
else:
c_m = confusion_matrix(prediction, trainer.get_labels())
line = ax.plot(learn, label="Learning Curve", linewidth=2.5)
if k != 1:
iteration = " iteration: {}".format(i + 1)
c = line[0].get_color()
else:
c = "r"
line2 = ax2.plot(costs, label="MSE{}".format(iteration), linestyle="--", linewidth=2.5, c=c)
lines = lines + line + line2
ax.set_ylabel("Learning Curve", fontsize=FONT_SIZE)
ax.set_xlabel("Epochs", fontsize=FONT_SIZE)
ax.set_title("{} Network on Iris\n".format(type_net), fontsize=TITLE_SIZE)
ax.grid()
ax2.set_ylabel("Cost", fontsize=FONT_SIZE)
ax2.grid()
labels = [l.get_label() for l in lines]
ax2.legend(lines, labels, fontsize=FONT_SIZE, loc="center right")
show_matrix(ax3, c_m, (classes, ["Predicted\n{}".format(iris) for iris in classes]),
"Confusion Matrix of Test Set\n", FONT_SIZE, TITLE_SIZE)
print("Accuracy:\t{}".format(accuracy(c_m)))
print("Precision:\t{}".format(precision(c_m)))
print("Recall:\t{}".format(recall(c_m)))
print("f1-score:\t{}".format(f1_score(c_m)))
plt.savefig("../results/{}_on_iris{}.png".format(filename, k_fold))
| true
|
3c9afc95a9999d1e2542c09646bb96ec5a107bf4
|
Python
|
gsk120/ibk_python_progrmming
|
/mycode/lab/2age_cal.py
|
UTF-8
| 612
| 4.0625
| 4
|
[] |
no_license
|
"""
나이 = 현재년도 - 태어난년도 + 1
태어난 년도는 input() 함수를 사용하여 입력 받는다.
"""
#from 모듈명 import 클래스명 또는 함수명
from datetime import datetime as dt
print(dt.today())
print(dt.today().year)
print(dt.today().month)
current_year = dt.today().year
print("태어난 년도를 입력하세요")
birth_year = int(input())
print(current_year, birth_year)
age = current_year - birth_year + 1
if 17 <= age < 20:
print('고등학생입니다.')
elif (20 <= age) and (age <= 27):
print('대학생입니다.')
else:
print('학생이 아닙니다.')
| true
|
f4cc0761578e4501650636dd7dee74c877ee9f5b
|
Python
|
Shin-jay7/LeetCode
|
/0451_sort_characters_by_frequency.py
|
UTF-8
| 419
| 3.359375
| 3
|
[] |
no_license
|
from __future__ import annotations
from collections import Counter
class Solution:
def frequencySort(self, s: str) -> str:
ans = ""
for char, freq in Counter(s).most_common():
ans += char * freq
return ans
test = Solution()
test.frequencySort("tree") # "eert"
test = Solution()
test.frequencySort("cccaaa") # "aaaccc"
test = Solution()
test.frequencySort("Aabb") # "bbAa"
| true
|
0c83b592d6d164347ac6e642cdd7ad0f3f5bd4c4
|
Python
|
tsb4/dayTradingEnv
|
/gym_anytrading/envs/trading_env.py
|
UTF-8
| 5,178
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
import gym
from gym import spaces
from gym.utils import seeding
import pandas as pd
import numpy as np
from enum import Enum
import matplotlib.pyplot as plt
import csv
import gym_anytrading.datasets.b3 as b3
class TradingEnv(gym.Env):
def __init__(self):
self.n_stocks = 10
self.W = 2
self.count = 0
self.count_episodes = -1
self.max_steps = 5
#self.action = [1/(self.n_stocks+1)]*(self.n_stocks+1)
self.state = None
csv_filename = '../../../gym_anytrading/datasets/data/B3_COTAHIST.csv'
#csv_filename = 'gym_anytrading/datasets/data/B3_COTAHIST.csv'
self.df = pd.read_csv(csv_filename, parse_dates=True, index_col='Date')
#print(self.df.head())
## spaces
self.action_space = spaces.Box(low=0, high=1.0, shape=(self.n_stocks+1,), dtype=np.float32)
self.observation_space = spaces.Box(low=0.0, high=10.0, shape=((self.W+1)*(self.n_stocks+1), ), dtype=np.float32)
self.beta = 1
def seed(self, seed=None):
pass
def reset(self):
self.count = 0
self.count_episodes += 1
return self.receive_state().flatten()
#self._done = False
#self._current_tick = self._start_tick
#self._last_trade_tick = self._current_tick - 1
#self._position = Positions.Short
#self._position_history = (self.window_size * [None]) + [self._position]
#self._total_reward = 0.
#self._total_profit = 1. # unit
#self._first_rendering = True
#self.history = {}
#return self._get_observation()
#pass
def normalizeAction(self, action):
        action = np.array(action)
        new_action = [i / action.sum() for i in action]
#print(new_action, np.array(new_action).sum())
return new_action
def receive_state(self):
state = []
#print("AQUI.......")
for j in range(self.W, -1, -1):
start_point =self.n_stocks*self.W + self.count_episodes*self.max_steps*self.n_stocks + (self.count-j)*self.n_stocks
df_new = self.df.iloc[start_point:start_point+10]
df_new = df_new.iloc[:,[1,4]]
#print(self.count, df_new)
obs = [1]
for i in range(self.n_stocks):
#print(line)
obs.append(df_new.iloc[i, 1]/df_new.iloc[i, 0])
#print(obs)
state.append(np.array(obs))
#print(np.array(state))
return np.array(state)
#start_point = self.count_episodes*self.max_steps*self.n_stocks + self.count*self.n_stocks
#df_new = self.df.iloc[start_point:start_point+10]
#df_new = df_new.iloc[:,[1,4]]
#print(self.count, df_new)
#obs = [1]
#for i in range(self.n_stocks):
# #print(line)
# obs.append(df_new.iloc[i, 1]/df_new.iloc[i, 0])
#print(obs)
#state.append(obs)
#self.holdings = self.holdings -
#new_action = normalizeAction(action)
def calculate_reward(self, action):
#self.state = self.observation_space.sample()
#print(self.state)
reward = self.beta*np.dot(self.state[-1], action)
done = False
if(self.count>=self.max_steps):
done = True
#print("REWARD ", reward)
return reward, done
#valueOfHolding = data["Close"]
#self.portifolio = valueOfHolding*self.holdings
def step(self, action):
action = self.normalizeAction(action)
self.state = self.receive_state()
#print(state)
self.count +=1
reward, done = self.calculate_reward(action)
#self.history.insert(0, [self.count, state, reward])
#if(len(self.history)>3):
# self.history.pop(3)
#print(self.history[0][1])
#self._done = False
#self._current_tick += 1
#if self._current_tick == self._end_tick:
# self._done = True
#step_reward = self._calculate_reward(action)
#self._total_reward += step_reward
#self._update_profit(action)
#trade = False
#if ((action == Actions.Buy.value and self._position == Positions.Short) or
# (action == Actions.Sell.value and self._position == Positions.Long)):
# trade = True
#if trade:
# self._position = self._position.opposite()
# self._last_trade_tick = self._current_tick
#self._position_history.append(self._position)
#observation = self._get_observation()
#info = dict(
# total_reward = self._total_reward,
# total_profit = self._total_profit,
# position = self._position.value
#)
#self._update_history(info)
return self.state.flatten(), reward, done, []
def readData(self):
ficheiro = open('gym_anytrading/datasets/data/STOCKS_AMBEV.csv', 'r')
reader = csv.DictReader(ficheiro, delimiter = ',')
#print(reader)
#for linha in reader:
# print (linha["Close"])
return reader
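# Rollout sketch (assumes the CSV paths referenced above exist on disk):
#   env = TradingEnv()
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())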
| true
|
8a5ca5dc204978cb3cf4500fa3aa77b432ab271f
|
Python
|
mchrzanowski/ProjectEuler
|
/src/python/Problem060.py
|
UTF-8
| 2,922
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
'''
Created on Feb 5, 2012
@author: mchrzanowski
'''
from ProjectEulerPrime import ProjectEulerPrime
from time import time
def find5WayPrimes(primeList, primeObject):
for a in xrange(len(primeList) - 4):
first = str(primeList[a])
for b in xrange(a + 1, len(primeList) - 3):
second = str(primeList[b])
if primeObject.isPrime(first + second) and primeObject.isPrime(second + first):
for c in xrange(b + 1, len(primeList) - 2):
third = str(primeList[c])
if primeObject.isPrime(first + third) and primeObject.isPrime(third + first) \
and primeObject.isPrime(third + second) and primeObject.isPrime(second + third):
for d in xrange(c + 1, len(primeList) - 1):
fourth = str(primeList[d])
if primeObject.isPrime(first + fourth) and primeObject.isPrime(fourth + first) \
and primeObject.isPrime(fourth + second) and primeObject.isPrime(second + fourth) \
and primeObject.isPrime(fourth + third) and primeObject.isPrime(third + fourth):
for e in xrange(d + 1, len(primeList)):
fifth = str(primeList[e])
if primeObject.isPrime(fifth + first) and primeObject.isPrime(first + fifth) \
and primeObject.isPrime(fifth + second) and primeObject.isPrime(second + fifth) \
and primeObject.isPrime(fifth + third) and primeObject.isPrime(third + fifth) \
and primeObject.isPrime(fifth + fourth) and primeObject.isPrime(fourth + fifth):
return [int(first), int(second), int(third), int(fourth), int(fifth)]
return None
def main():
start = time()
primeObject = ProjectEulerPrime()
LIMIT = 10000 # setting a limit too high enables finding 5-way pairs that have huge last numbers (eg, 20000)
# 10,000 found through trial and error to be sufficient.
primeList = [x for x in xrange(LIMIT) if primeObject.isPrime(x)]
solutionList = find5WayPrimes(primeList, primeObject)
print "Solutions: ", solutionList
print "Sum: ", sum(solutionList)
end = time()
print "Runtime: ", end - start, " seconds."
if __name__ == '__main__':
main()
| true
|
4ac7c9112965fc6ac403f9badfd87889a2359658
|
Python
|
imaimon1/Learn-Python-the-Hard-Way
|
/ex3.py
|
UTF-8
| 276
| 3.53125
| 4
|
[] |
no_license
|
print "I will now count my chickens:"
print "Hens", 25.+30./6.
print "Roosters", 100.-25.*3.%4.
print "Now I will count the eggs"
print 3.+2.+1.-5.+4.%2.-1./4.+6.
print "Is it true that 3+2< 5-7?"
print 3.+2.<5.-7.
print "what is 3+2",3.+2.
#more boring stuff
| true
|
e65976723046095cbe711bd1c8a7c425775d21f8
|
Python
|
timchu/myanimelist-scraper
|
/scraper.py
|
UTF-8
| 3,843
| 3.203125
| 3
|
[] |
no_license
|
"""A scraper to identify shared voice actors/actresses in myanimelist."""
from lxml import html
import requests
import sys
from os import path
from urlparse import urlparse
# """ Takes as input a page, and outputs a list of (actor, character). """
# def getChars(tree):
# char_list = tree.xpath('//td/a[contains(@href, "/character/")]')
# chars = [s.text for s in char_list]
# return chars
def getLanguage(actor_html):
return actor_html.getparent().getchildren()[2].text
""" Gets the list of Japanese actors from a page."""
def getJActorsHtml(page):
tree = html.fromstring(page.text)
eng_and_jap_actors_html = tree.xpath('//td/a[contains(@href, "/people/")]')
return [e for e in eng_and_jap_actors_html if getLanguage(e) == 'Japanese']
""" Gets the character HTML from an actor HTML."""
def getChar(actor_html):
common_root_html = actor_html
for i in range(5):
common_root_html = common_root_html.getparent()
return common_root_html.getchildren()[1].getchildren()[0]
# output: actor : [chars played by actor]
# adds to an existing acmap
def getActorCharMap(page, acmap, title):
tree = html.fromstring(page.text)
for actor_html in getJActorsHtml(page):
name = actor_html.text
char = getChar(actor_html).text
if name not in acmap:
acmap[name] = {title: [char]}
elif title not in acmap[name]:
acmap[name][title] = [char]
else:
acmap[name][title].append(char)
def retryRequestGet(url, times=3):
for i in xrange(times):
page = requests.get(url)
if page.status_code == 200:
return page
raise RuntimeError('Could not get url {}'.format(url))
# output: {actor : { title : characters played in title}}
def getActorCharacterMap(urls, anime_titles):
acmap = {}
for i in xrange(len(urls)):
title = anime_titles[i]
page = retryRequestGet(urls[i])
getActorCharMap(page, acmap, title)
return acmap
# counts the number of keys in a map
def numKeys(m):
keyCount = 0
for key in m:
keyCount += 1
return keyCount
# removes keys in a map whose value is a map with <= 1 key.
def pruneMap(mapOfMaps):
pruned_map = {}
for key in mapOfMaps:
if numKeys(mapOfMaps[key]) > 1:
pruned_map[key] = mapOfMaps[key]
return pruned_map
# Some formatting on the output.
def printMap(m):
for i in m:
printMap2(m[i])
print "Voiced By: (", i, ")"
print ""
def printMap2(m):
for i in m:
print m[i], " : ", i
# Helper function to get the anime title from the list of URLs.
def getAnimeName(a_url):
return a_url.split('/')[-2].replace('_', ' ')
def printUsageAndExit():
print '''Usage: python {prog} [anime url] [anime url] [anime url] ...
Example: python {prog} 'http://myanimelist.net/anime/2001/Tengen_Toppa_Gurren_Lagann' 'http://myanimelist.net/anime/5114/Fullmetal_Alchemist__Brotherhood' '''.format(prog=sys.argv[0])
sys.exit(1)
def validateMALUrl(url):
try:
p = urlparse(url)
assert p.scheme == 'http'
assert p.netloc == 'myanimelist.net'
path_parts = p.path.split('/')
# '', 'anime', '10165', 'Nichijou'
assert len(path_parts) == 4
assert path_parts[1] == 'anime'
# check for integer anime id
int(path_parts[2])
    except (AssertionError, ValueError):
raise AssertionError('{} is not a proper MAL url'.format(url))
def scrape(anime_urls):
for url in anime_urls:
validateMALUrl(url)
print anime_urls, '\n'
character_urls = [path.join(url, 'characters') for url in anime_urls]
anime_titles = [getAnimeName(url) for url in character_urls]
prunedMap = pruneMap(getActorCharacterMap(character_urls, anime_titles))
printMap(prunedMap)
def main():
if len(sys.argv) <= 1:
printUsageAndExit()
anime_urls = sys.argv[1:]
scrape(anime_urls)
if __name__ == '__main__':
main()
| true
|
e81318fa0a4930a4c98adf1a7ff6784eb90fcb7a
|
Python
|
statsmodels/statsmodels
|
/statsmodels/tsa/arima/estimators/innovations.py
|
UTF-8
| 9,639
| 2.609375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""
Innovations algorithm for MA(q) and SARIMA(p,d,q)x(P,D,Q,s) model parameters.
Author: Chad Fulton
License: BSD-3
"""
import warnings
import numpy as np
from scipy.optimize import minimize
from statsmodels.tools.tools import Bunch
from statsmodels.tsa.innovations import arma_innovations
from statsmodels.tsa.stattools import acovf, innovations_algo
from statsmodels.tsa.statespace.tools import diff
from statsmodels.tsa.arima.specification import SARIMAXSpecification
from statsmodels.tsa.arima.params import SARIMAXParams
from statsmodels.tsa.arima.estimators.hannan_rissanen import hannan_rissanen
def innovations(endog, ma_order=0, demean=True):
"""
Estimate MA parameters using innovations algorithm.
Parameters
----------
endog : array_like or SARIMAXSpecification
Input time series array, assumed to be stationary.
ma_order : int, optional
Maximum moving average order. Default is 0.
demean : bool, optional
Whether to estimate and remove the mean from the process prior to
fitting the moving average coefficients. Default is True.
Returns
-------
parameters : list of SARIMAXParams objects
List elements correspond to estimates at different `ma_order`. For
example, parameters[0] is an `SARIMAXParams` instance corresponding to
`ma_order=0`.
other_results : Bunch
Includes one component, `spec`, containing the `SARIMAXSpecification`
instance corresponding to the input arguments.
Notes
-----
The primary reference is [1]_, section 5.1.3.
This procedure assumes that the series is stationary.
References
----------
.. [1] Brockwell, Peter J., and Richard A. Davis. 2016.
Introduction to Time Series and Forecasting. Springer.
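    Examples
    --------
    A minimal sketch (synthetic data, purely illustrative)::
        >>> import numpy as np
        >>> endog = np.random.normal(size=100)
        >>> params, _ = innovations(endog, ma_order=2)
        >>> params[2].ma_params  # estimated MA(2) coefficients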
"""
spec = max_spec = SARIMAXSpecification(endog, ma_order=ma_order)
endog = max_spec.endog
if demean:
endog = endog - endog.mean()
if not max_spec.is_ma_consecutive:
raise ValueError('Innovations estimation unavailable for models with'
' seasonal or otherwise non-consecutive MA orders.')
sample_acovf = acovf(endog, fft=True)
theta, v = innovations_algo(sample_acovf, nobs=max_spec.ma_order + 1)
ma_params = [theta[i, :i] for i in range(1, max_spec.ma_order + 1)]
sigma2 = v
out = []
for i in range(max_spec.ma_order + 1):
spec = SARIMAXSpecification(ma_order=i)
p = SARIMAXParams(spec=spec)
if i == 0:
p.params = sigma2[i]
else:
p.params = np.r_[ma_params[i - 1], sigma2[i]]
out.append(p)
# Construct other results
other_results = Bunch({
'spec': spec,
})
return out, other_results
def innovations_mle(endog, order=(0, 0, 0), seasonal_order=(0, 0, 0, 0),
demean=True, enforce_invertibility=True,
start_params=None, minimize_kwargs=None):
"""
Estimate SARIMA parameters by MLE using innovations algorithm.
Parameters
----------
endog : array_like
Input time series array.
order : tuple, optional
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters. Default is (0, 0, 0).
seasonal_order : tuple, optional
The (P,D,Q,s) order of the seasonal component of the model for the
AR parameters, differences, MA parameters, and periodicity. Default
is (0, 0, 0, 0).
demean : bool, optional
Whether to estimate and remove the mean from the process prior to
fitting the SARIMA coefficients. Default is True.
enforce_invertibility : bool, optional
Whether or not to transform the MA parameters to enforce invertibility
in the moving average component of the model. Default is True.
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization. The
AR polynomial must be stationary. If `enforce_invertibility=True` the
        MA polynomial must be invertible. If not provided, default starting
parameters are computed using the Hannan-Rissanen method.
minimize_kwargs : dict, optional
Arguments to pass to scipy.optimize.minimize.
Returns
-------
parameters : SARIMAXParams object
other_results : Bunch
Includes four components: `spec`, containing the `SARIMAXSpecification`
instance corresponding to the input arguments; `minimize_kwargs`,
containing any keyword arguments passed to `minimize`; `start_params`,
containing the untransformed starting parameters passed to `minimize`;
and `minimize_results`, containing the output from `minimize`.
Notes
-----
The primary reference is [1]_, section 5.2.
Note: we do not include `enforce_stationarity` as an argument, because this
function requires stationarity.
TODO: support concentrating out the scale (should be easy: use sigma2=1
and then compute sigma2=np.sum(u**2 / v) / len(u); would then need to
redo llf computation in the Cython function).
TODO: add support for fixed parameters
TODO: add support for secondary optimization that does not enforce
stationarity / invertibility, starting from first step's parameters
References
----------
.. [1] Brockwell, Peter J., and Richard A. Davis. 2016.
Introduction to Time Series and Forecasting. Springer.
"""
spec = SARIMAXSpecification(
endog, order=order, seasonal_order=seasonal_order,
enforce_stationarity=True, enforce_invertibility=enforce_invertibility)
endog = spec.endog
if spec.is_integrated:
warnings.warn('Provided `endog` series has been differenced to'
' eliminate integration prior to ARMA parameter'
' estimation.')
endog = diff(endog, k_diff=spec.diff,
k_seasonal_diff=spec.seasonal_diff,
seasonal_periods=spec.seasonal_periods)
if demean:
endog = endog - endog.mean()
p = SARIMAXParams(spec=spec)
if start_params is None:
sp = SARIMAXParams(spec=spec)
# Estimate starting parameters via Hannan-Rissanen
hr, hr_results = hannan_rissanen(endog, ar_order=spec.ar_order,
ma_order=spec.ma_order, demean=False)
if spec.seasonal_periods == 0:
# If no seasonal component, then `hr` gives starting parameters
sp.params = hr.params
else:
# If we do have a seasonal component, estimate starting parameters
# for the seasonal lags using the residuals from the previous step
_ = SARIMAXSpecification(
endog, seasonal_order=seasonal_order,
enforce_stationarity=True,
enforce_invertibility=enforce_invertibility)
ar_order = np.array(spec.seasonal_ar_lags) * spec.seasonal_periods
ma_order = np.array(spec.seasonal_ma_lags) * spec.seasonal_periods
seasonal_hr, seasonal_hr_results = hannan_rissanen(
hr_results.resid, ar_order=ar_order, ma_order=ma_order,
demean=False)
# Set the starting parameters
sp.ar_params = hr.ar_params
sp.ma_params = hr.ma_params
sp.seasonal_ar_params = seasonal_hr.ar_params
sp.seasonal_ma_params = seasonal_hr.ma_params
sp.sigma2 = seasonal_hr.sigma2
# Then, require starting parameters to be stationary and invertible
if not sp.is_stationary:
sp.ar_params = [0] * sp.k_ar_params
sp.seasonal_ar_params = [0] * sp.k_seasonal_ar_params
if not sp.is_invertible and spec.enforce_invertibility:
sp.ma_params = [0] * sp.k_ma_params
sp.seasonal_ma_params = [0] * sp.k_seasonal_ma_params
start_params = sp.params
else:
sp = SARIMAXParams(spec=spec)
sp.params = start_params
if not sp.is_stationary:
raise ValueError('Given starting parameters imply a non-stationary'
' AR process. Innovations algorithm requires a'
' stationary process.')
if spec.enforce_invertibility and not sp.is_invertible:
raise ValueError('Given starting parameters imply a non-invertible'
' MA process with `enforce_invertibility=True`.')
def obj(params):
p.params = spec.constrain_params(params)
return -arma_innovations.arma_loglike(
endog, ar_params=-p.reduced_ar_poly.coef[1:],
ma_params=p.reduced_ma_poly.coef[1:], sigma2=p.sigma2)
# Untransform the starting parameters
unconstrained_start_params = spec.unconstrain_params(start_params)
# Perform the minimization
if minimize_kwargs is None:
minimize_kwargs = {}
if 'options' not in minimize_kwargs:
minimize_kwargs['options'] = {}
minimize_kwargs['options'].setdefault('maxiter', 100)
minimize_results = minimize(obj, unconstrained_start_params,
**minimize_kwargs)
# TODO: show warning if convergence failed.
# Reverse the transformation to get the optimal parameters
p.params = spec.constrain_params(minimize_results.x)
# Construct other results
other_results = Bunch({
'spec': spec,
'minimize_results': minimize_results,
'minimize_kwargs': minimize_kwargs,
'start_params': start_params
})
return p, other_results
| true
|
2ef215cd82995b997a3f5e5d4b65773d0520dd2c
|
Python
|
gustkdxo007/Solving-Algorithm
|
/PROGRAMMERS/GREEDY/섬연결하기.py
|
UTF-8
| 660
| 3.140625
| 3
|
[] |
no_license
|
def solution(n, costs):
answer = 0
parent = [x for x in range(n+1)]
costs.sort(key=lambda x: x[2])
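    # Kruskal's MST: scan edges in increasing cost order and keep an edge iff
    # its endpoints are still in different sets (tracked by the union-find below).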
def find_parent(x, parent):
if parent[x] != x:
parent[x] = find_parent(parent[x], parent)
return parent[x]
def union(a, b, parent):
x = find_parent(a, parent)
y = find_parent(b, parent)
if x < y:
parent[y] = x
else:
parent[x] = y
for s, t, c in costs:
if find_parent(s, parent) != find_parent(t, parent):
answer += c
union(s, t, parent)
return answer
print(solution(4, [[0,1,1],[0,2,2],[1,2,5],[1,3,1],[2,3,8]]))
| true
|
7aef427e917bc0e466ccd3adf85d019b95597f8c
|
Python
|
RadkaValkova/SoftUni-Web-Developer
|
/Programming Basics Python/02 Simple_Conditions_Exam Problems/Sleepy_Tom.py
|
UTF-8
| 426
| 3.828125
| 4
|
[] |
no_license
|
rests_days = int(input())
work_days = 365-rests_days
minutes_in_year = work_days * 63 + rests_days * 127
if minutes_in_year >= 30000:
print('Tom will run away')
print(f'{(minutes_in_year - 30000) // 60} hours and {(minutes_in_year - 30000)% 60} minutes more for play')
else:
print('Tom sleeps well')
print(f'{(30000 - minutes_in_year) // 60} hours and {(30000 - minutes_in_year) % 60} minutes less for play')
| true
|
0d3e9ef1797c9467916fbb420a2066829fb2159a
|
Python
|
NathanaelCarauna/UriResolucoesPython
|
/1131.py
|
UTF-8
| 806
| 3.328125
| 3
|
[] |
no_license
|
continuar = 1
grenais = 0
interVitorias = 0
gremioVitorias = 0
empates =0
while continuar ==1:
grenais +=1
golsInter, golsGremio = map(int,input().split())
if golsInter>golsGremio:
interVitorias+=1
elif(golsInter==golsGremio):
empates+=1
else:
gremioVitorias+=1
new = -1
while new!= 1 and new!=2:
new = int(input("Novo grenal (1-sim 2-nao)\n"))
if new == 1:
pass
elif new == 2:
continuar = 2
print("%d grenais" %(grenais))
print("Inter:%d" %(interVitorias))
print("Gremio:%d" %(gremioVitorias))
print("Empates:%d" %(empates))
if interVitorias>gremioVitorias:
print("Inter venceu mais")
elif interVitorias == gremioVitorias:
print("Nao houve vencedor")
else:
print("Gremio venceu mais")
| true
|
6f18dcaa7ad2e87e8d8b9160626246e15ead357f
|
Python
|
alxmancilla/data_migrator
|
/employee_migration.py
|
UTF-8
| 3,423
| 2.8125
| 3
|
[] |
no_license
|
import datetime
import mysql.connector
import pymongo
def get_MySQL_Cnx():
# For local use
cnx = mysql.connector.connect(user='demo', password='demo00',
host='127.0.0.1',
database='employees')
return cnx
def get_MDB_cnx():
# For local use
# conn = pymongo.MongoClient("mongodb://demo:demo00@mycluster0-shard-00-00.mongodb.net:27017,mycluster0-shard-00-01.mongodb.net:27017,mycluster0-shard-00-02.mongodb.net:27017/admin?ssl=true&replicaSet=Mycluster0-shard-0&authSource=admin")
conn=pymongo.MongoClient("mongodb://localhost:27017")
return conn
def get_employee_salaries(_cnx, emp_no):
_cursor = _cnx.cursor()
salaries = []
subquery = ("SELECT salary, from_date, to_date "
"FROM employees.salaries WHERE emp_no = %(emp_no)s ")
#print "subquery {} ".format(subquery)
_cursor.execute(subquery, { "emp_no": emp_no })
for (salary, from_date, to_date) in _cursor:
salary = {
"salary" : salary,
"from_date" : datetime.datetime.strptime(from_date.isoformat(), "%Y-%m-%d"),
"to_date" : datetime.datetime.strptime(to_date.isoformat(), "%Y-%m-%d"),
}
#print "Adding salary {}".format(salary)
salaries.append(salary)
_cursor.close()
return salaries
def migrate_employee_data():
# Connection to Mongo DB
cursor = cnx.cursor()
mdb_cnx = get_MDB_cnx()
print("Connection established successfully!!!")
print("{}".format(datetime.datetime.now()))
query = ("SELECT emp_no, birth_date, first_name, last_name, gender, hire_date "
"FROM employees.employees LIMIT 1000")
cursor.execute(query)
for (emp_no, birth_date, first_name, last_name, gender, hire_date) in cursor:
employee={
"emp_no": emp_no,
"first_name": first_name,
"last_name": last_name,
"gender": gender,
"birth_date": datetime.datetime.strptime(birth_date.isoformat(), "%Y-%m-%d"),
"hire_date": datetime.datetime.strptime(hire_date.isoformat(), "%Y-%m-%d"),
"current_salary": "",
"salaries": [],
}
employee['salaries'] = get_employee_salaries(cnx_2, emp_no)
        employee['current_salary'] = employee['salaries'][-1]['salary']
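        # Resulting MongoDB document shape (values purely illustrative):
        # {"emp_no": 10001, ..., "current_salary": 88958,
        #  "salaries": [{"salary": 60117, "from_date": datetime(...), ...}, ...]}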
#print "Inserting employee {}".format(emp_no)
# inserting the data into MongoDB database
#print(".", end=' ')
insert_employee_data(mdb_cnx, employee)
print(".")
print("{}".format(datetime.datetime.now()))
print("Migration completed successfully!!!")
cursor.close()
cnx.close()
cnx_2.close()
cnx_3.close()
mdb_cnx.close()
def insert_employee_data(conn, employee):
collection = conn.demo.employees
emp_id = collection.insert_one(employee)
return emp_id
if __name__ == "__main__":
# For local use
cnx = get_MySQL_Cnx()
cnx_2 = get_MySQL_Cnx()
cnx_3 = get_MySQL_Cnx()
start_time = datetime.datetime.utcnow()
migrate_employee_data()
end_time = datetime.datetime.utcnow()
print("end time: ", end_time)
print( (end_time - start_time), " seconds")
| true
|
2749fb360331454345a30cde57213266d181c50f
|
Python
|
BennyJane/python-demo
|
/SqlIndex/A4p3.py
|
UTF-8
| 2,724
| 2.984375
| 3
|
[] |
no_license
|
import random
import sqlite3
import time
from settings import DB_NAMES
from utils import load_country_data
from utils import get_average
from utils import change_index
# PART 3: query the maximum price among parts made in a randomly chosen country
EXECUTE_NUMS = 100
SELECT_SQL = "SELECT * FROM Parts WHERE madeIn = '{}' order by partPrice desc limit 1;"
# SELECT_SQL = "SELECT * FROM Parts WHERE partPrice = (SELECT MAX(partPrice) FROM Parts WHERE madeIn = '{}');"
# SELECT_SQL = "SELECT * FROM Parts WHERE partPrice = (SELECT * FROM Parts WHERE madeIn = '{}' order by partPrice desc limit 1);"
# Create index: idxMadeIn
CREATE_INDEX = "CREATE INDEX idxMadeIn ON Parts ( MadeIn );"
DROP_INDEX = "DROP INDEX idxMadeIn;"
# Create index: idxPartPrice
CREATE_INDEX2 = "CREATE INDEX idxPartPrice ON Parts ( partPrice );"
DROP_INDEX2 = "DROP INDEX idxPartPrice;"
# Create index: idxPartPriceAndMadeIn
CREATE_INDEX3 = "CREATE INDEX idxPartPriceAndMadeIn ON Parts ( partPrice, madeIn );"
DROP_INDEX3 = "DROP INDEX idxPartPriceAndMadeIn;"
def execute_query():
origin_data = load_country_data()
random.shuffle(origin_data)
for db_name in DB_NAMES:
conn = sqlite3.connect(db_name)
print(f"Opening {db_name}")
time_point1 = time.time()
for _ in range(EXECUTE_NUMS):
select_q1 = SELECT_SQL.format(random.choice(origin_data)["Code"])
conn.execute(select_q1)
time_point2 = time.time()
for _ in range(EXECUTE_NUMS):
select_q2 = SELECT_SQL.format(random.choice(origin_data)["Code"])
conn.execute(select_q2)
time_point3 = time.time()
q1_time_sum = (time_point2 - time_point1)
q2_time_sum = (time_point3 - time_point2)
print(f"Average query time for Query Q1: {get_average(q1_time_sum)} ms")
print(f"Average query time for Query Q2: {get_average(q2_time_sum)} ms")
conn.close()
print(f"Closing {db_name}")
# The second index turned out to be the fastest.
def main():
print("Executing Part 3\n")
print("Executing Task A")
execute_query()
    # Test the first index setup
print("\nCreating Index1")
change_index(CREATE_INDEX)
print("\nExecuting Task B")
execute_query()
print("\nDrop Index")
change_index(DROP_INDEX)
    # Test the second index setup
print("\nCreating Index2")
change_index(CREATE_INDEX2)
print("\nExecuting Task B")
execute_query()
print("\nDrop Index2")
change_index(DROP_INDEX2)
    # Test the third index setup
    print("\nCreating Index3")
    change_index(CREATE_INDEX3)
    print("\nExecuting Task B")
    execute_query()
    print("\nDrop Index3")
    change_index(DROP_INDEX3)
if __name__ == '__main__':
main()
| true
|
7d269c6c2ad74df44f5f4adf39374d75c709fd8d
|
Python
|
HalfMoonFatty/L
|
/007. HashTable.py
|
UTF-8
| 1,989
| 4.09375
| 4
|
[] |
no_license
|
"""Thread-safe hash table.
"""
from threading import Lock
class HashTable(object):
def __init__(self, capacity):
self.data = [[] for _ in range(capacity)]
self.capacity = capacity
self.size = 0
self.lock = Lock()
def __str__(self):
return '\n'.join(str(bucket) for bucket in self.data)
def _Rehash(self):
        # Double the size of the hash table and rehashes all existing kv pairs.
# Caller must hold table lock.
self.capacity *= 2
new_data = [[] for _ in range(self.capacity)]
for bucket in self.data:
for key, value in bucket:
new_data[self._Hash(key)].append((key, value))
self.data = new_data
def _Hash(self, key):
# Computes the hash value of the given key.
return int(''.join([str(ord(c)) for c in str(key)])) % self.capacity
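        # Worked example (capacity 5): key 12 -> ord('1')=49, ord('2')=50,
        # so int('4950') % 5 == 0 and the key lands in bucket 0.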
def Put(self, key, value):
# Stores the kv pair in hash table
self.Remove(key)
with self.lock:
self.data[self._Hash(key)].append((key, value))
self.size += 1
if self.size == self.capacity:
self._Rehash()
def Get(self, key):
# Gets the value for the given key. Raise KeyError if key is not found.
with self.lock:
bucket = self.data[self._Hash(key)]
for k, v in bucket:
if k == key:
return v
        raise KeyError('Element not found for key ' + str(key))
def Remove(self, key):
# Removes the kv pair for the given key. No-op if key is not found.
with self.lock:
bucket = self.data[self._Hash(key)]
for i in range(len(bucket)):
if bucket[i][0] == key:
bucket.remove(bucket[i])
self.size -= 1
return
# Test cases.
ht = HashTable(5)
ht.Put(1, 'a')
ht.Put(2, 'b')
ht.Put(3, 'c')
ht.Put(4, 'd')
ht.Put(5, 'e')
ht.Put(6, 'f')
assert ht.Get(1) == 'a'
assert ht.Get(2) == 'b'
assert ht.Get(3) == 'c'
assert ht.Get(4) == 'd'
assert ht.Get(5) == 'e'
assert ht.Get(6) == 'f'
ht.Remove(4)
print ht
# ht.Get(7) # <-- This should raise a KeyError exception.
| true
|
d2bba206432a3c0290dfec85bc07c03603df4560
|
Python
|
seminvest/investment
|
/daily_scan_pricewarning_5_11_2019.py
|
UTF-8
| 2,819
| 2.90625
| 3
|
[] |
no_license
|
#https://stackoverflow.com/questions/48071949/how-to-use-the-alpha-vantage-api-directly-from-python
#https://www.profitaddaweb.com/2018/07/alpha-vantage-preprocessed-free-apis-in.html
import requests
import alpha_vantage
import json
import pandas as pd
import datetime
import numpy as np
import time
from mpl_finance import candlestick_ohlc
import matplotlib
import matplotlib.dates as mdates
import matplotlib.path as mpath
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from matplotlib import style
import os
import sys
import colorama
from colorama import Fore, Style
def get_stock_time_frame(symbol, start_date, end_date):
dates=pd.date_range(start_date,end_date)
df1=pd.DataFrame(index=dates)
root = '/Users/ruitang/Dropbox/Program/Stock_Analysis'
day = 'daily_data'
subdir = os.path.join(root, day,symbol + '.csv')
df_temp=pd.read_csv(subdir,index_col="date",parse_dates=True,na_values=['nan'])
df1=df1.join(df_temp,how='inner')
df1.to_csv('tmp.csv')
return df1
def compute_returns_general(df,general):
"""Compute and return the daily return values."""
# TODO: Your code here
# Note: Returned DataFrame must have the same number of rows
daily_returns = df.copy()
columnname = str(general)+"days"
#daily_returns = 0
daily_returns[general:] = (df[general:]/df[:-general].values)-1
daily_returns = daily_returns.rename(columns={"close":columnname})
daily_returns.iloc[0:general] = 0
#daily_returns.round(3)
return daily_returns.round(3)
if __name__ == "__main__":
print(sys.argv[1])
data = pd.read_csv("/Users/ruitang/Dropbox/Program/Stock_Analysis/watchlist.csv")
start_date='2017-12-29'
end_date=sys.argv[1]
threshold=0.81
for j in range(len(data)):
symbol=data.loc[j,'symbol']
FV=data.loc[j,'FV']
df= get_stock_time_frame(symbol,start_date,end_date)
df1=df.loc[start_date:end_date,'adjusted close']
end_price=df1.iloc[-1]
if float(end_price)< float(FV)*0.65:
#DV=round(float(FV)*threshold)
print(Fore.GREEN + symbol,end_price,FV,'strong buy')
print(Style.RESET_ALL)
elif float(FV)*0.81>=float(end_price)> float(FV)*0.65:
print(Fore.BLUE + symbol,end_price,FV,'buy')
print(Style.RESET_ALL)
elif float(FV)>=float(end_price)> float(FV)*0.81:
print(Fore.BLACK + symbol,end_price,FV,'hold')
print(Style.RESET_ALL)
elif float(FV)*1.2 >= float(end_price)> float(FV):
print(Fore.MAGENTA + symbol,end_price,FV,'sell')
print(Style.RESET_ALL)
elif float(end_price)> float(FV)*1.2:
print(Fore.RED+ symbol,end_price,FV,'strong sell')
print(Style.RESET_ALL)
| true
|
4a2994e626e8edba470a16e7eb39b97a9a61038a
|
Python
|
ranqiu92/ReorderNAT
|
/util.py
|
UTF-8
| 2,043
| 2.765625
| 3
|
[] |
no_license
|
import random
import numpy as np
import torch
import torch.nn as nn
class Transformer_LR_Schedule():
def __init__(self, model_size, warmup_steps):
self.model_size = model_size
self.warmup_steps = warmup_steps
def __call__(self, step):
step += 1
scale = self.model_size ** -0.5
scale *= min(step ** -0.5, step * self.warmup_steps ** -1.5)
return scale
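# Illustrative hookup (values are placeholders, not taken from this repo):
# these schedules are plain callables, so they plug straight into LambdaLR, e.g.
#   scheduler = torch.optim.lr_scheduler.LambdaLR(
#       optimizer, lr_lambda=Transformer_LR_Schedule(model_size=512, warmup_steps=4000))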
class Linear_LR_Schedule():
def __init__(self, initial_lr, final_lr, total_steps):
self.initial_lr = initial_lr
self.slope = (initial_lr - final_lr) / total_steps
def __call__(self, step):
scale = 1.0 - step * self.slope / self.initial_lr
scale = max(scale, 0.)
return scale
def set_random_seed(seed, is_cuda):
if seed > 0:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
if is_cuda and seed > 0:
torch.cuda.manual_seed(seed)
return seed
def sequence_mask(lengths, max_len=None):
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return (torch.arange(0, max_len)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1)))
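# Worked example (illustrative): sequence_mask(torch.tensor([1, 3]), max_len=3)
# -> tensor([[True, False, False], [True, True, True]])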
def init_weights(module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def mean(input, input_len=None):
if input_len is not None:
max_len = input.size(1)
mask = ~sequence_mask(input_len, max_len).to(input.device)
masked_input = input.masked_fill(mask.unsqueeze(-1), 0)
input_sum = torch.sum(masked_input, dim=1)
input_mean = input_sum / input_len.unsqueeze(-1).float()
return input_mean
else:
return torch.mean(input, dim=1)
| true
|
049e754dc40f80c0997fd6aef453bc91f4d914ea
|
Python
|
Andy-Fraley/investment_scraper
|
/investment_data_json2csv.py
|
UTF-8
| 4,417
| 2.6875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import argparse
from util import util
import sys
import os
import re
import json
import csv
def ListDatasets(investment_json_filename):
investment_data = json.load(investment_json_filename)
dataset_names = []
for trading_symbol in investment_data:
for dataset_name in investment_data[trading_symbol]:
if dataset_name not in dataset_names:
dataset_names.append(dataset_name)
if len(dataset_names) > 0:
dataset_names.sort()
print('Datasets:')
for dataset_name in dataset_names:
print(dataset_name)
else:
print('Strange. Data has no datasets')
return
def DictionaryDepth(d, level=1):
if not isinstance(d, dict) or not d:
return level
return max(DictionaryDepth(d[k], level + 1) for k in d)
def Dataset2StringName(dataset):
s = dataset.replace('%', 'perc')
s = s.replace(' ', '_')
s = s.replace('/', '_')
return s.lower()
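# e.g. Dataset2StringName('Price/Earnings %') -> 'price_earnings_perc'
# (hypothetical dataset name, for illustration only)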
def ExtractTimestampPrefix(s):
m = re.search('([0-9]{14})_', s)
if m:
return m.group(0)
else:
return ''
def ExtractDataset2CsvFile(investment_json_filename, extract_dataset):
dataset_stringname = Dataset2StringName(extract_dataset)
timestamp_prefix = ExtractTimestampPrefix(investment_json_filename.name)
investment_data = json.load(investment_json_filename)
found_data = False
output_csv_file = csv.writer(open('./tmp/' + timestamp_prefix + dataset_stringname + '.csv', 'w'))
for trading_symbol in investment_data:
for dataset_name in investment_data[trading_symbol]:
if dataset_name == extract_dataset:
found_data = True
dataset_stringname = Dataset2StringName(dataset_name)
dataset_depth = DictionaryDepth(investment_data[trading_symbol][dataset_name])
if dataset_depth == 3:
for stat_name in investment_data[trading_symbol][dataset_name]:
for timeperiod in investment_data[trading_symbol][dataset_name][stat_name]:
output_csv_file.writerow([str(trading_symbol), str(stat_name), str(timeperiod),
str(investment_data[trading_symbol][dataset_name][stat_name][timeperiod])])
elif dataset_depth == 2:
for stat_name in investment_data[trading_symbol][dataset_name]:
output_csv_file.writerow([str(trading_symbol), str(stat_name),
str(investment_data[trading_symbol][dataset_name][stat_name])])
return
# Fake class only for purpose of limiting global namespace to the 'g' object
class g:
args = None
def main(argv):
global g
parser = argparse.ArgumentParser()
parser.add_argument('--extract-dataset', required=False, help='Name of dataset in the input JSON file to ' \
'extract into output CSV file. NOTE: Output file will be timestamped derivative of input JSON file and ' \
'dataset name.')
parser.add_argument('--list-datasets', action='store_true', help='If specified, overrides all other flags and ' \
'opens input JSON file and dumps list of datasets found in the file.')
parser.add_argument('--investment-json-filename', required=False, type=argparse.FileType('r'),
help='Name of input JSON file containing investment data retrieved using get_investment_data.py')
parser.add_argument('--message-output-filename', required=False, help='Filename of message output file. If ' +
'unspecified, defaults to stderr')
g.args = parser.parse_args()
message_level = 'Info'
util.set_logger(message_level, g.args.message_output_filename, os.path.basename(__file__))
if not ( g.args.list_datasets and g.args.investment_json_filename is not None) and \
(g.args.investment_json_filename is None or g.args.extract_dataset is None):
print('NOTE: Must specify either (--investment-json-filename and --list-datasets) or '\
'(--investment-json-filename and --extract-dataset)')
parser.print_help()
util.sys_exit(0)
if g.args.list_datasets:
ListDatasets(g.args.investment_json_filename)
else:
ExtractDataset2CsvFile(g.args.investment_json_filename, g.args.extract_dataset)
util.sys_exit(0)
if __name__ == "__main__":
main(sys.argv[1:])
| true
|
5530fa1e694854a8190418131fed32d9f95dd5a8
|
Python
|
jeffvswanson/LeetCode
|
/0557_ReverseWordsInAString3/python/test_solution.py
|
UTF-8
| 615
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
import pytest
import solution
@pytest.mark.parametrize(
"s,expected",
[
("Let's take LeetCode contest", "s'teL ekat edoCteeL tsetnoc"),
("God Ding", "doG gniD"),
("h", "h"),
],
)
def test_initial_solution(s, expected):
got = solution.initial_solution(s)
assert got == expected
@pytest.mark.parametrize(
"s,expected",
[
("Let's take LeetCode contest", "s'teL ekat edoCteeL tsetnoc"),
("God Ding", "doG gniD"),
("h", "h"),
],
)
def test_faster_solution(s, expected):
got = solution.faster_solution(s)
assert got == expected
| true
|
9fff032c0cfa0697cfa94bff52ee1b2648391987
|
Python
|
namujinju/study-note
|
/python/Hon Gong Pa/200627.py
|
UTF-8
| 839
| 4.34375
| 4
|
[] |
no_license
|
# Python does not pin a data type to a variable, but that makes a TypeError
# more likely, so it is best to keep a single data type per variable.
string = "안녕하세요"
string += "!"
string += "!"
print(string)
# number = input("인사말을 입력하세요> ") # whatever the user enters, input() always returns a string
# print(type(number))
a = input("첫 번째 숫자")
b = input("두 번째 숫자")
c = float(a) + int(b)
print(c)
a = input("첫 번째 글자")
b = input("두 번째 글자")
c = a + b
print(c)
output = str(52)
print(type(output))
# Exercise 6
a = input("문자열 입력> ")
b = input("문자열 입력> ")
print(a, b) # try a swap before learning tuples // exchanging variables
c = a
a = b
b = c
print(a, b)
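# With tuple packing the same swap is a one-liner: a, b = b, a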
| true
|
515a737624becc148aa4bd67633a3e5eedaba469
|
Python
|
fallengravity/python-playground
|
/main.py
|
UTF-8
| 1,032
| 3.109375
| 3
|
[] |
no_license
|
from web3 import Web3
import urllib.request, json
import decimal
w3 = Web3(Web3.HTTPProvider("https://rpc.ether1.cloud"))
# Replace the address below with your own
address = Web3.toChecksumAddress('0xfbd45d6ed333c4ae16d379ca470690e3f8d0d2a2')
balance = w3.eth.getBalance(address)
balance_formatted = w3.fromWei(balance, 'ether')
pizza_cost = 4.99
with urllib.request.urlopen(
"https://min-api.cryptocompare.com/data/price?fsym=ETHO&tsyms=USD"
) as url:
data = json.loads(url.read().decode())
wallet_value = balance_formatted * decimal.Decimal(data["USD"])
pizza_count = wallet_value / decimal.Decimal(pizza_cost)
print("Balance in Wei: " + str(balance))
print("Price of a Pizza: $" + str(pizza_cost))
print("Balance in ETHO: " + str(balance_formatted))
print("Value of 1 ETHO in USD: $" + str(data["USD"]))
print("Current Value of your Ether-1 Wallet in USD: $" +
str(format(wallet_value, '.2f')))
print("You can currently afford " + str(format(pizza_count, '.2f')) +
" pizzas from Little Ceasars")
| true
|
efb851d541b987e54701ecdace48e443abb97c2a
|
Python
|
KRMA-Radio/Server-Side-Analytics
|
/Access.py
|
UTF-8
| 3,974
| 2.859375
| 3
|
[] |
no_license
|
import json
import time
__author__ = 'Isaac'
class Access:
def __init__(self, ip, at: time.struct_time, http_method, host, page, response_code, http_referer, user_agent, length):
self.ip = ip
self.time = at
self.http_method = http_method
self.host = host
self.page = page
self.response_code = response_code
self.http_referer = http_referer
self.user_agent = user_agent
self.length = length
# We compare by the date at which the event took place
def __cmp__(self, other):
return time.mktime(self.time).__cmp__(time.mktime(other.time))
def __str__(self):
return json.dumps(self.__dict__)
'''
Parses a line like: 162.251.161.74 - admin [13/Sep/2015:18:45:09 -0400] "GET /admin/stats.xml?mount=/KRMARadio-LIVE HTTP/1.1" 200 3513 "-" "Mozilla/4.0 (StreamLicensing Directory Tester)" 0
    into an Access object like
{
ip: "162.251.161.74"
time: "13/Sep/2015:18:45:09 -0400"
host: host
http_method: GET
page: "/admin/stats.xml?mount=/KRMARadio-LIVE"
response_code: 200
http_referer: "-"
user_agent: "Mozilla/4.0 (StreamLicensing Directory Tester)"
length: 0
}
'''
@classmethod
def from_file(cls, file, host=""):
log = []
line = file.readline()
while line != "":
access = cls.from_line(line, host)
log.append(access)
line = file.readline()
return log
@classmethod
def from_line(cls, line: str, host: str):
ip, passed = cls.parse_token(line, " ")
line = line[passed:]
# we'll just throw this away as I'm not quite sure what it's for
blank, passed = cls.parse_token(line, " ")
if blank != '-':
print(blank + " expected -")
line = line[passed:]
# we'll throwing this away too. It's important, but I don't know what to do with it
user, passed = cls.parse_token(line, " ")
line = line[passed:]
at, passed = cls.parse_token(line, "[", "]")
#at = time.strptime(at, "%d/%b/%Y:%H:%M:%S %Z")
line = line[passed + 1:]
# get the entire request looks something like:
# GET /admin/stats.xml?mount=/KRMARadio-LIVE HTTP/1.1" 200 3513 "-" "Mozilla/4.0 (StreamLicensing Directory Tester)"
request, passed = cls.parse_token(line, '"')
line = line[passed:]
http_method, passed = cls.parse_token(request, " ")
request = request[passed:]
page, passed = cls.parse_token(request, " ")
response_code, passed = cls.parse_token(line, " ")
response_code = int(response_code)
line = line[passed:]
# we'll just throw this away as I'm not quite sure what it's for TODO: figure out what this does
blank, passed = cls.parse_token(line, " ")
line = line[passed:]
http_referer, passed = cls.parse_token(line, '"')
if http_referer == '-':
http_referer = None
line = line[passed+1:]
user_agent, passed = cls.parse_token(line, '"')
line = line[passed:]
length, passed = cls.parse_token(line, " ", "\n")
length = int(length)
return Access(ip, at, http_method, host, page, response_code, http_referer, user_agent, length)
    # Access.parse_token(string, start, end) returns the first token in `string`
    # delimited by `start`/`end`, together with the number of characters consumed.
@classmethod
def parse_token(cls, string: str, start: str, end: str = None):
if end is None:
end = start
word = bytearray()
in_word = False
covered = 0
for c in string:
covered += 1
if c != start and c != end:
word.append(ord(c))
in_word = True
elif in_word:
break
return word.decode("utf-8"), covered
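# Usage sketch (hypothetical log line mirroring the docstring example):
#   line = ('162.251.161.74 - admin [13/Sep/2015:18:45:09 -0400] '
#           '"GET /admin/stats.xml HTTP/1.1" 200 3513 "-" "Tester" 0')
#   access = Access.from_line(line, host='stream-host')
#   print(access.ip, access.page, access.response_code)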
| true
|
8b9fbfcaa527ae461b0041a04db5975cbe8041ba
|
Python
|
Davy971/PhylEntropy
|
/phylogene_app/utils.py
|
UTF-8
| 4,511
| 3.171875
| 3
|
[] |
no_license
|
###############################
# UPGMA #
###############################
# lowest_cell:
# Locates the smallest cell in the table
def lowest_cell(table):
# Set default to infinity
min_cell = float("inf")
x, y = -1, -1
# Go through every cell, looking for the lowest
for i in range(len(table)):
for j in range(len(table[i])):
if table[i][j] < min_cell:
min_cell = table[i][j]
x, y = i, j
# Return the x, y co-ordinate of cell
return x, y
# join_labels:
# Combines two labels in a list of labels
def join_labels(labels, a, b,val):
# Swap if the indices are not ordered
if b < a:
a, b = b, a
# Join the labels in the first index
labels[a] = "(" + labels[a] + ":"+ str(val)+ "," + labels[b] + ":" + str(val) + ")"
# Remove the (now redundant) label in the second index
del labels[b]
# join_table:
# Joins the entries of a table on the cell (a, b) by averaging their data entries
def join_table(table, a, b):
# Swap if the indices are not ordered
val= table[a][b] /2
if b < a:
a, b = b, a
# For the lower index, reconstruct the entire row (A, i), where i < A
row = []
for i in range(0, a):
row.append((table[a][i] + table[b][i]) / 2)
table[a] = row
# Then, reconstruct the entire column (i, A), where i > A
# Note: Since the matrix is lower triangular, row b only contains values for indices < b
for i in range(a + 1, b):
table[i][a] = (table[i][a] + table[b][i]) / 2
# We get the rest of the values from row i
for i in range(b + 1, len(table)):
table[i][a] = (table[i][a] + table[i][b]) / 2
# Remove the (now redundant) second index column entry
del table[i][b]
# Remove the (now redundant) second index row
del table[b]
return val
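# Minimal UPGMA driver sketch (illustrative; assumes a lower-triangular
# distance matrix and matching labels, not part of the original module):
#   table = [[], [2.0], [4.0, 4.0]]   # d(B,A)=2, d(C,A)=4, d(C,B)=4
#   labels = ['A', 'B', 'C']
#   while len(labels) > 1:
#       x, y = lowest_cell(table)
#       val = join_table(table, x, y)  # merge rows/cols; val = half the joined distance
#       join_labels(labels, x, y, val)
#   # labels[0] == '((A:1.0,B:1.0):2.0,C:2.0)'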
################################
# COMMUN #
################################
def reduce_table(table,labelSeq):
taille1=len(table)
index_ban=[]
tab_reduce=[]
label_reduce=[]
index_sommet={}
reverse_index={}
ensemble=[]
for i in range(len(labelSeq)):
index_sommet[labelSeq[i]]=i
reverse_index[i]= [labelSeq[i]]
for i in range(taille1):
verif = 0
taille2= len(table[i])
for j in range(taille2):
if table[i][j] ==0:
if i not in index_ban:
index_ban.append(i)
tab1=reverse_index[index_sommet[labelSeq[i]]]
tab2=reverse_index[index_sommet[labelSeq[j]]]
for elmt in tab1 :
if elmt not in tab2:
tab2.append(elmt)
reverse_index[index_sommet[labelSeq[j]]]=tab2
for bct in reverse_index[index_sommet[labelSeq[j]]]:
index_sommet[bct]=index_sommet[labelSeq[j]]
verif=1
if verif==0:
tmp = []
for cpt in range(taille2):
if cpt not in index_ban:
tmp.append(table[i][cpt])
tab_reduce.append(tmp)
deja=[]
for key,value in index_sommet.items():
if value not in deja:
ensemble.append(reverse_index[value])
deja.append(value)
for i in range(len(ensemble)):
chn = ""
for j in range(len(ensemble[i])):
chn += ensemble[i][j]
if j < len(ensemble[i]) - 1:
chn += "+"
label_reduce.append(chn)
return tab_reduce,label_reduce,ensemble
####################
# kruskal #
####################
def fusion(L_1, L_2):
L = []
k, l = len(L_1), len(L_2)
i, j = 0, 0
while i < k and j < l:
if L_1[i][0] <= L_2[j][0]:
L.append(L_1[i])
i += 1
else:
L.append(L_2[j])
j += 1
if i == k and j < l:
L = L + L_2[j:]
elif j == l and i < k:
L = L + L_1[i:]
return L
def tri_fusion(L):
if len(L) <= 1:
return (L)
else:
m = len(L) // 2
return fusion(tri_fusion(L[0:m]), tri_fusion(L[m:]))
def countFreq(arr, n):
visited = [False for i in range(n)]
result=[]
for i in range(n):
if (visited[i] == True):
continue
count = 1
for j in range(i + 1, n, 1):
if (arr[i] == arr[j]):
visited[j] = True
count += 1
result.append(count/n)
return result
| true
|
bda0c3815530ec76b7fff36779c801fa79fada40
|
Python
|
Noisynose/adventofcode
|
/day1/day1_part2.py
|
UTF-8
| 1,141
| 3.53125
| 4
|
[] |
no_license
|
def variationsFromFile(fileName):
variations = []
for variation in open(fileName, 'r'):
variations.append(int(variation))
return variations
# day1_part1
def totalFrequency(variations):
frequency = 0
for variation in variations:
frequency += variation
return frequency
# day1_part2
def applyFrequenciesTwice(variations):
frequency = 0
for variation in variations+variations:
frequency += variation
return frequency
def findFirstSecondFrequency(variations, numberOfIterations):
    frequency = 0
    frequencies = {0}  # a set makes the repeated membership test O(1)
    found = False
    for _ in range(1, numberOfIterations):
        for variation in variations:
            frequency += variation
            if frequency in frequencies:
                found = True
                break
            frequencies.add(frequency)
        if found:
            break
    return frequency
variations = variationsFromFile('input.txt')
# print(variations)
endFrequency = totalFrequency(variations)
# print(endFrequency)
firstSecondFrequency = findFirstSecondFrequency(variations, 200)
print(firstSecondFrequency)
| true
|
49942cf3785125b79a7ae0a6279557097a1e45b7
|
Python
|
whaleygeek/bitio
|
/src/try/test_GPIO.py
|
UTF-8
| 454
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
# WORK IN PROGRESS - DO NOT USE
import microbit.GPIO as GPIO
import time
GPIO.setmode(GPIO.MICROBIT)
BUTTON = 0
LED = 1
GPIO.setup(LED, GPIO.OUT)
GPIO.setup(BUTTON, GPIO.IN)
try:
while True:
if GPIO.input(BUTTON) == False: # active low
print("Button pressed")
GPIO.output(LED, True)
time.sleep(0.25)
GPIO.output(LED, False)
time.sleep(0.25)
finally:
GPIO.cleanup()
# END
| true
|
45ec6d4cc16555bac86449b3a43d048bf7ad68f3
|
Python
|
lomoeg/lksh-2015
|
/day8/b.py
|
UTF-8
| 402
| 2.796875
| 3
|
[] |
no_license
|
def metbefore(seq):
s = set()
len1 = len(s)
for i in range(len(seq)):
s.add(seq[i])
if len(s) == len1:
print('YES', file = f_out)
else:
len1 = len(s)
print('NO', file = f_out)
f_in = open('metbefore.in')
seq1 = list(map(int, f_in.readline().split()))
f_in.close()
f_out = open('metbefore.out', 'w')
metbefore(seq1)
f_out.close()
| true
|
1faac0c393418c4180cb0d0c5d4130450423a9ff
|
Python
|
zubairAhmed777/python
|
/calc.py
|
UTF-8
| 132
| 2.984375
| 3
|
[] |
no_license
|
def add(x,y):
    return x+y
def sub(x,y):
    return x-y
def mul(x,y):
    return x*y
def div(x,y):
    return x/y
| true
|
25f1cb618615a5ea2502e20858dbc8439ad73149
|
Python
|
siberian122/kyoupuro
|
/practice/Wanna-go-back-home.py
|
UTF-8
| 418
| 3.03125
| 3
|
[] |
no_license
|
s=list(input())
count=[0 for i in range(4)]
for i in s:
if i=='N':
count[0]+=1
elif i=='S':
count[1]+=1
elif i=='E':
count[2]+=1
elif i=='W':
count[3]+=1
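# Returning home is possible iff every axis that was used has moves in both
# directions: any N needs an S, any E needs a W (and vice versa).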
if count[0]>0 and count[1]==0:
print('No')
elif count[1]>0 and count[0]==0:
print('No')
elif count[2]>0 and count[3]==0:
print('No')
elif count[3]>0 and count[2]==0:
print('No')
else:
print('Yes')
| true
|
10e7e66bcf93b13b230798cd4396b17a367cf48b
|
Python
|
Loveashik/Music-classification
|
/classification/model_training.py
|
UTF-8
| 1,640
| 2.65625
| 3
|
[] |
no_license
|
"""
Main training loop for the model
"""
from functools import partial
import torch
from sklearn.metrics import f1_score, accuracy_score
from torch.nn import BCELoss
from torch.optim import Adam
from tqdm import tqdm
from data_loaders import train_dataloader, val_dataloader
from model import resnet_model
from tools import TrainEpoch, ValidEpoch
EPOCHS = 20
loss = BCELoss()  # loss function
# Define the metrics to track during training:
f1_multiclass = partial(f1_score, average="samples")  # F1 metric
f1_multiclass.__name__ = 'f1'
accuracy = accuracy_score  # accuracy metric
accuracy.__name__ = 'accuracy'
optimizer = Adam(resnet_model.parameters())  # optimizer
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
device = 'cuda:0'
train_epoch = TrainEpoch(
resnet_model,
loss=loss,
metrics=[f1_multiclass, accuracy_score],
optimizer=optimizer,
device=device,
verbose=True,
)
valid_epoch = ValidEpoch(
resnet_model,
loss=loss,
metrics=[f1_multiclass, accuracy_score],
device=device,
verbose=True,
)
for i in tqdm(range(EPOCHS)):
print(f'\nEpoch: {i + 1}')
train_logs = train_epoch.run(train_dataloader)
valid_logs = valid_epoch.run(val_dataloader)
scheduler.step(valid_logs['loss'])
torch.save(resnet_model, f'saved_model_{valid_logs["f1"]:.2f}')  # write the validation f1 score into the saved model's filename
| true
|
027f6e6f2fe78ba51a83264d71acff3fc8cdb389
|
Python
|
AdamZhouSE/pythonHomework
|
/Code/CodeRecords/2593/60620/258198.py
|
UTF-8
| 636
| 2.65625
| 3
|
[] |
no_license
|
t=int(input())
for i in range(t):
n=int(input())
s=input()
if(s[1]==','):
a=list(map(int,s.split(',')))
else:
a=list(map(int,s.split()))
b=[]
result=[]
num=0
for j in range(n-1):
for k in range(j+1,n):
b.append(a[j]+a[k])
for j in b:
if(b.count(j)>1):
num=j
break
for j in range(n-1):
for k in range(j+1,n):
if(a[j]+a[k]==num):
result.append(j)
result.append(k)
result=result[:4]
if(num==0):
print('no pairs')
else:
print(*result)
| true
|
e92158775e0ed0e0ddb835129e3b0c68b4f807a6
|
Python
|
35sebastian/Proyecto_Python_1
|
/CaC Python/EjerciciosPy1/Ej9.py
|
UTF-8
| 981
| 4.09375
| 4
|
[] |
no_license
|
#
# My solution:
#
# inversion = float(input("Introduce la cantidad a invertir: "))
# interes= int(input("Introduce el porcentaje de interés anual: "))
# anos= int(input("Introduce el número de años de inversión: "))
#
# '''
# capital = 0
#
# for i in range(anos):
# inversion = inversion + (inversion * (interes *0.01))
# capital += inversion
# '''
#
# print("el interes obtenido por la inversion en", anos, " años es de: ", inversion*(interes*0.01)*anos)
#
# amount = float(input("¿Cantidad a invertir? "))
# interest = float(input("¿Interés porcentual anual? "))
# years = int(input("¿Años? "))
# for i in range(years):
# amount = (amount * (1 + interest))
# print("Capital tras " + str(i+1) + " años: " + str(round(amount)))
amount = float(input("¿Cantidad a invertir? "))
interest = float(input("¿Interés porcentual anual? "))
years = int(input("¿Años? "))
print(round(amount * (1 + interest)**years))
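# Worked example (the interest is entered as a fraction, e.g. 0.10 for 10%):
# 1000 at 0.10 over 2 years -> round(1000 * 1.10**2) = 1210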
| true
|
b3193c34abf17c2d4a5b106a14bf97a212539d6f
|
Python
|
rajlath/rkl_codes
|
/Hackerrank/construct_an_array.py
|
UTF-8
| 835
| 2.9375
| 3
|
[] |
no_license
|
'''
#include <bits/stdc++.h>
using namespace std;
const int mod = 1e9 + 7;
signed main()
{
//freopen("input.txt", "r", stdin);
ios::sync_with_stdio(0);
cin.tie(0);
int n, k, x;
cin >> n >> k >> x;
assert(3 <= n && n <= 100000);
assert(2 <= k && k <= 100000);
assert(1 <= x && x <= k);
int d[n];
d[0] = 0;
d[1] = 1;
for(int i = 2; i < n; i++)
d[i] = (1LL * (k - 2) * d[i - 1] + 1LL * (k - 1) * d[i - 2]) % mod;
cout << (x == 1 ? 1LL * (k - 1) * d[n - 2] % mod : d[n - 1]) << endl;
}
761 99 1
236568308
'''
mod = int(1e9 + 7)
n, k, x = [int(x) for x in input().split()]
d = [0]*n
d[0] = 0
d[1] = 1
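# d[i] = number of length-i "walks" between two fixed *distinct* values when
# adjacent entries must differ among k symbols. Conditioning on the value just
# before the end: it is some third value ((k-2)*d[i-1] walks, by symmetry) or
# the start itself, reachable in (k-1)*d[i-2] ways. For x == 1 the walk must
# close back on the start, giving (k-1)*d[n-2].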
for i in range(2, n):
d[i]= (1 * (k - 2) * d[i - 1] + 1 * (k - 1) * d[i - 2]) % mod
if x == 1:
ans = 1 * (k - 1) * d[n - 2] % mod
else:
ans = d[n - 1]
print(ans)
| true
|
9e66248773a52a58b940e7b70376c79db254cbfe
|
Python
|
JanBezler/Spaceships
|
/menu.py
|
UTF-8
| 3,811
| 2.90625
| 3
|
[] |
no_license
|
import run
import pygame as pg
import sys
class Menu(object):
def __init__(self):
self.screen = pg.display.set_mode((0,0),pg.FULLSCREEN)
pg.font.init()
pg.display.set_caption("Main Menu")
self.click = (0,0)
self.size = self.screen.get_size()
self.pos = (self.size[0] / 2, self.size[1] / 2)
while True:
for event in pg.event.get():
if event.type == pg.QUIT:
sys.exit(0)
elif event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE:
sys.exit(0)
elif event.type == pg.KEYDOWN and event.key == pg.K_SPACE:
run.Game()
elif event.type == pg.MOUSEBUTTONDOWN:
self.click = pg.mouse.get_pos()
if (self.click[0] > self.circlee.bottomleft[0]) and (
self.click[0] < self.circlee.bottomright[0]) and (
self.click[1] > self.circlee.topleft[1]) and (
self.click[1] < self.circlee.bottomright[1]):
self.diffile = open("diff.txt", "w")
self.poziom = "Easy"
self.diffile.write(self.poziom)
self.diffile.close()
if (self.click[0] > self.circlem.bottomleft[0]) and (
self.click[0] < self.circlem.bottomright[0]) and (
self.click[1] > self.circlem.topleft[1]) and (
self.click[1] < self.circlem.bottomright[1]):
self.diffile = open("diff.txt", "w")
self.poziom = "Medium"
self.diffile.write(self.poziom)
self.diffile.close()
if (self.click[0] > self.circleh.bottomleft[0]) and (
self.click[0] < self.circleh.bottomright[0]) and (
self.click[1] > self.circleh.topleft[1]) and (
self.click[1] < self.circleh.bottomright[1]):
self.diffile = open("diff.txt", "w")
self.poziom = "Hard"
self.diffile.write(self.poziom)
self.diffile.close()
self.screen.fill((0, 0, 0))
myfont = pg.font.SysFont('Comic Sans MS', 80)
textsurface = myfont.render("Gooood Spaceable", True, (222, 132, 50))
self.screen.blit(textsurface, (self.pos[0] - 356, self.pos[1] - 200))
myfont = pg.font.SysFont('Comic Sans MS', 30)
textsurface = myfont.render("Press <space> to play!", True, (50, 180, 130))
self.screen.blit(textsurface, (self.pos[0]-170,self.pos[1]+100))
self.circlee = pg.draw.circle(self.screen,(59,170,70),(int(self.pos[0]-80),int(self.pos[1]-30)),20)
self.circlem = pg.draw.circle(self.screen,(200,200,20),(int(self.pos[0]-20),int(self.pos[1]-30)),20)
self.circleh = pg.draw.circle(self.screen,(170,70,80),(int(self.pos[0]+40),int(self.pos[1]-30)),20)
            with open("diff.txt", "r") as diff_file:
                diffread = diff_file.read()
myfont = pg.font.SysFont('Comic Sans MS', 30)
textsurface = myfont.render("Difficulty: "+str(diffread), True, (200, 160, 220))
self.screen.blit(textsurface, (self.pos[0]-140 , self.pos[1] ))
myfont = pg.font.SysFont('Comic Sans MS', 30)
textsurface = myfont.render("Use <w,s,a,d> or <arrows> to move and <space> to shoot!", True, (50, 100, 130))
self.screen.blit(textsurface, (self.pos[0] - 390, self.pos[1] + 60))
pg.display.flip()
menu = Menu()
| true
|
5076a282a5d17becb128c9fc751e4c95d9c4b8c5
|
Python
|
sutha001/sickness_pj
|
/comparison.py
|
UTF-8
| 3,121
| 2.546875
| 3
|
[] |
no_license
|
import csv
import pygal
data51 = csv.reader(open("2551.txt"))
data52 = csv.reader(open("2552.txt"))
data53 = csv.reader(open("2553.txt"))
data54 = csv.reader(open("2554.txt"))
data55 = csv.reader(open("2555.txt"))
sick51 = []
sick52 = []
sick53 = []
sick54 = []
sick55 = []
givesick1 = []
givesick2 = []
givesick3 = []
for i in data51:
sick51.append(i)
for i in sick51:
if i[0] == "โรคภูมิคุ้มกันบกพร่องจากเชื้อไวรัส":
givesick1.append(int(i[1]))
for i in data52:
sick52.append(i)
for i in sick52:
if i[0] == "โรคภูมิคุ้มกันบกพร่องจากเชื้อไวรัส":
givesick1.append(int(i[1]))
for i in data53:
sick53.append(i)
for i in sick53:
if i[0] == "โรคภูมิคุ้มกันบกพร่องจากเชื้อไวรัส":
givesick1.append(int(i[1]))
for i in data54:
sick54.append(i)
for i in sick54:
if i[0] == "โรคภูมิคุ้มกันบกพร่องจากเชื้อไวรัส":
givesick1.append(int(i[1]))
for i in data55:
sick55.append(i)
for i in sick55:
if i[0] == "โรคภูมิคุ้มกันบกพร่องจากเชื้อไวรัส":
givesick1.append(int(i[1]))
for i in sick51:
if i[0] == "ไตวายเฉียบพลัน":
givesick2.append(int(i[1]))
for i in sick52:
if i[0] == "ไตวายเฉียบพลัน":
givesick2.append(int(i[1]))
for i in sick53:
if i[0] == "ไตวายเฉียบพลัน":
givesick2.append(int(i[1]))
for i in sick54:
if i[0] == "ไตวายเฉียบพลัน":
givesick2.append(int(i[1]))
for i in sick55:
if i[0] == "ไตวายเฉียบพลัน":
givesick2.append(int(i[1]))
for i in sick51:
if i[0] == "ไส้เลื่อน":
givesick3.append(int(i[1]))
for i in sick52:
if i[0] == "ไส้เลื่อน":
givesick3.append(int(i[1]))
for i in sick53:
if i[0] == "ไส้เลื่อน":
givesick3.append(int(i[1]))
for i in sick54:
if i[0] == "ไส้เลื่อน":
givesick3.append(int(i[1]))
for i in sick55:
if i[0] == "ไส้เลื่อน":
givesick3.append(int(i[1]))
print(givesick1)
print(givesick2)
print(givesick3)
line = pygal.Bar()
line.title = "เปรียบเทียบอัตราผู้ป่วยจากกลุ่มโรคทั้ง 3 โรค ตลอดระยะเวลา 5 ปี"
line.x_labels = (2551, 2552, 2553, 2554, 2555)
line.add("โรคภูมิคุ้มกันบกพร่องจากเชื้อไวรัส", givesick1)
line.add("ไตวายเฉียบพลัน", givesick2)
line.add("ไส้เลื่อน", givesick3)
line.render_to_file("pair.svg")
| true
|
61b970be50eb3e8cda7427b9179490e51739b21f
|
Python
|
miko1004/freepacktbook
|
/freepacktbook/pushover.py
|
UTF-8
| 1,159
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
import json
import requests
class PushoverNotification(object):
def __init__(self, pushover_user, pushover_token):
self.pushover_api = "https://api.pushover.net/1/messages.json"
self.pushover_user = pushover_user
self.pushover_token = pushover_token
def get_image_content(self, image_url):
return requests.get(image_url, stream=True).content
def notify(self, data):
if not all([self.pushover_user, self.pushover_token]):
return
payload = {
"user": self.pushover_user,
"token": self.pushover_token,
"title": data["title"],
"url": data["book_url"],
"url_title": data["title"],
"message": "Today's Free eBook\n%s\n%s"
% (data["title"], data["description"]),
}
try:
image_content = self.get_image_content(
data["image_url"].replace(" ", "%20")
)
except Exception:
files = None
else:
files = {"attachment": ("cover.jpg", image_content)}
requests.post(self.pushover_api, data=payload, files=files)
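# Usage sketch (placeholder credentials and book data, not part of the module):
#   notifier = PushoverNotification('user-key', 'app-token')
#   notifier.notify({
#       'title': 'Some Book',
#       'book_url': 'https://example.com/book',
#       'description': 'One-line blurb',
#       'image_url': 'https://example.com/cover.jpg',
#   })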
| true
|
86e873edfdb7851a1f73eb52db26bcc37e50fc66
|
Python
|
nancydyc/Code-Challenge
|
/reversell.py
|
UTF-8
| 1,553
| 4.1875
| 4
|
[] |
no_license
|
"""
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
"""
class Node():
def __init__(self, value):
self.value = value
self.next = None
class LinkedList():
def __init__(self):
self.head = None
def reverse_ll(head):
    """Given the head of a singly linked list, return its reversed list."""
    # collect every node's value in a plain list (advancing the cursor)
    new_list = []
    node = head
    while node:
        new_list.append(node.value)
        node = node.next
    # pop values off the end so the last node becomes the new head
    reversed_list = LinkedList()
    tail = None
    while new_list:
        new_node = Node(new_list.pop())
        if reversed_list.head is None:
            reversed_list.head = new_node
        else:
            tail.next = new_node
        tail = new_node
    return reversed_list
# Solutions
def reverseList(head): # Iterative
prev, curr = None, head
while curr:
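        # one simultaneous assignment: point the link backwards, then advance
        # prev and curr using the values captured on the right-hand side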
curr.next, prev, curr = prev, curr, curr.next
return prev
def reverseList_v1(head): # Recursive
"""
:type head: ListNode
:rtype: ListNode
"""
if not head or not head.next: # when node is none (1 - 2 - 3 - 4 - 5 - none)
return head # 5
    p = reverseList_v1(head.next) # recurse all the way to the end of the list (5 - none)
head.next.next = head # the outer layer head node: add it to reversed ll (head 4 - next 5 - next next 4)
head.next = None # end one layer of call stack (head 4 - next none)
return p
| true
|
95b4e1e640106e96f7f271d35b46f5a57f1921b1
|
Python
|
bjgiraudon/pyNamo
|
/simulate.py
|
UTF-8
| 13,750
| 2.59375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 16:25:04 2020
@author: Benjamin Giraudon
STATUS : To add :
    - for the executable simulation : user input of payoff matrices,
      better toggling and idling of plotting options,
      a user interface (buttons, sliders)
    - functions to simulate and save simulation results (when no graph is wanted)
"""
import time
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import drawer
import parameters as param
def exec_sim():
print("TEST :", param.dict_test)
test = param.dict_test[int(input("-> Please enter the desired test ID :"))]
print("----------------------------------------------------")
if test == "arrow":
fig = plt.figure()
ax = fig.gca(projection = '3d', xlabel='x axis', ylabel = 'y axis', zlabel = 'z axis')
print("Testing : {}".format(test))
Ot = [-4, 3, 0]
At = [5.4, 3, 0]
Ot2 = [-4, 3, 0]
At2 = [5.4, 4.1, 0]
Ot3 = [6.5, 2, 0]
At3 = [6.5, 3.7, 0]
res1 = drawer.arrow_dyn3(Ot, At, fig, ax, 1, 0.33, 'purple', zOrder=3)
res2 = drawer.arrow_dyn3(Ot2, At2, fig, ax, 1, 0.33, 'orange', zOrder=3)
res3 = drawer.arrow_dyn3(Ot3, At3, fig, ax, 1, 0.33, 'black', zOrder=3)
N=10
res = [res1, res2, res3]
for i in range(N):
color = (random.random(), random.random(), random.random())
res.append(drawer.arrow_dyn3([random.randint(-5,5),random.randint(-5,5), 0],[random.randint(-5,5),random.randint(-5,5), 0], fig, ax, 1,0.33,color,zOrder=3))
elif test == "2P3S":
print("2P3S :", param.dict_2P3S)
example = abs(int(input("-> Please enter the desired example ID :")))
print("-----------------------------------------------------")
pMrps = param.PAYMTX_2P3S[example - 1]
print("PAYOFF MATRIX : {} -- {}".format(test, param.dict_2P3S[example]))
print(pMrps)
print("-----------------------------------------------------")
print("EQUILIBRIA CHARACTERISTICS :")
fig, ax = plt.subplots()
plt.axis('off')
ax.set_aspect(1)
start_time = time.time()
if example == 1:
drawer.setSimplex(['$R$','$P$','$S$'], pMrps, ax, 13, 53)
drawer.trajectory([0.9, 0.05], pMrps, param.step, [0.01, 0.06, 0.12, 0.2], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.5, 0], pMrps, param.step, [0.0001], 10, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0,0.5], pMrps, param.step, [0.0001], 10, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.5, 0.5], pMrps, param.step, [0.0001], 10, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.speed_plot([0, 1], [0, np.sqrt(3/4)], 50, pMrps, ax, cm.coolwarm, levels = 50, zorder=50)
eqs = drawer.equilibria(pMrps, ax, 'black', 'gray', 'white', 80, 54)
elif example == 2:
drawer.setSimplex(['1','2','3'], pMrps, ax, 13, 53)
drawer.trajectory([0.9, 0.05], pMrps, param.step, [0.0001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.5, 0], pMrps, param.step, [0.0001], 10, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0,0.5], pMrps, param.step, [0.0001], 10, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.5, 0.5], pMrps, param.step, [0.0001], 10, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.3, 0.3], pMrps, param.step, [0.0001], 10, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.speed_plot([0, 1], [0, np.sqrt(3/4)], 50, pMrps, ax, cm.coolwarm, levels = 50, zorder=50)
eqs = drawer.equilibria(pMrps, ax, 'black', 'gray', 'white', 80, 54)
elif example == 3:
drawer.setSimplex(['R','P','S'], pMrps, ax, 13, 53)
drawer.trajectory([0.5, 0.25], pMrps, param.step, [0.01], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.7, 0.1], pMrps, param.step, [0.0001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.speed_plot([0, 1], [0, np.sqrt(3/4)], 50, pMrps, ax, cm.coolwarm, levels = 50, zorder=50)
eqs = drawer.equilibria(pMrps, ax, 'black', 'gray', 'white', 80, 54)
elif example == 4:
drawer.setSimplex(['1','2','3'], pMrps, ax, 13, 53)
drawer.speed_plot([0, 1], [0, np.sqrt(3/4)], 50, pMrps, ax, cm.coolwarm, levels = 50, zorder=50)
drawer.trajectory([0.438, 0.120], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.7, 0.18], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.7, 0.11], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.25, 0.26], pMrps, param.step, [0.0001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.44, 0.497], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.31, 0.49], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.329, 0.552], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.714, 0.244], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.329, 0.163], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
eqs = drawer.equilibria(pMrps, ax, 'black', 'gray', 'white', 80, 54)
elif example == 5:
drawer.setSimplex(['1','2','3'], pMrps, ax, 13, 53)
drawer.trajectory([0.2, 0.4], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.4, 0.2], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.4, 0.4], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.15, 0.7], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.15, 0.15], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.7, 0.15], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.75, 0.25], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.25, 0.75], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0, 0.75], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0, 0.25], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.75, 0], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.25, 0], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.7, 0.23], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.speed_plot([0, 1], [0, np.sqrt(3/4)], 50, pMrps, ax, cm.coolwarm, levels = 50, zorder=50)
eqs = drawer.equilibria(pMrps, ax, 'black', 'gray', 'white', 80, 54)
elif example == 6:
drawer.setSimplex(['A','B','C'], pMrps, ax, 13, 53)
drawer.trajectory([0.2, 0.4], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.4, 0.2], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.4, 0.4], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.75, 0.25], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.25, 0.75], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0.5, 0], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.trajectory([0, 0.5], pMrps, param.step, [0.001], 50, fig, ax, 'black', param.arrowSize, param.arrowWidth, 53)
drawer.speed_plot([0, 1], [0, np.sqrt(3/4)], 50, pMrps, ax, cm.coolwarm, levels = 50, zorder=50)
eqs = drawer.equilibria(pMrps, ax, 'black', 'gray', 'white', 80, 54)
else:
print(" /!\ No trajectory has been set for this example /!\ ")
drawer.setSimplex(['A','B','C'], pMrps, ax, 13, 53)
# drawer.speed_plot([0, 1], [0, np.sqrt(3/4)], 50, pMrps, ax, cm.coolwarm, levels = 50, zorder=50)
eqs = drawer.equilibria(pMrps, ax, 'black', 'gray', 'white', 80, 54)
elif test == "2P2S":
print("2P2S :", param.dict_2P2S)
example = abs(int(input("-> Please enter the desired example ID :")))
print("-----------------------------------------------------")
pMrps = param.PAYMTX_2P2S[example - 1]
print("PAYOFF MATRIX : {} -- {}".format(test, param.dict_2P2S[example]))
print(pMrps[0], "PLAYER 1")
print(pMrps[1], "PLAYER 2")
print("-----------------------------------------------------")
print("EQUILIBRIA CHARACTERISTICS :")
fig, ax = plt.subplots()
ax.set_title('Phase diagram : {} -- {}'.format(test,param.dict_2P2S[example]), fontsize=14)
ax.set_aspect(1)
plt.axis('on')
start_time = time.time()
if example == 1:
drawer.setSimplex(['$p_1$', '$p_2$'], pMrps, ax, 16, 53)
drawer.trajectory([0.6,0.2], pMrps, param.step, [0.0001], 10, fig, ax, 'blue', param.arrowSize, param.arrowWidth, 20)
drawer.trajectory([0.8,0.1], pMrps, param.step, [0.01], 10, fig, ax, 'blue', param.arrowSize, param.arrowWidth, 20)
eqs = drawer.equilibria(pMrps, ax, 'black', 'gray','white', 80, 54)
        elif example == 2:
drawer.setSimplex(['$p_H$', '$p_D$'], pMrps, ax, 16, 53)
drawer.trajectory([0.5,0.5], pMrps, param.step,[0.01], 10,fig, ax,'blue', param.arrowSize, param.arrowWidth, 20)
drawer.trajectory([0.9,0.9], pMrps, param.step,[0.01], 10,fig, ax,'blue', param.arrowSize, param.arrowWidth, 20)
drawer.trajectory([0.8,0.1], pMrps, param.step,[0.001], 30,fig, ax,'blue', param.arrowSize, param.arrowWidth, 20)
drawer.trajectory([0.1,0.8], pMrps, param.step,[0.001], 30,fig, ax,'blue', param.arrowSize, param.arrowWidth, 20)
eqs = drawer.equilibria(pMrps, ax, 'black', 'gray','white', 80, 54)
elif test == "2P4S":
print("2P4S :", param.dict_2P4S)
example = abs(int(input("-> Please enter the desired example ID :")))
print("-----------------------------------------------------")
pMrps = param.PAYMTX_2P4S[example - 1]
print("PAYOFF MATRIX : {} -- {}".format(test, param.dict_2P4S[example]))
print(pMrps)
print("-----------------------------------------------------")
print("EQUILIBRIA CHARACTERISTICS :")
fig = plt.figure()
ax = fig.gca(projection = '3d', xlabel='x axis', ylabel = 'y axis', zlabel = 'z axis')
ax.set_aspect(1)
ax.set_axis_off()
start_time = time.time()
if example == 1:
drawer.setSimplex(['$R$', '$P$', '$S$', '$T$'], pMrps, ax, 13, 53)
#eqs = drawer.equilibria(pMrps, ax, 'black', 'gray','white', 80, 2)
        elif example == 2:
drawer.setSimplex(["1", "2", "3", "4"], pMrps, ax, 13, 53)
drawer.trajectory([0.2, 0.25, 0.25], pMrps, param.step, [0.0001, 0.01, 0.05, 0.08, 0.1, 0.15, 0.175, 0.2, 0.3, 0.4, 0.5, 0.7, 0.8, 0.9, 0.99], 30, fig, ax,'lightgrey', param.arrowSize*10, param.arrowWidth*10, 20)
#eqs = drawer.equilibria(pMrps, ax, 'black', 'gray','white', 80, 2)
        elif example == 3:
drawer.setSimplex(["$R$", "$P$", "$S$", "$T$"], pMrps, ax, 13, 53)
#eqs = drawer.equilibria(pMrps, ax, 'black', 'gray','white', 80, 2)
if test != "arrow" and test != "2P4S":
print("-----------------------------------------------------")
print("EQUILIBRIA TYPES:")
print("{} SOURCES".format(len(eqs[0])))
print("{} SADDLES".format(len(eqs[1])))
print("{} SINKS".format(len(eqs[2])))
print("{} CENTRES".format(len(eqs[3])))
print("{} NON-HYPERBOLIC".format(len(eqs[4])))
print("-----------------------------------------------------")
print("Execution time : %s seconds" % round((time.time() - start_time), 3))
return None
| true
|
8bab97e207cdeb842eeb487fd52f1a1ecbcfebd4
|
Python
|
lion963/SoftUni-Python-Fundamentals-
|
/Exercise Dictionaries/ForceBook.py
|
UTF-8
| 876
| 3.078125
| 3
|
[] |
no_license
|
force_book_dict = {}
users = {}
command = input()
while not command == 'Lumpawaroo':
if '|' in command:
side, user = command.split(' | ')
if user not in users:
users[user] = side
elif '-' in command:
user, side = command.split(' -> ')
users[user] = side
print(f'{user} joins the {side} side!')
command = input()
for key, value in users.items():
    force_book_dict.setdefault(value, []).append(key)
force_book_dict = dict(sorted(force_book_dict.items(), key=lambda x: (-len(x[1]), x[0])))
for key, value in force_book_dict.items():
if len(value) > 0:
print(f'Side: {key}, Members: {len(value)}')
for el in sorted(value):
print(f'! {el}')
| true
|
69e93397ad60a70ed4e43020a1b1317d0583589d
|
Python
|
frclasso/turma3_Python1_2018
|
/Cap06_estruturas_decisao/03_elif.py
|
UTF-8
| 157
| 3.890625
| 4
|
[] |
no_license
|
#!/usr/bin/env python3
x = 6
y = 6
# if ==> "se" in Portuguese
if x > y:
    print('x is greater')
elif x < y:
    print('x is less than y')
else:
    print('They are equal')
| true
|
3142e8056c15cea5ca9ae5bd1cfc3b4ff679f528
|
Python
|
cilame/any-whim
|
/auto_minewin7/CreateCate.py
|
UTF-8
| 6,619
| 2.96875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import os, pickle
from collections import OrderedDict
import cv2
import numpy as np
class CreateCate(object):
"""
categorical_crossentropy 训练的样本生成器
功能:
读取根据文件夹名字进行图片的读入
*生成对应的完整的 one-hot 训练数据集
*将其作为可进行训练的 numpy 数据类型直接使用
e.g: >>>s = CreateCate(picpath) # 注意默认参数 create=True
>>>s.x # 可直接使用。 s.x.shape->(n, height, width, channel)
>>>s.y # 可直接使用。 s.y.shape->(n, s.class_num) one-hot 数据型
在实例化的时候即将该路径下的文件夹名字作为类名
支持多路径功能
"""
def __init__(self, *args, **kw):
"""
*args:
仅接受图片类文件路径
picpath1+>classA+>pic1
| |>pic2
| +>pic3
+>classB...
+>classC...
picpath2+>classD...
+>classB...
+>classC...
s.classes = [classA, classB, classC, classD]
可以直接多填
也可以填写一个list
e.g: CreateCate(picpath1,picpath2,**kw)
CreateCate([picpath1,picpath2],**kw)
**kw:
create = True
是否在实例化时候直接读取数据
over2show = 200
读取图片数据时候,超过多少将进行读取进度的显示
nptype = np.float32
读取数据的整体格式
"""
self.__args = args
self.__create = kw.get('create', True)
self.__over2show = kw.get('over2show', 200)
self.__nptype = kw.get('nptype', np.float32)
self.classes = self.__path2class()
self.class_num = len(self.classes)
self.__files = self.__get_paths_tree()
self.__filters = ['jpg', 'png',]
self.__cates = map(tuple, np.eye(self.class_num).tolist())
self.__cates2class = OrderedDict(zip(self.__cates, self.classes))
self.__class2cates = OrderedDict(zip(self.classes, self.__cates))
if self.__create:
self.__create_XnY()
else:
self.get_XnY = self.__get_XnY
def __get_paths(self):
if not self.__args:
self.__create = False
return self.__args
typestrs = all(map(lambda i:type(i)==str,self.__args))
typelist = (len(self.__args)==1 and (type(self.__args[0])==list or type(self.__args[0])==tuple))
if not typelist and not typestrs:
            raise TypeError('args only accept some picpath string or a picpath list.')
if typestrs: paths = self.__args
if typelist: paths = self.__args[0]
return paths
def __get_paths_tree(self):
paths = self.__get_paths()
classes_paths = {}.fromkeys(self.classes)
for path in paths:
for i in filter(lambda i:os.path.isdir(os.path.join(path,i)), os.listdir(path)):
if not classes_paths[i]:
classes_paths[i] = [os.path.join(path,i)]
else:
classes_paths[i] += [os.path.join(path,i)]
return classes_paths
def __path2class(self):
paths = self.__get_paths()
classes = set()
for path in paths:
for i in filter(lambda i:os.path.isdir(os.path.join(path,i)), os.listdir(path)):
classes.add(i)
return list(classes)
def save_mapdict(self, name):
"""
将类名以及产生的 one-hot 数据进行对应的 mapdict 进行保存
e.g:
>>>s.save_mapdict('cls.pickle')
会生成 cls.pickle 文件
"""
        pickle.dump(self.__cates2class, open(name, 'wb'))
@staticmethod
def load_mapdict(name):
"""
将类名以及产生的 one-hot 数据进行对应的 mapdict 进行读取
e.g:
>>>cates2class = s.save_mapdict('cls.pickle')
会读取 cls.pickle 文件
"""
return pickle.load(open(name))
@staticmethod
def get_class_by_cate(cates2class, l):
"""
通过读取 mapdict 以及一个 one-hot 查找对应的类名
因为该函数为静态函数,所以使用时可以不图片加载地址
e.g:
>>>s = CreateCate()
>>>cates2class = s.load_mapdict('cls.pickle')
>>># 通过 load_mapdict 加载 cls.pickle 文件
>>>s.get_class_by_cate(cates2class, l)
"""
s = np.array(l).ravel()
## s[s>=.5], s[s< .5] = 1., 0.
s[s==np.max(s)],s[s!=np.max(s)] = 1., 0.
cate = tuple(s.tolist())
return cates2class[cate]
## def get_class_by_cate_test(self, l):
## s = np.array(l).ravel()
## s[s>=.8], s[s< .8] = 1., 0.
## cate = tuple(s.tolist())
## return self.__cates2class[cate]
def __create_sample(self, picpath):
pics = [os.path.join(picpath, i) \
for i in os.listdir(picpath) \
                if i[-3:].lower() in self.__filters]
return pics
def __get_allnum(self):
allnum = 0
for cls in self.classes:
for picpath in self.__files[cls]:
allnum += len(os.listdir(picpath))
return allnum
def __create_samples(self):
num = 1
show = False
allnum = self.__get_allnum()
showp = map(int, (np.arange(0,1.1,.1)* allnum).tolist())
if allnum > self.__over2show: show = True
x, y = [], []
for cls in self.classes:
for picpath in self.__files[cls]:
for pic in self.__create_sample(picpath):
if show and num in showp:
                        print ('[*]%6d pics. %6.2f%% loaded.') % (num, float(num)/allnum*100)
x += [cv2.imread(pic).astype(self.__nptype)]
y += [self.__class2cates[cls]]
num += 1
return np.array(x), np.array(y).astype(self.__nptype)
def __create_XnY(self):
self.x, self.y = self.__create_samples()
if len(self.x.shape) == 1:
print '[*]WARNING! self.x.shape:%s'%str(self.x.shape)
print '[*]you must ensure all pic shape is same.'
def __get_XnY(self):
        if not (hasattr(self, 'x') and hasattr(self, 'y')):
self.__create_XnY()
return self.x, self.y
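## A minimal usage sketch (hypothetical path; './train_pics' and the Keras-style
## model are assumptions, not part of this module):
## s = CreateCate('./train_pics')
## s.save_mapdict('cls.pickle')
## model.fit(s.x, s.y) # any model expecting one-hot labels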
| true
|
d2b9eb73ee06fb05ec3dd6ac3d8fdc9d8ee8adf7
|
Python
|
gnboorse/binpacking
|
/tools/generator/generate_all.py
|
UTF-8
| 1,267
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import subprocess
import shutil
import os
import os.path
'''
Python script used for generating all of the
JSON files needed for the test plan.
'''
# algorithms in use
ALGORITHMS = [
"NextFit",
"FirstFit",
"FirstFitDecreasing",
"BestFit",
"BestFitDecreasing",
"PackingConstraint",
"BinCompletion",
"ModifiedFirstFitDecreasing"
]
DUPLICATES = 10000
def main():
# iterate through algorithms
for algorithm in ALGORITHMS:
# iterate through item sizes
for item_size_percent in [25, 50, 75]:
# iterate through item counts
for item_count in [50, 100, 500]:
# iterate through item variances
for item_variance in [1, 2, 3]:
# generate test case
bashCommand = f'./generator -algorithm={algorithm} -count={item_count} -dups={DUPLICATES} -variability={item_variance} -center={item_size_percent} -output={algorithm}'
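                    # e.g. the first iteration expands to:
                    # ./generator -algorithm=NextFit -count=50 -dups=10000 -variability=1 -center=25 -output=NextFit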
print(f'Running bash command: {bashCommand}')
process = subprocess.Popen(
bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if __name__ == '__main__':
main()
| true
|
fff01d0f2741ee2e815758cca6f1b7a6234ea693
|
Python
|
student50/ssbm-top50-api
|
/melee.py
|
UTF-8
| 1,410
| 3.53125
| 4
|
[] |
no_license
|
import csv
def print_player(player):
matchup_data = []
json = {}
csvFile = open('2018h2h.csv')
csvReader = csv.reader(csvFile)
csvData = list(csvReader)
csvData[0] = list(filter(None, csvData[0])) #filters all the empty strings
opponents = csvData[0] #csvData[0] changed to opponents for readability
for row in csvData[1:]:
if player in row:
for col in row:
matchup_data.append(col) #puts all values into matchup_data
counter = 0
for i in range(1,len(matchup_data),2):
if matchup_data[i] == '': #turns empty string to 0's
matchup_data[i] = '0'
matchup_data[i+1] = '0'
json[str(opponents[counter])] = {'wins': matchup_data[i],'losses': matchup_data[i+1]}
counter += 1
csvFile.close()
return json
'''
def print_all(): #prints out all rows
csvFile = open('2018h2h.csv')
csvReader = csv.reader(csvFile)
csvData = list(csvReader)
csvData[0] = list(filter(None, csvData[0])) #filters all the empty strings
counter=0
for row in csvData:
counter+=1
print('row:',counter)
for col in row:
print(col, end='')
print('\n')
'''
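# A minimal usage sketch (hypothetical player name; assumes 2018h2h.csv lists it):
# print(print_player('Plup'))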
| true
|
79759c3f32bdfd53f1d2133c8333a9fdf7494827
|
Python
|
crylearner/PythonRpcFramework
|
/rpc/json/message/Response.py
|
UTF-8
| 1,596
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
'''
Created on 2015-5-25
@author: 23683
'''
import json
from rpc.json.message.RpcMessage import RpcMessage, MSG_KEY_ID, MSG_KEY_ERROR, \
MSG_KEY_RESULT, MSG_KEY_PARAMS
class Response(RpcMessage):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
super().__init__()
self.ID = 0
self.result = None
self.error = None
def __str__(self):
return self.encode()
def encode(self):
if self.error:
return json.dumps({MSG_KEY_ID:self.ID, MSG_KEY_ERROR:self.error})
else:
return json.dumps({MSG_KEY_ID:self.ID, MSG_KEY_RESULT:self.result})
def decode(self, bytestring):
message = json.loads(bytestring)
self.decodeFromJson(message)
def decodeFromJson(self, message):
if MSG_KEY_ID not in message:
raise Exception("%s has no id" % str(message))
self.ID = message.get(MSG_KEY_ID)
if MSG_KEY_ERROR not in message and MSG_KEY_RESULT not in message:
raise Exception("%s has neither result nor error words" %str(message))
self.error = message.get(MSG_KEY_ERROR, None)
self.result = message.get(MSG_KEY_RESULT, None)
if __name__ == '__main__':
response = Response()
response.ID = 1
response.result = True
print(response)
response2 = Response()
response2.decode(response.encode())
print(response2)
| true
|
84fa7818585339e24a63d4ff1e3fce742c8a2848
|
Python
|
NarishSingh/Python-3-Projects
|
/randnumfileIO/RandNumIO.py
|
UTF-8
| 2,107
| 3.84375
| 4
|
[] |
no_license
|
# Random Number Generator File IO
# Date Created: 6/12/20
# Last Modified: 6/18/20
import random
import sys
NUMBER_FILE = "nums.txt"
DELIMITER = " "
def write_nums(num_limit, rand_min, rand_max):
"""
write random numbers within range to file in rows of 10 digits
:param num_limit: number of randoms to write
:param rand_min: min of range for randomization
:param rand_max: max of range for randomization
"""
try:
nums = open(NUMBER_FILE, "w")
except IOError:
print("Could not open number file")
sys.exit()
    for i in range(num_limit):
nums.write(str(random.randint(rand_min, rand_max)) + DELIMITER)
nums.close()
def average_of_file(num_limit):
"""
read in the randoms from file and calculate the average
:param num_limit: amount of numbers in file
:return: the average of all the randoms
"""
try:
rands = open(NUMBER_FILE, "r")
except IOError:
print("Could not open number file")
sys.exit()
num_total = 0
num_list = rands.readline().rstrip(DELIMITER).split(DELIMITER)
rands.close()
# print(num_list) # debug
for n in num_list:
num_total += int(n)
return num_total / num_limit
def read_int(prompt):
    """keep prompting until the user enters a valid integer"""
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print("Invalid input")


def main():
    print("Welcome to the random number generator")
    rn_limit = read_int("Enter the number of randoms you would like printed to file: ")
    lower_bound = read_int("Enter the lower bound of range for randoms: ")
    upper_bound = read_int("Enter the upper bound of range for randoms: ")
write_nums(rn_limit, lower_bound, upper_bound)
avg = average_of_file(rn_limit)
print("The average from file is " + format(avg, '.2f'))
main()
| true
|
338e05845964c9c72e483aece71f4ca3465aef10
|
Python
|
ethan9carpenter/Python-Crash-Course
|
/alienInvasion/gameFunctions.py
|
UTF-8
| 7,229
| 2.84375
| 3
|
[] |
no_license
|
import pygame
import sys
from bullet import Bullet
from alien import Alien
from time import sleep
class GameFunctions():
def __init__(self, settings, screen, ship, bullets, playButton, stats, aliens, scoreboard):
self.settings = settings
self.screen = screen
self.ship = ship
self.bullets = bullets
self.playButton = playButton
self.stats = stats
self.aliens = aliens
self.scoreboard = scoreboard
def checkEvents(self):
#Perform actions when an event occurs
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
self.checkKeydownEvents(event)
elif event.type ==pygame.KEYUP:
self.checkKeyupEvents(event)
elif event.type == pygame.MOUSEBUTTONDOWN:
self.checkPlayButton()
def checkKeydownEvents(self, event):
#Perform actions when a key is pressed
if event.key == pygame.K_RIGHT:
self.ship.movingRight=True
elif event.key == pygame.K_LEFT:
self.ship.movingLeft=True
elif event.key == pygame.K_SPACE:
self.fireBullet()
elif event.key == pygame.K_q:
sys.exit()
def checkKeyupEvents(self, event):
#Perform actions when a key is released
if event.key == pygame.K_RIGHT:
self.ship.movingRight = False
elif event.key == pygame.K_LEFT:
self.ship.movingLeft = False
def fireBullet(self):
#Fires a new Bullet if appropriate
if len(self.bullets) < self.settings.maxBullets:
self.bullets.add(Bullet(self.settings, self.screen, self.ship))
def updateBullets(self):
"""Update the Group of bullets to reflect visual changes
and delete any Bullet that has left the screen"""
self.bullets.update()
for bullet in self.bullets.copy():
if bullet.rect.y < 0:
self.bullets.remove(bullet)
self.checkCollisions()
def checkCollisions(self):
"""Checks for collisions and automatically removes them because
of the two parameters that are set to True"""
initAlienLength = len(self.aliens)
collisions = pygame.sprite.groupcollide(self.bullets, self.aliens, True, True)
changeAlienLength = initAlienLength - len(self.aliens)
        self.stats.score += self.settings.alienScore*changeAlienLength
        self.checkHighScore()
        self.scoreboard.setupScoreboard()
if len(self.aliens) == 0:
self.settings.levelUp()
self.stats.level += 1
self.scoreboard.setupLevel()
self.bullets.empty()
self.createFleet()
def updateScreen(self):
#Refresh the screen to reflect changes to the game
self.screen.fill(self.settings.backgroundColor)
self.ship.blitme()
self.scoreboard.display()
for bullet in self.bullets.sprites():
if self.stats.gameActive:
bullet.drawBullet()
self.aliens.draw(self.screen)
if not self.stats.gameActive:
self.playButton.drawButton()
pygame.display.flip()
def getMaxAliensX(self, width):
#Maximum number of Aliens that can fit Horizontally
availableSpace = self.settings.screenWidth-2*width
return int(availableSpace/(2*width))
def getMaxAliensY(self, height):
#Maximum number of Aliens that can fit vertically
availableSpace=self.settings.screenHeight-3*height-self.ship.rect.height
return int(availableSpace/(2*height))
def createAlien(self, width, column, row):
#Creates an Alien
alien=Alien(self.settings, self.screen)
alien.x=width+2*width*column
alien.y=alien.rect.height+2*alien.rect.height*row
alien.rect.x=alien.x
alien.rect.y=alien.y
self.aliens.add(alien)
def createFleet(self):
#Create a fleet of Aliens in the Group aliens
rectangle=Alien(self.settings, self.screen).rect
width = rectangle.width
height = rectangle.height
maxAliensX = self.getMaxAliensX(width)
maxAliensY = self.getMaxAliensY(height)
for column in range(maxAliensX):
for row in range(maxAliensY):
self.createAlien(width, column, row)
def isAlienAtBottom(self):
#Determines whether an Alien has reached the bottom of the screen
screenBottom = self.screen.get_rect().bottom
for alien in self.aliens.sprites():
if alien.rect.bottom >= screenBottom:
return True
def updateAliens(self):
        #Update alien positions, handling edge bounces and hits on the ship or bottom
self.checkFleetEdges()
self.aliens.update()
if self.isAlienAtBottom() or pygame.sprite.spritecollideany(self.ship, self.aliens):
self.shipHit()
def checkFleetEdges(self):
"""Determine if an Alien has reach the edge of the screen,
changing the direction of the fleet and moving the fleet down
if appropriate."""
for alien in self.aliens.sprites():
if alien.isAtEdge():
self.changeFleetDirection()
break
def changeFleetDirection(self):
"""Change the direction of the fleet and move the fleet down"""
self.settings.alienSpeed *= -1
for alien in self.aliens.sprites():
alien.rect.y += self.settings.fleetDropSpeed
def shipHit(self):
"""Determines and performs what will happen given the Ship
has been hit"""
if self.stats.remainingShips > 0:
self.stats.remainingShips-=1
self.aliens.empty()
self.bullets.empty()
self.scoreboard.setupRemainingShips()
self.createFleet()
self.ship.centerShip()
sleep(.5)
else:
self.stats.gameActive=False
pygame.mouse.set_visible(True)
def checkPlayButton(self):
"""Checks to see if the mouse has clicked the PlayButton, and
if appropriate, will begin a new game"""
position=pygame.mouse.get_pos()
if not self.stats.gameActive and self.playButton.rect.collidepoint(position):
self.stats.resetStats()
self.stats.gameActive=True
self.aliens.empty()
self.bullets.empty()
self.scoreboard.setupAll()
self.settings.loadSettings()
self.createFleet()
self.ship.centerShip()
pygame.mouse.set_visible(False)
def checkHighScore(self):
if self.stats.score > self.stats.highScore:
self.stats.highScore = self.stats.score
self.scoreboard.setupHighScore()
| true
|
3c3128644abdcaa3ced60494f7f4295afe25e72a
|
Python
|
santosh-code/decision_tree
|
/diabetes.py
|
UTF-8
| 1,353
| 3.125
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
data = pd.read_csv("C:/Users/USER/Desktop/DT/Diabetes.csv")
data.columns = [c.replace(' ', '_') for c in data.columns]
dum1 = pd.get_dummies(data['_Class_variable'], drop_first=True)
final=pd.concat([data,dum1],axis="columns")
final1=final.drop(['_Class_variable'],axis="columns")
x=final1.iloc[:,:-1]
y=final1.iloc[:,8]
from sklearn.model_selection import train_test_split
train_x,test_x,train_y,test_y = train_test_split(x,y,test_size = 0.2,random_state=42)
from sklearn.tree import DecisionTreeClassifier as DT
# help(DT)  # uncomment to read the DecisionTreeClassifier docs
model = DT(criterion = 'entropy')
model.fit(train_x,train_y)
# Prediction on Train Data
preds=model.predict(train_x)
np.mean(preds==train_y) # Train acc=1.0
# Prediction on Test Data
preds = model.predict(test_x)
np.mean(preds==test_y) # Test Data Accuracy =0.74
##########pre-pruning
params = {'max_depth': [2,4,6,8,10,12],
'min_samples_split': [2,3,4],
'min_samples_leaf': [1,2]}
gcv = GridSearchCV(estimator=model,param_grid=params)
gcv.fit(train_x,train_y)
model1 = gcv.best_estimator_
model1.fit(train_x,train_y)
pre=model1.predict(train_x)
np.mean(pre==train_y)##train acc=0.79
pre=model1.predict(test_x)
np.mean(pre==test_y)##test acc=0.71
| true
|
02598cc5cef97391d0bf86f3e1ecc3707ca74af2
|
Python
|
jeethesh-pai/Computer-Vison-and-ML-Assignments
|
/sheet6/utils.py
|
UTF-8
| 3,235
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
def showImage(img, show_window_now=True, _ax=None):
img, color_img = convertColorImagesBGR2RGB(img)
if _ax is None:
plt_img = plt.imshow(img, interpolation='antialiased', cmap=None if color_img else 'gray')
else:
plt_img = _ax.imshow(img, interpolation='antialiased', cmap=None if color_img else 'gray')
plt.axis('off')
plt.tight_layout()
if show_window_now:
plt.show()
return plt_img
def showImages(imgs, num_cols=None, show_window_now=True, transpose=False, spacing=None, padding=None):
"""
imgs:
[image|('caption', image)|None, ...]
list of images
num_cols:
int | None
transpose:
True | False
flip rows and columns
show_window_now:
True | False
spacing:
(int, int)
horizontal and vertical spacing between images
padding:
(int, int, int, int)
left, bottom, right, top paddding
"""
plt_imgs = []
i = 0
tmp_imgs = []
for img in imgs:
tmp = type('', (), {})()
if num_cols is None:
tmp.pos = (0, i)
else:
tmp.pos = (i // num_cols, i % num_cols)
if transpose:
tmp.pos = tmp.pos[::-1]
tmp.img = img
tmp.title = None
tmp.span = (1, 1)
if img is not None:
if type(img) is tuple:
tmp.img = img[1]
tmp.title = img[0]
if len(img) > 2:
tmp.span = img[2]
i += tmp.span[0] * tmp.span[1]
else:
i += 1
tmp_imgs.append(tmp)
if num_cols is None:
grid = (1, i)
else:
num_rows = (i - 1) // num_cols + 1
grid = (num_rows, num_cols)
if transpose:
grid = grid[::-1]
for img in tmp_imgs:
if img.img is not None or img.title is not None:
ax = plt.subplot2grid(grid, img.pos, colspan=img.span[0], rowspan=img.span[1])
if img.img is not None:
plt_imgs.append(showImage(img.img, False, _ax=ax))
if img.title is not None:
ax.set_title(img.title)
plt.tight_layout()
if spacing is not None:
plt.subplots_adjust(wspace=spacing[0], hspace=spacing[1])
if padding is not None:
plt.subplots_adjust(left=padding[0], bottom=padding[1], right=1 - padding[2], top=1 - padding[3])
if show_window_now:
plt.show()
return plt_imgs
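# A minimal usage sketch (hypothetical file names):
# img_a = cv2.imread('a.png')
# img_b = cv2.imread('b.png')
# showImages([('original', img_a), ('processed', img_b)], num_cols=2)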
def convertColorImagesBGR2RGB(img):
is_color_img = len(img.shape) == 3 and img.shape[2] == 3
if is_color_img:
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img_rgb, True
return img, False
def from0_1to0_255asUint8(float_img):
img = float_img * 255
return asUint8(img)
def clip0_255asUint8(float_img):
img = float_img.copy()
np.clip(float_img, 0, 255, img)
return asUint8(img)
def asUint8(float_img):
return float_img.astype(np.uint8)
def PLACEHOLDER(img):
return np.zeros(img.shape, np.uint8)
def PLACEHOLDER_IMG(img):
return img.copy()
def REPLACE_THIS(input):
return input
def REPLACE_THIS_MODEL(input):
return 1
| true
|
c3ba0af4e0b4f02463989196ba7df4827770dcbc
|
Python
|
BhatnagarKshitij/Algorithms
|
/LinkedList/mergeTwoSortedList.py
|
UTF-8
| 1,323
| 3.78125
| 4
|
[] |
no_license
|
'''
Question link: https://leetcode.com/problems/merge-two-sorted-lists/
Merge two sorted linked lists and return it as a sorted list.
The list should be made by splicing together the nodes of the first two lists.
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
if not l1 and not l2:
return
elif not l1:
return l2
elif not l2:
return l1
sortedListNode=ListNode()
head=sortedListNode
while l1 and l2:
if l1.val<l2.val:
sortedListNode.next=ListNode(l1.val)
l1=l1.next
else:
sortedListNode.next=ListNode(l2.val)
l2=l2.next
sortedListNode=sortedListNode.next
while l1:
sortedListNode.next=ListNode(l1.val)
sortedListNode=sortedListNode.next
l1=l1.next
while l2:
sortedListNode.next=ListNode(l2.val)
sortedListNode=sortedListNode.next
l2=l2.next
return head.next
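# A minimal usage sketch (assumes the commented-out ListNode class above):
# l1 = ListNode(1, ListNode(2, ListNode(4)))
# l2 = ListNode(1, ListNode(3, ListNode(4)))
# merged = Solution().mergeTwoLists(l1, l2) # yields 1->1->2->3->4->4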
| true
|
ea9393de2256ebfd5192c2b5a93f3d066fc7d7ac
|
Python
|
zinsmatt/Programming
|
/CodeForces/785A-Anton_and_Polyhedrons.py
|
UTF-8
| 279
| 3.8125
| 4
|
[] |
no_license
|
n = int(input())
s = 0
for i in range(n):
p = input()
if p == "Tetrahedron":
s += 4
elif p == "Cube":
s += 6
elif p == "Octahedron":
s += 8
elif p == "Dodecahedron":
s += 12
elif p == "Icosahedron":
s += 20
print(s)
| true
|
e11863f6f99d081feb8273fa4e3cb35d7a5a066a
|
Python
|
dimDamyanov/PythonOOP
|
/Exams/2-apr-2020/skeleton/tests/test_controller.py
|
UTF-8
| 4,260
| 2.859375
| 3
|
[] |
no_license
|
import unittest
from project.controller import Controller
from project.player.advanced import Advanced
from project.player.beginner import Beginner
from project.card.magic_card import MagicCard
from project.card.trap_card import TrapCard
class TestBattleField(unittest.TestCase):
def initialize_players_with_cards(self) -> None:
self.controller.add_player('Beginner', 'John')
self.controller.add_player('Advanced', 'Mike')
self.controller.add_card('Magic', 'MagicCard1')
self.controller.add_card('Magic', 'MagicCard2')
self.controller.add_card('Trap', 'TrapCard1')
self.controller.add_card('Trap', 'TrapCard2')
self.controller.add_player_card('John', 'MagicCard1')
self.controller.add_player_card('John', 'TrapCard1')
self.controller.add_player_card('Mike', 'MagicCard2')
self.controller.add_player_card('Mike', 'TrapCard2')
def setUp(self) -> None:
self.controller = Controller()
def test_init_attrs_set(self) -> None:
self.assertEqual(self.controller.player_repository.count, 0)
self.assertEqual(self.controller.player_repository.players, [])
self.assertEqual(self.controller.card_repository.count, 0)
self.assertEqual(self.controller.card_repository.cards, [])
def test_add_beginner_player(self) -> None:
msg = self.controller.add_player('Beginner', 'John')
self.assertEqual(msg, 'Successfully added player of type Beginner with username: John')
self.assertEqual(self.controller.player_repository.count, 1)
self.assertEqual(self.controller.player_repository.players[0].username, 'John')
self.assertTrue(isinstance(self.controller.player_repository.players[0], Beginner))
def test_add_advanced_player(self) -> None:
msg = self.controller.add_player('Advanced', 'John')
self.assertEqual(msg, 'Successfully added player of type Advanced with username: John')
self.assertEqual(self.controller.player_repository.players[0].username, 'John')
self.assertTrue(isinstance(self.controller.player_repository.players[0], Advanced))
def test_add_card_magic(self) -> None:
msg = self.controller.add_card('Magic', 'Card')
self.assertEqual(msg, 'Successfully added card of type MagicCard with name: Card')
self.assertEqual(self.controller.card_repository.count, 1)
self.assertEqual(self.controller.card_repository.cards[0].name, 'Card')
self.assertTrue(isinstance(self.controller.card_repository.cards[0], MagicCard))
def test_add_card_trap(self) -> None:
msg = self.controller.add_card('Trap', 'Card')
self.assertEqual(msg, 'Successfully added card of type TrapCard with name: Card')
self.assertEqual(self.controller.card_repository.count, 1)
self.assertEqual(self.controller.card_repository.cards[0].name, 'Card')
self.assertTrue(isinstance(self.controller.card_repository.cards[0], TrapCard))
def test_add_player_card(self) -> None:
self.controller.add_card('Magic', 'Card')
self.controller.add_player('Beginner', 'John')
msg = self.controller.add_player_card('John', 'Card')
self.assertEqual(msg, 'Successfully added card: Card to user: John')
self.assertEqual(self.controller.player_repository.find('John').card_repository.count, 1)
self.assertEqual(self.controller.player_repository.find('John').card_repository.cards[0].name, 'Card')
self.assertTrue(isinstance(self.controller.player_repository.find('John').card_repository.cards[0], MagicCard))
def test_fight_method(self) -> None:
self.initialize_players_with_cards()
msg = self.controller.fight('John', 'Mike')
self.assertEqual(msg, 'Attack user health 50 - Enemy user health 150')
def test_report_method(self) -> None:
self.initialize_players_with_cards()
self.assertEqual(self.controller.report(),
'Username: John - Health: 50 - Cards 2\n### Card: MagicCard1 - Damage: 5\n### Card: TrapCard1 - Damage: 120\nUsername: Mike - Health: 250 - Cards 2\n### Card: MagicCard2 - Damage: 5\n### Card: TrapCard2 - Damage: 120\n')
if __name__ == '__main__':
unittest.main()
| true
|
4aa384178957b1e1fa9b4d3241a7eba0cd3e3226
|
Python
|
mrobotique/python_dash
|
/indoortemp_sender.py
|
UTF-8
| 1,865
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/python
"""
Created on Tue Oct 17 16:32:14 2017
@author: mromero
"""
import yaml #Pefs load
import paho.mqtt.client as paho #mqtt lib
import os
import glob
import time
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def on_publish(client, userdata, mid):
print("mid: "+str(mid))
def getPrefs(PrefFile):
f = open(PrefFile,"r")
MyPrefs = f.read()
return yaml.load(MyPrefs)
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
temp_f = temp_c * 9.0 / 5.0 + 32.0
return [temp_c, temp_f]
if __name__ == "__main__":
try:
PrefFile = "prefs.yaml"
TopicName = "dashboard/sensors/indoortemp"
YamlPrefs = getPrefs(PrefFile)
client = paho.Client()
client.on_publish = on_publish
client.connect(YamlPrefs['mosquitto']['server'],YamlPrefs['mosquitto']['port'])
client.loop_start()
while(True):
Temp = read_temp()
print Temp
if (Temp[0]<0):
Temp[0] = round(Temp[0])
else:
Temp[0] = round(Temp[0],1)
(rc, mid) = client.publish(TopicName, str(Temp[0]), qos=2)
time.sleep(YamlPrefs['temperature']['refreshing_rate'])
    except Exception:
(rc, mid) = client.publish(TopicName, "KO", qos=2)
time.sleep(0.1)
print "fatal error"
| true
|
4a922cfa19614ad1f88fd9c37b17318d8aaaf263
|
Python
|
mark-ni/competitive-programming
|
/usaco/Contests/usaco_silver_3.py
|
UTF-8
| 644
| 2.84375
| 3
|
[] |
no_license
|
with open('mountains.in', 'r') as fin:
count = int(fin.readline().strip())
mountainList = []
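    # transform each peak (x, y) into (b1, b2) = (x + y, y - x); peak B is
    # hidden by peak A exactly when b1_B <= b1_A and b2_B <= b2_A, so the
    # visible peaks form a Pareto frontier in (b1, b2) space.
    # e.g. (1, 1) -> (2, 0) and (3, 1) -> (4, -2): neither pair dominates
    # the other, so both peaks stay visible.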
for i in range(count):
line = fin.readline().strip().split(' ')
x = int(line[0])
y = int(line[1])
b1 = x + y
b2 = y - x
fax = False
for mountain in mountainList:
if b1 <= mountain[0] and b2 <= mountain[1]:
fax = True
break
if not fax:
for mountain in mountainList:
if b1 >= mountain[0] and b2 >= mountain[1]:
mountainList.remove(mountain)
mountainList.append([b1, b2])
with open('mountains.out', 'w') as fout:
fout.write(str(len(mountainList)))
| true
|
efb298a8c2b5f3a229f55fdc4cb0d3ac941de97d
|
Python
|
frank2019/tech_note_blog
|
/common/python/tts.py
|
UTF-8
| 612
| 2.75
| 3
|
[] |
no_license
|
import win32com.client as wincl
from tkinter import *
def text2Speech():
text = e.get()
speak = wincl.Dispatch("SAPI.SpVoice")
speak.Speak(text)
#window configs
tts = Tk()
tts.wm_title("Text to Speech")
tts.geometry("600x400")
tts.config(background="#708090")
f=Frame(tts,height=600,width=800,bg="#bebebe")
f.grid(row=0,column=0,padx=10,pady=5)
lbl=Label(f,text="Enter the text to convert: ")
lbl.grid(row=1,column=0,padx=10,pady=2)
e=Entry(f,width=80)
e.grid(row=2,column=0,padx=10,pady=2)
btn=Button(f,text="Speak",command=text2Speech)
btn.grid(row=3,column=0,padx=20,pady=10)
tts.mainloop()
| true
|
fd46b19a421208a1df74ebdd198f63e18395ddc1
|
Python
|
Exorust/Fuzzy-Time-Series-Analysis
|
/plot.py
|
UTF-8
| 587
| 2.75
| 3
|
[] |
no_license
|
import pandas
import numpy as np
import matplotlib.pyplot as plt
df = pandas.read_csv('newdata.csv')
# print(df)
X = df["Days"].values
# print(X)
Y_Mactual = df["Mactual"].values
Y_Mpred = df["Mpred"].values
Y_Tactual = df["Tactual"].values
Y_Tpred = df["Tpred"].values
plt.plot(X,Y_Mactual,color="red",label="Actual Values")
plt.plot(X,Y_Mpred, color="blue",label="Predicted Values")
plt.suptitle('Mesophilic TS in Waste Solids', fontsize=20)
plt.xlabel('Days', fontsize=14)
plt.ylabel('TS (g/l)', fontsize=14)
plt.legend(loc='upper left')
# plt.show()
plt.savefig('Meso_fides.jpg')
| true
|
f1580cb6fbfff1391975c69caf447f3c0b5ba846
|
Python
|
starman011/python_programming
|
/02_Lists&tuples/01_lists.py
|
UTF-8
| 152
| 3.71875
| 4
|
[] |
no_license
|
#creating a list using []
a = [1,3,23,4,3]
#print the list with index using a[0]
print(a[0])
#using different types
b = [1,'saqlain' ,1.50]
print(b[0:])
| true
|
43ab2123d1ae3d9ea4d6dbb5baec12260607d3ff
|
Python
|
sai-kumar-peddireddy/PythonLearnigTrack
|
/Strings/DocStrings.py
|
UTF-8
| 729
| 4
| 4
|
[] |
no_license
|
"""
Sun Jul 22 15:13:47 IST 2018
source :- https://www.geeksforgeeks.org/python-docstrings/
It’s specified in source code that is used, like a comment, to document a specific segment of code.
Unlike conventional source code comments, the docstring should describe what the function does, not how.
"""
def additionfun(parm1, parm2):
"""
This is a Doc string here we can say how to use this function.
this function returns sum of 2 numbers
:param parm1: pass any number
:param parm2: pass any number
:return:sum of 2 numbers
"""
return parm1 + parm2
print("usage of doc String __doc__")
print(additionfun.__doc__)
print("----------------------------")
print("by help():")
help(additionfun)
| true
|
6f2efc2bb79c1da14a8a5830fecc6f262b0043f7
|
Python
|
alexwork13/python_lessons
|
/statements/compr_list_2.py
|
UTF-8
| 208
| 3.90625
| 4
|
[] |
no_license
|
for i in range(1,100):
if i % 3 == 0:
print(f"{i} - Fizz div 3")
if i % 5 == 0:
print(f"{i} - Buzz div 5")
if i % 3 == 0 and i % 5 == 0:
print(f"{i} - FizzBuzz div 3and5")
| true
|
ed07c5d0cabf2b1b274622f933c78e7133ffd472
|
Python
|
arsezzy/python_base
|
/lesson3/lesson3_5.py
|
UTF-8
| 1,461
| 4.15625
| 4
|
[] |
no_license
|
#!/usr/bin/python3
def summarization(current_sum, digits_list, symbol):
    '''Summarize integers in digits_list and add them to current_sum
current_sum - integer
digits_list - list of digit, where digit is string
symbol - special symbol for exit. If special symbol is met,
stop to sum and return one_more_time = False
if any other symbol is met, skip it
'''
one_more_time = True
for digit in digits_list:
try:
digit = int(digit)
except ValueError:
try:
exit_symbol_index = digit.index(symbol)
if exit_symbol_index == 0:
digit = 0
else:
digit = int(digit[:exit_symbol_index])
current_sum += digit
one_more_time = False
break
except ValueError:
print(f"{digit} is not an integer, missing it")
digit = 0
current_sum += digit
return current_sum, one_more_time
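# A worked example (sketch): summarization(0, ['1', '2!', '5'], '!') adds 1,
# then hits '2!', keeps the 2 before the '!', stops summing and returns
# (3, False); the trailing '5' is never read.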
special_symbol = '!'
current_sum = 0
again = True
while again:
user_digits = input(f"please enter a string of digits with spacebar"
f" separator. For exit "
f"press '{special_symbol}'\n").split()
current_sum, again = summarization(current_sum, user_digits, special_symbol)
print(f"current sum is {current_sum}")
print(f"special symbol {special_symbol} is entered, exiting")
| true
|
b5f2f41401766568f09b4bed39d468acd24e9651
|
Python
|
code4tots/simple-amixer-gui
|
/simple_amixer_gui.py
|
UTF-8
| 1,951
| 3.15625
| 3
|
[] |
no_license
|
#!/usr/bin/python
'''
Should be compatible with both Python 2 and 3.
From what I understand, the only issue is the tkinter package name.
'''
import sys
from os import popen
if sys.version_info >= (3,0):
from tkinter import *
else:
from Tkinter import *
'''
callbacks
'''
def on_new_scale_value(v):
popen('amixer -c 0 set Master %s' % v).read()
def on_mouse_wheel(event):
if event.num == 5 or event.delta == -120:
value.set(value.get()-1)
if event.num == 4 or event.delta == 120:
value.set(value.get()+1)
on_new_scale_value(value.get())
'''
get amixer data
'''
def cmd(c):
'''
Call system command specified by 'c' and return output stripping whitespace.
'''
return popen(c).read().strip()
start = int(cmd(
r"""amixer get Master |
grep -m 1 -o 'Playback [0-9][0-9]* \[[0-9][0-9]*\%\]' | \
grep -m 1 -o '[0-9][0-9]*'""").split()[0])
low, high = map(int,cmd(
r"""amixer get Master | \
grep -m 1 -o 'Limits: Playback [0-9][0-9]* \- [0-9][0-9]*' | \
grep -m 2 -o '[0-9][0-9]*'""").split())
'''
Setup Tkinter
'''
root = Tk()
value = DoubleVar()
value.set(start)
scale = Scale(root,
variable = value,
command = on_new_scale_value,
from_=low,
to=high,
width=15,
length=200)
scale.pack(anchor=CENTER)
'''
Windows throws <MouseWheel> events,
Linux throws <Button-4> and <Button-5> events
However, it probably is silly adding Windows support here, because I'm pretty
sure that Windows doesn't use alsamixer or grep.
Maybe it can just be reference if I want to create another Python Tkinter
script with mouse scrolls.
'''
root.bind("<MouseWheel>", on_mouse_wheel)
root.bind("<Button-4>", on_mouse_wheel)
root.bind("<Button-5>", on_mouse_wheel)
'''
Set window to upper right corner,
and dimension of the root window.
'''
root.geometry('60x200-0+0')
root.wm_title('simple_amixer_gui')
root.mainloop()
| true
|
113bbfe7afed99f60474419ec2bcf5a79255c90b
|
Python
|
ZhengkaiZ/Summer-Project-Data-Processing-
|
/graph_helper.py
|
UTF-8
| 2,630
| 3.109375
| 3
|
[] |
no_license
|
""" Data Analysis based on Graph
This code will generate a graph.out file to print out our desired graph
"""
import sys
import networkx as nx
import pylab as plt
from sets import Set
def read_device(file_name, static_device):
"""
this module is to read the device list from disk and remove noise.
Args:
file_name: input file to read from disk
static_device: the static device list
Returns:
        device_list: a list of sets which store the data from each node
                     after removing noise
"""
device_list = [];
block = -1;
with open(file_name) as f:
for line in f.readlines():
list = line.split(" ")
if (list[0] == '*'):
block += 1
device_list.append(Set())
continue
else:
if (list[0] in static_device):
continue
device_list[block].add(list[0])
return device_list
def time_switch(desired_time):
"""
this module is to switch time from BST to ET
Args:
desired_time: time at ET
Returns:
        the return value: the corresponding list position
"""
return (desired_time + 4) * 6 * 60
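# e.g. time_switch(10) == (10 + 4) * 360 == 5040, assuming one list entry
# every 10 seconds (6 entries per minute)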
def connectivity_at_certain_time(time, device_list, node):
"""
this module is to build graph based on the device list
Args:
time: desired time to process
        device_list : the device list read earlier
    Returns:
        dict : dictionary containing the graph built
"""
device_count = len(device_list)
dict = {}
length = len(device_list);
for i in range(0, length):
temp_set = device_list[i]
for entry in temp_set[time]:
for j in range(0, length):
if (j == i):
continue
for x in range (1, 6):
if (entry in device_list[j][time + x]):
if (dict.get(str(node[i]) + " " + str(node[j])) == None):
dict[str(node[i]) + " " + str(node[j])] = 1
else:
dict[str(node[i]) + " " + str(node[j])] += 1
return dict
def dict_to_graph(dict):
"""
this module is to build graph based on the dictionary
Args:
dict : dictionary contains the graph built
Returns:
G : graph we built with label (weight)
"""
G = nx.MultiDiGraph()
for key in dict.keys():
pos = key.split(" ")
G.add_edge(pos[0], pos[1], label=str(dict.get(key)))
return G
| true
|
bf62bf514a66ed993fe7d696423a743da3f5d4a5
|
Python
|
CyborgVillager/Learning_py_info
|
/basic info/Diction/dicti0.py
|
UTF-8
| 589
| 3.171875
| 3
|
[] |
no_license
|
month_Convert = {
    'Jan': 'January',
'Feb': 'February',
'Mar': 'March',
'Apr': 'April',
'May': 'May',
'Jun': 'June',
'Jul': 'July',
'Aug': 'August',
'Sep': 'September',
'Oct': 'October',
'Nov': 'November',
'Dec': 'December',
}
day_Convert = {
'Jan': '31',
'Feb': '28',
'Mar': '31',
'Apr': '30',
'May': '31',
'Jun': '30',
'Jul': '31',
'Aug': '31',
'Sep': '30',
'Oct': '31',
'Nov': '30',
'Dec': '31',
}
print('The month ' + month_Convert['Nov'] + ' has ' + day_Convert['Nov'] + ' days')
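# The same lookup works for any month, e.g.:
# for m in month_Convert:
#     print('The month ' + month_Convert[m] + ' has ' + day_Convert[m] + ' days')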
| true
|
3f55dfb7b05bae020e5e6960d08da984ddb59f3c
|
Python
|
rbunge-nsc/it111-demos
|
/Modify/ModifyFile.py
|
UTF-8
| 212
| 3.578125
| 4
|
[] |
no_license
|
filename = input("Enter a file name:")
f = open(filename, "a")
print("File name " + filename + " has been opened.")
textinput = input("Enter some text to add to the file:")
f.write(textinput)
f.close()
| true
|
16982f2479f252a3df1e6db5ccbd60c8d959e669
|
Python
|
ErenBtrk/Python-Fundamentals
|
/Numpy/NumpyLinearAlgebra/Exercise13.py
|
UTF-8
| 253
| 3.921875
| 4
|
[] |
no_license
|
'''
13. Write a NumPy program to calculate the QR decomposition of a given matrix.
'''
import numpy as np
m = np.array([[1,2],[3,4]])
print("Original matrix:")
print(m)
result = np.linalg.qr(m)
print("Decomposition of the said matrix:")
print(result)
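# Quick sanity check (sketch): the product q @ r should reconstruct m
# q, r = result
# print(np.allclose(np.dot(q, r), m)) # expected: True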
| true
|
dac79a3997dec9bfaea7cd88364f949af61b8870
|
Python
|
nekapoor7/Python-and-Django
|
/PythonNEW/Practice/StringVowelsSet.py
|
UTF-8
| 156
| 3.625
| 4
|
[] |
no_license
|
"""Python program to count number of vowels using sets in given string"""
import re
s = input()
s1 = s.lower()
ss = re.findall(r'[aeiou]',s1)
print(set(ss))
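# e.g. input "Hello World" -> prints {'e', 'o'} (set order may vary)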
| true
|
a8547e75ba2ceb5c70d74f3c25cfa37b94adffc8
|
Python
|
gk1914/neural-network-mnist
|
/neural_network.py
|
UTF-8
| 4,923
| 3.15625
| 3
|
[] |
no_license
|
import numpy as np
import random
class NeuralNetwork(object):
def __init__(self, layer_sizes):
"""Neural network consisting of 'self.num_layers' layers. Each layer has
a specific number of neurons specified in 'layer_sizes', which defines
the architecture of the NN.
Weights initialized using Gaussian distribution with mean 0 & st. dev. 1
over the square root of the number of weights connecting to the same neuron."""
self.num_layers = len(layer_sizes)
self.weights = [np.random.randn(size2, size1) / np.sqrt(size1)
for size1, size2 in zip(layer_sizes[:-1], layer_sizes[1:])]
self.biases = [np.random.randn(size, 1) for size in layer_sizes[1:]]
def feedforward(self, a):
"""Return the network's output if 'a' is the input."""
for w, b in zip(self.weights, self.biases):
a = sigmoid(np.dot(w, a) + b)
return a
def stochastic_gradient_descent(self, training_data, epochs, batch_size, learn_rate, test_data=None):
"""Implements the method of stochastic gradient descent, training the network
by passing over the training data multiple times ('epochs'), each time using
subsets of data of size 'batch_size'."""
        training_data = list(training_data) # could maybe be moved out into load_mnist_data
n = len(training_data)
if test_data:
            test_data = list(test_data) # same as above
n_test = len(test_data)
for i in range(epochs):
# create random batches for this epoch
random.shuffle(training_data)
batches = [training_data[j:j+batch_size] for j in range(0, n, batch_size)]
# update batch
for batch in batches:
self.update_batch(batch, learn_rate)
# evaluate learning progress
if test_data:
print("Epoch {} : {} / {}".format(i, self.evaluate(test_data), n_test))
else:
print("Epoch {} complete".format(i))
def backpropagation(self, x, y):
"""Backpropagation algorithm."""
grad_w = [np.zeros(w.shape) for w in self.weights]
grad_b = [np.zeros(b.shape) for b in self.biases]
# feedforward
activation = x
activations = [x]
zs = []
for w, b in zip(self.weights, self.biases):
z = np.dot(w, activation) + b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = self.cost_derivative(activations[-1], y) * sigmoid_derivative(zs[-1])
        grad_w[-1] = np.dot(delta, activations[-2].transpose())
        grad_b[-1] = delta
for layer in range(2, self.num_layers):
z = zs[-layer]
sig_deriv = sigmoid_derivative(z)
delta = np.dot(self.weights[-layer+1].transpose(), delta) * sig_deriv
grad_w[-layer] = np.dot(delta, activations[-layer-1].transpose())
grad_b[-layer] = delta
return grad_w, grad_b
def update_batch(self, batch, learn_rate):
"""Update the weights & biases of the network according to gradient descent of
a single batch using backpropagation."""
grad_w = [np.zeros(w.shape) for w in self.weights]
grad_b = [np.zeros(b.shape) for b in self.biases]
batch_size = len(batch)
for x, y in batch:
delta_grad_w, delta_grad_b = self.backpropagation(x, y)
grad_w = [gw + dgw for gw, dgw in zip(grad_w, delta_grad_w)]
grad_b = [gb + dgb for gb, dgb in zip(grad_b, delta_grad_b)]
self.weights = [w - (learn_rate/batch_size) * gw
for w, gw in zip(self.weights, grad_w)
]
self.biases = [b - (learn_rate/batch_size) * gb
for b, gb in zip(self.biases, grad_b)
]
def evaluate(self, test_data):
"""Return the number of correctly classified test inputs."""
test_results = [(np.argmax(self.feedforward(x)), y) for x, y in test_data]
return sum(int(x == y) for x, y in test_results)
def cost_derivative(self, output_activations, y):
"""Return vector of partial derivatives of quadratic cost function (f(a) = 1/2 (a-y)^2)
in respect to output activations."""
return output_activations - y
# ------------------------------------------------------------------------------------
# Helper functions
# ------------------------------------------------------------------------------------
def sigmoid(z):
"""Compute sigmoid function."""
return 1 / (1 + np.exp(-z))
def sigmoid_derivative(z):
"""Return derivative of sigmoid function."""
return sigmoid(z) * (1-sigmoid(z))
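# A minimal usage sketch (hypothetical; load_mnist_data is an assumed helper,
# not part of this module, returning lists of (input, label) pairs):
# training_data, test_data = load_mnist_data()
# net = NeuralNetwork([784, 30, 10])
# net.stochastic_gradient_descent(training_data, epochs=30, batch_size=10,
#                                 learn_rate=3.0, test_data=test_data)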
| true
|
03b548397b906cc5f1edd9fad1382c646fdaca2a
|
Python
|
pacellig/personal
|
/RSA_AES_key_encryption.py
|
UTF-8
| 2,809
| 3.46875
| 3
|
[] |
no_license
|
"""
Created on: 30/05/18
Author : pacellig
Requires pycryptodome ($ pip install pycryptodome) in order to use 'AES.MODE_EAX' mode.
1) Produce a private/public key couple
2) Use the public key (RSA) to encrypt the generated OTP
3) Use the generated OTP to encrypt, via AES, the desired message
4) Decrypt the message using the corresponding private key (RSA)
"""
from Crypto.Random import get_random_bytes
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES, PKCS1_OAEP
def key_gen():
# Generate a public/ private key pair using 4096 bits key length
key = RSA.generate(4096)
# Private key in PEM format
private_key = key.exportKey("PEM")
# Public key in PEM format
public_key = key.publickey().exportKey("PEM")
# Save private and public keys to file
fd = open("private_key.pem", "wb")
fd.write(private_key)
fd.close()
fd = open("public_key.pem", "wb")
fd.write(public_key)
fd.close()
def encrypt_message(plaintext, public_key):
# Generate a random session key, to use as OTP
session_key = get_random_bytes(16)
# Encrypt the session key with the public RSA key
rsa_key = RSA.importKey(public_key)
rsa_key = PKCS1_OAEP.new(rsa_key)
enc_session_key = rsa_key.encrypt(session_key)
# Encrypt the data with AES using encrypted session key
aes_key = AES.new(session_key, AES.MODE_EAX)
ciphertext, tag = aes_key.encrypt_and_digest(plaintext)
file_out = open("encrypted.bin", "wb")
[file_out.write(x) for x in (enc_session_key, aes_key.nonce, tag, ciphertext)]
file_out.close()
def decrypt_message(path_to_encrypted_file, private_key):
encrypted_fd = open(path_to_encrypted_file, "rb")
rsa_key = RSA.importKey(private_key)
enc_session_key, nonce, tag, ciphertext = [encrypted_fd.read(x) for x in (rsa_key.size_in_bytes(), 16, 16, -1)]
# Decrypt the session key with the private RSA key
rsa_key = PKCS1_OAEP.new(rsa_key)
session_key = rsa_key.decrypt(enc_session_key)
# Decrypt the data with the AES session key
aes_key = AES.new(session_key, AES.MODE_EAX, nonce)
data = aes_key.decrypt_and_verify(ciphertext, tag)
return data.decode("utf-8")
def test_encrypt_decrypt():
# Use the public key for encryption
fd = open("public_key.pem", "rb")
public_key = fd.read()
fd.close()
# Read plaintext from file
fd = open('plaintext.txt', 'r')
plaintext = fd.read()
encrypt_message(plaintext, public_key)
# Use the private key for decryption
fd = open("private_key.pem", "rb")
private_key = fd.read()
fd.close()
decrypted = decrypt_message("encrypted.bin", private_key)
print decrypted
if __name__ == '__main__':
# Generate private/public keys pair
key_gen()
test_encrypt_decrypt()
| true
|
988ac42e624903a79be427b4aef9d0b5ccc6aa02
|
Python
|
6/jcrawler
|
/2ch_parser.py
|
UTF-8
| 4,750
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
import glob
import os
import re
import csv
import operator
from datetime import datetime
# Source: http://stackoverflow.com/questions/3217682/checking-validity-of-email-in-django-python
REGEX_EMAIL = re.compile("[\w\.-]+@[\w\.-]+\.\w{2,4}")
def analyze_2ch():
files = glob.glob(os.path.join("data/2ch/", "*.data"))
messages = {}
num_messages = 0
for i,fpath in enumerate(files):
thread_id = fpath.split("_")[-1].split(".")[0]
extracted_on = fpath.split("/")[-1].split("_")[0]
extracted_on = datetime.strptime(extracted_on, "%Y%m%d%H%M%S")
print i+1,"of",len(files),fpath
f = open(fpath, 'r').read()
messages[thread_id] = thread_parser(f, extracted_on)
num_messages += len(messages[thread_id])
print "Analyzed {0} messages".format(num_messages)
visited = []
for fpath in files:
# determine default "anonymous" name (varies by board/subdomain)
key = fpath.split("_")
key = "{0}_{1}".format(key[1], key[2])
if key in visited:
continue
visited.append(key)
threads = glob.glob(os.path.join("data/2ch/", "*_{0}_*.data".format(key)))
names = {}
for t in threads:
thread_id = t.split("_")[-1].split(".")[0]
for m in messages[thread_id]:
if m["name"] not in names:
names[m["name"]] = 1
else:
names[m["name"]] += 1
sorted_names = sorted(names.iteritems(), key=operator.itemgetter(1))
default_name = sorted_names[-1][0]
# convert name string --> bool: if user has custom name or just "anonymous"
for t in threads:
thread_id = t.split("_")[-1].split(".")[0]
for i,m in enumerate(messages[thread_id]):
has_custom_name = 0 if m["name"] == default_name else 1
messages[thread_id][i]["name"] = has_custom_name
# convert dict into a list so can write to CSV
message_data = []
for thread_id in messages:
for m in messages[thread_id]:
message_data.append([m["name"], m["valid_email"], m["year"], m["age"], m["replies"], m["length"]])
headers = ("name", "email", "year", "age", "replies", "length")
write_csv("2ch.csv", headers, message_data)
def thread_parser(raw_data, extracted_on):
thread = []
messages = raw_data.split("<dt>")
messages.pop(0) # this first item is not a message
for m in messages:
meta, msg = m.split("<dd>")
meta_data = meta_parser(meta, extracted_on)
if not meta_data:
continue
data, reply_to = message_parser(msg, meta_data)
for message_id in reply_to:
for i,msg in enumerate(thread):
if msg["id"] == message_id:
thread[i]["replies"] += 1
break
thread.append(data)
return thread
# Parse message meta-data. Returns False if invalid.
def meta_parser(raw, extracted_on):
meta = re.sub(r" (ID:[^<]+)?</dt>", "", raw)
meta = meta.split("\x81F") # Shift-JIS colon character
message_id = int(meta[0].strip())
date = re.sub(r"\([^)]+\)", "", meta[-1]) # remove day of the week
m = re.search("([0-9]{2,4})(/[0-9]{1,2}/[0-9]{1,2} [0-9]{1,2}:[0-9]{1,2})", date)
if m:
date = m.group(0)
if m.group(1).startswith("0"):
# messages ~2005 and before have abbreviated year (ex: 05 instead of 2005)
date = "20"+m.group(1)+m.group(2)
else:
# When message is deleted, the date is deleted as well.
return False
try:
created_on = datetime.strptime(date, "%Y/%m/%d %H:%M")
except ValueError:
# In one case, messages have an invalid date of "2006/03/32".
return False
age = extracted_on - created_on
est_jst_diff = 13*60*60 # time diff between EST and Japan time (13 hours)
age = (age.days * 86400) + age.seconds + est_jst_diff
if age < 0:
# in one case, an invalid date lists the year as "2665"
return False
name_string = "".join(meta[1:-1])
name = name_string.split("<b>")[1].split("</b>")[0]
email = REGEX_EMAIL.search(name_string)
has_email = 1 if email else 0
return {
"id": message_id
,"year": created_on.year
,"age": age
,"name": name
,"valid_email": has_email
,"replies": 0
}
def message_parser(raw, data):
    msg = re.sub(r"<br><br> </dd>(</dl>)?", "", raw)
    msg = re.sub(r" <br> ", "", msg) # remove inline linebreaks
    msg = msg.strip()
    data["length"] = len(msg)
    reply_to = re.findall("read.cgi/[^/]+/[0-9]+/([0-9]+)", msg)
    reply_to = map(int, list(set(reply_to)))
    # remove invalid replies to comments that haven't been posted yet
    reply_to = [r for r in reply_to if r < data["id"]]
    return [data, reply_to]
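
# Illustrative reply link (hypothetical): a body containing
#   "read.cgi/board/1134567890/5"
# records a reply to message id 5, provided 5 is lower than the current id.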
def write_csv(fname, headers, list_of_lists):
    f = open(fname, 'wb')
    writer = csv.writer(f)
    writer.writerow(headers)
    for l in list_of_lists:
        writer.writerow(l)
    f.close()

if __name__ == "__main__":
    analyze_2ch()
| true
|
61d9cbaa5f17203ee490e9ce1ee17cdf4d3b7c4e
|
Python
|
Bgh0602/learn-python
|
/dice.py
|
UTF-8
| 422
| 3.75
| 4
|
[] |
no_license
|
# Dice game
'''Guess the dice number. Loop forever and break out once the guess is correct.'''
import random
diceN = random.randint(1, 6)
trial = 1
while True:
    guess = int(input('What is the dice number?(1~6):'))
    if guess != diceN:
        print('again!')
        trial += 1
    else:
        print('correct!')
        print('your trial:', trial)
        break
| true
|
388c09e071b8ac5efc4e90623c3c892a6070db69
|
Python
|
jarinfrench/iprPy
|
/iprPy/tools/get_mp_structures.py
|
UTF-8
| 3,740
| 2.671875
| 3
|
[] |
no_license
|
# Standard Python libraries
from pathlib import Path
import uuid

# https://github.com/usnistgov/DataModelDict
from DataModelDict import DataModelDict as DM

# https://github.com/usnistgov/atomman
import atomman as am

# iprPy imports
from .. import libdir

def build_reference_crystal_model(name, ucell, sourcename, sourcelink):
    """Generates a reference_crystal data model"""
    model = DM()
    model['reference-crystal'] = DM()
    model['reference-crystal']['key'] = str(uuid.uuid4())
    model['reference-crystal']['id'] = name
    model['reference-crystal']['source'] = DM()
    model['reference-crystal']['source']['name'] = sourcename
    model['reference-crystal']['source']['link'] = sourcelink
    model['reference-crystal']['atomic-system'] = ucell.model()['atomic-system']
    return model
# Define subset generator
def subsets(fullset):
    """Yields element combination subsets"""
    for i, item in enumerate(fullset):
        yield [item]
        if len(fullset) > 1:
            for subset in subsets(fullset[i+1:]):
                yield [item] + subset
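
# For example: list(subsets(['Al', 'Cu', 'Ni'])) gives
# [['Al'], ['Al', 'Cu'], ['Al', 'Cu', 'Ni'], ['Al', 'Ni'],
#  ['Cu'], ['Cu', 'Ni'], ['Ni']]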
def get_mp_structures(elements, api_key=None, lib_directory=None):
    """
    Accesses the Materials Project and downloads all structures for a list of
    elements as poscar files.

    Parameters
    ----------
    elements : list
        A list of element symbols.
    api_key : str, optional
        The user's Materials Project API key. If not given, will use the
        "MAPI_KEY" environment variable.
    lib_directory : str, optional
        Path to the lib_directory to save the poscar files to. Default uses
        the iprPy library/reference_crystal directory.
    """
    # Function-specific imports
    import pymatgen as pmg
    from pymatgen.ext.matproj import MPRester
    from pymatgen.symmetry.analyzer import SpacegroupAnalyzer

    # Set source name and link
    sourcename = "Materials Project"
    sourcelink = "https://materialsproject.org/"

    # Handle lib_directory (coerce str paths to Path objects)
    if lib_directory is None:
        lib_directory = Path(libdir, 'reference_crystal')
    else:
        lib_directory = Path(lib_directory)
    if not lib_directory.is_dir():
        lib_directory.mkdir()

    elements.sort()

    # Build list of downloaded entries
    have = []
    for fname in lib_directory.glob('*.json'):
        have.append(fname.stem)

    # Open connection to Materials Project
    with MPRester(api_key) as m:

        # Loop over subsets of elements
        for subelements in subsets(elements):

            # Query MP for all entries corresponding to the elements
            entries = m.query({"elements": subelements}, ["material_id"])

            # Add entries to the list if not there
            missing = []
            for entry in entries:
                if entry['material_id'] not in have and entry['material_id'] not in missing:
                    missing.append(entry['material_id'])

            # Download missing entries
            try:
                entries = m.query({"material_id": {"$in": missing}}, ['material_id', 'cif'])
            except Exception:
                # skip this subset if the bulk query fails
                pass
            else:
                # Convert cif to model and save
                for entry in entries:
                    name = entry['material_id']
                    struct = pmg.Structure.from_str(entry['cif'], fmt='cif')
                    struct = SpacegroupAnalyzer(struct).get_conventional_standard_structure()
                    ucell = am.load('pymatgen_Structure', struct).normalize()

                    model = build_reference_crystal_model(name, ucell, sourcename, sourcelink)
                    with open(Path(lib_directory, name+'.json'), 'w') as f:
                        model.json(fp=f, indent=4)
                    print('Added', entry['material_id'])
|
3381741e9b71620ce4ab918482d1f92128f967aa
|
Python
|
18303585361/Zero
|
/11.面向对象-高阶/5.面向对象-高阶-魔术方法(二).py
|
UTF-8
| 2,801
| 4.40625
| 4
|
[] |
no_license
|
# Object-oriented programming: magic methods (part 2)
'''
1. __len__
    Trigger: invoked automatically when the len() function is used on the current object
    Purpose: lets len() report some piece of data stored in the current object
    Parameters: a single self that receives the current object
    Return value: required, and it must be an integer
    Note: return the length of whichever attribute len() is meant to measure
2. __str__
    Trigger: invoked automatically when str() or print() operates on the object
    Purpose: returns a string on the object's behalf, so the printed text can be customized
    Parameters: a single self that receives the current object
    Return value: required, and it must be a string
3. __repr__
    Trigger: invoked automatically when repr() converts the current object
    Purpose: lets you define the result of applying repr() to the object
    Parameters: a single self that receives the current object
    Return value: required, and it must be a string
    Note: normally, when __str__ is missing, __repr__ takes its place
4. __bool__
    Trigger: invoked automatically when bool() converts the current object;
             by default an object converts to True
    Purpose: performs the bool conversion on the object's behalf, for any data
    Parameters: a single self that receives the object
    Return value: must be a bool
5. The difference between __str__ and __repr__
    1. Both str() and repr() can convert other data types to strings
    2. str() converts an object into a form better suited to human readers,
       while repr() converts it into a form the interpreter can read back
    3. When the data shows no meaningful difference, str() and repr() give the same result
'''

# class Demo():
#     listurl = []
#
#     # Lets len() work on the object, returning a chosen integer
#     def __len__(self):
#         return len(self.listurl)
#
#     # Returns the string used when str() or print() is applied to the object
#     def __str__(self):
#         return '<this is an object in the current script: str>'
#
#     def __repr__(self):  # can be triggered in place of __str__
#         return 'this is an object: repr'
#
#     def __bool__(self):
#         return bool(self.listurl)

# Instantiate the object
# obj = Demo()
# res = len(obj)
# res = str(obj)
# print(res)
# print(obj)
# res = repr(obj)
# res = bool(obj)
# print(res)

# num = 521
# r1 = str(num)
# r2 = repr(num)

# s = '521'
# r1 = str(s)
# r2 = repr(s)
# print(r1,type(r1))
# print(r2,type(r2))

# class Demo():
#     def __str__(self):
#         return '123'
#
#     def __repr__(self):
#         return '123'
#
# obj = Demo()
# r1 = str(obj)
# r2 = repr(obj)
# print(r1,type(r1))
# print(r2,type(r2))
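
# A minimal runnable sketch of the four magic methods described above.
# (Inventory is a hypothetical class added for illustration; it is not part
# of the original lesson.)
class Inventory:
    def __init__(self, items):
        self.items = items

    def __len__(self):              # triggered by len(obj)
        return len(self.items)

    def __str__(self):              # triggered by str(obj) / print(obj)
        return 'Inventory with {} item(s)'.format(len(self.items))

    def __repr__(self):             # triggered by repr(obj)
        return 'Inventory({!r})'.format(self.items)

    def __bool__(self):             # triggered by bool(obj)
        return bool(self.items)

inv = Inventory(['apple', 'pear'])
print(len(inv))              # 2
print(inv)                   # Inventory with 2 item(s)
print(repr(inv))             # Inventory(['apple', 'pear'])
print(bool(Inventory([])))   # False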
| true
|
8bcebebedc397ac01b890b5f512168fa1cb1ab0b
|
Python
|
m-bronnikov/Cryptography
|
/lab3/variant_number.py
|
UTF-8
| 681
| 3.171875
| 3
|
[] |
no_license
|
# Made by Bronnikov Max
from pygost import gost34112012256
import sys

def number_from_str(family):
    if not isinstance(family, str):
        return -1
    # The hash accepts bytes only, hence .encode(); passing a str raises TypeError
    last = gost34112012256.new(family.encode()).digest()[-1]
    last &= 15
    if last >= 10:
        return chr(ord('A') + last - 10)
    return str(last)
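
# Illustrative: if the digest's last byte were 0x2B, then 0x2B & 15 == 11,
# and 11 maps to chr(ord('A') + 11 - 10) == 'B'.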
if __name__ == "__main__":
    family = "Бронников Максим Андреевич"
    if len(sys.argv) == 2 and sys.argv[1] == "-i":
        family = input("Enter your full name: ")
    elif len(sys.argv) > 1:
        raise ValueError("Wrong args")
    print("Variant for " + family + ":", number_from_str(family))
| true
|
c7d17c63824819379698788245f00753c1300ca7
|
Python
|
youaresoroman/pp1
|
/01-TypesAndVariables/duringclass/21.py
|
UTF-8
| 61
| 2.90625
| 3
|
[] |
no_license
|
# Convert a temperature from Celsius to Fahrenheit
C = float(input('Enter a number:'))
F = (C * 1.8) + 32
print(F)
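# Worked example: 100 °C -> (100 * 1.8) + 32 = 212.0 °F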
| true
|
0e946aa3697e721ce27671d8b2ca36a5993ae644
|
Python
|
andreamatranga/building-damage
|
/damage/models/cnn.py
|
UTF-8
| 2,284
| 2.703125
| 3
|
[] |
no_license
|
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, BatchNormalization
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam

from damage.models.losses import *
from damage.models.base import Model


class CNN(Model):

    metrics = ['accuracy', recall, specificity, precision, negatives]

    def __init__(self, convolutional_layers, dense_units=64, learning_rate=0.1, **kwargs):
        self.convolutional_layers = convolutional_layers
        self.dense_units = dense_units
        self.learning_rate = learning_rate
        self.model = self._create_model()

    def fit_generator(self, generator, epochs, steps_per_epoch, class_weight=None, **kwargs):
        self.model.fit_generator(generator, epochs=epochs, steps_per_epoch=steps_per_epoch,
                                 class_weight=class_weight)

    def validate_generator(self, train_generator, test_generator, validation_steps,
                           epochs, steps_per_epoch, class_weight=None, **kwargs):
        model_fit = self.model.fit_generator(train_generator, validation_data=test_generator,
                                             epochs=epochs, validation_steps=validation_steps,
                                             steps_per_epoch=steps_per_epoch, class_weight=class_weight)
        return model_fit.history

    def predict_generator(self, generator, **kwargs):
        return self.model.predict_generator(generator, **kwargs)

    def _create_model(self):
        layers = []
        for config in self.convolutional_layers:
            # each config dict contributes a convolution followed by a pooling layer
            layers.extend(self._create_convolutional_and_pooling_layer(**config))
        layers.extend([
            Flatten(),
            Dense(units=self.dense_units),
            BatchNormalization(),
            # sigmoid keeps the single output in [0, 1], as binary_crossentropy expects
            Dense(units=1, activation='sigmoid'),
        ])
        model = Sequential(layers)
        # the learning rate belongs to the optimizer; compile() does not accept it
        model.compile(optimizer=Adam(learning_rate=self.learning_rate),
                      loss='binary_crossentropy', metrics=self.metrics)
        return model

    @staticmethod
    def _create_convolutional_and_pooling_layer(filters, kernel_size, pool_size):
        conv = Conv2D(filters=filters, kernel_size=kernel_size, padding="same", activation='relu')
        pool = MaxPooling2D(pool_size=pool_size, strides=1)
        return [conv, pool]
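
# A hedged usage sketch (hypothetical values; the config keys mirror the
# parameters of _create_convolutional_and_pooling_layer, and the generator
# is assumed to come from elsewhere in the damage package):
#
#   cnn = CNN(
#       convolutional_layers=[
#           {'filters': 32, 'kernel_size': 3, 'pool_size': 2},
#           {'filters': 64, 'kernel_size': 3, 'pool_size': 2},
#       ],
#       dense_units=64,
#       learning_rate=0.001,
#   )
#   cnn.fit_generator(train_generator, epochs=10, steps_per_epoch=100)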
| true
|