blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b6bfb28cfd4ceb8da66d3a170374ca817838b6a3 | Python | hygnic/boomboost | /hyosmnx/main.py | UTF-8 | 1,768 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# ---------------------------------------------------------------------------
# Author: LiaoChenchen
# Created on: 2021/3/19 13:53
# Reference: https://towardsdatascience.com/making-artistic-maps-with-python-9d37f5ea8af0
"""
Description:
Usage:
"""
# ---------------------------------------------------------------------------
###############################################################################
# 1. Importing Libraries #
###############################################################################
# To make maps
import PIL
import networkx as nx
import osmnx as ox
import requests
import matplotlib.cm as cm
import matplotlib.colors as colors
from matplotlib.lines import Line2D
# To add text and a border to the map
from PIL import Image, ImageOps, ImageColor, ImageFont, ImageDraw
###############################################################################
# 2. Version Check #
###############################################################################
# Report the versions of the mapping/imaging dependencies in use.
for pkg_name, pkg in (("NetworkX", nx), ("OSMNX", ox), ("Request", requests), ("PIL", PIL)):
    print(f"The {pkg_name} package is version {pkg.__version__}")
###############################################################################
# 3. Get Data #
###############################################################################
# Define city/cities
places = ["Lawrence, Kansas, USA"]
# Get data for places
# NOTE(review): downloads the full street network from OpenStreetMap —
# requires network access and can take a while for large places.
G = ox.graph_from_place(places, network_type = "all", simplify = True) | true |
6e07b6182a4444fa01cfc2a431b19c4f902fd836 | Python | JeK2a/UchetTrat | /cursValut.py | UTF-8 | 803 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib.request
from xml.etree import ElementTree as ET
# Fetch the US dollar exchange rate (comment translated from Russian)
def getCursUSD():
    """Return the RUB price of one USD (as text) from the CBR daily XML feed."""
    feed = ET.parse(urllib.request.urlopen("http://www.cbr.ru/scripts/XML_daily.asp?date_req"))
    for valute in feed.findall('Valute'):
        # R01235 is the Central Bank of Russia code for USD
        if valute.get('ID') == "R01235":
            return valute.find('Value').text
# Fetch the euro exchange rate (comment translated from Russian)
def getCursEUR():
    """Return the RUB price of one EUR (as text) from the CBR daily XML feed."""
    feed = ET.parse(urllib.request.urlopen("http://www.cbr.ru/scripts/XML_daily.asp?date_req"))
    for valute in feed.findall('Valute'):
        # R01239 is the Central Bank of Russia code for EUR
        if valute.get('ID') == "R01239":
            return valute.find('Value').text
if __name__ == "__main__":
    # Demo: print today's EUR and USD rates (requires network access to cbr.ru)
    print(getCursEUR())
    print(getCursUSD())
| true |
018fdcc0beacdb0b6b76f27678cd0856116bf639 | Python | Gavin666Github/yolo3-keras | /tool/brainwash_annotation.py | UTF-8 | 1,098 | 2.5625 | 3 | [] | no_license | def convert_to_yolo(f_r, f_w):
    """Convert brainwash .idl annotation lines into YOLO training-list format.

    Each input line looks like:
        "path/img.png": (x1, y1, x2, y2), (x1, y1, x2, y2);
    and is rewritten as:
        /root/meeting_room/data/brainwash/path/img.png x1,y1,x2,y2,0 ...
    where the trailing 0 is the class id (head).

    Args:
        f_r: readable file object with .idl annotation lines.
        f_w: writable file object receiving one YOLO line per image.
    """
    for line in f_r:
        # Strip the idl punctuation so only "filename box box ..." remains.
        line = line.replace('"', '')
        line = line.replace(':', '')
        line = line.replace('(', '')
        line = line.replace('),', '')
        line = line.replace(');', '')
        line = line.replace(').', '')
        line = line.replace('\n', '')
        line = line.replace(', ', ',')
        loc = line.split(' ')
        # Image contains no objects to annotate -> skip (translated from Chinese)
        if len(loc) == 1:
            continue
        f_w.write('/root/meeting_room/data/brainwash/' + loc[0])
        for i in range(1,len(loc)):
            # Truncate float coordinates to ints and append class id 0.
            loc[i] = ' ' + ','.join(str(int(float(j))) for j in loc[i].split(',')) + ',0'
            f_w.write(loc[i])
        f_w.write('\n')
if __name__ == "__main__":
    # Merge the test/train/val annotation files into one YOLO list file.
    # NOTE(review): consider `with open(...)` context managers for safer cleanup.
    f_r_test = open('brainwash_test.idl')
    f_r_train = open('brainwash_train.idl')
    f_r_val = open('brainwash_val.idl')
    f_w = open('yolo_data.txt', 'w')
    convert_to_yolo(f_r_test, f_w)
    convert_to_yolo(f_r_train, f_w)
    convert_to_yolo(f_r_val, f_w)
    f_r_test.close()
    f_r_train.close()
    f_r_val.close()
    f_w.close() | true |
3951a4db1358907389df58c063e71364b02a1d95 | Python | rcpsilva/InFTS2018-12 | /animation_fts.py | UTF-8 | 2,823 | 2.546875 | 3 | [] | no_license | from matplotlib import animation
import pertinence_funcs as pf
import numpy as np
import matplotlib.pyplot as plt
from fts_concrete import ConcreteFTS
from fts_stream import StreamAdaptiveWindowFTS
from fts_incremental_rule_deletion import IncMuSigmaRuleDeletionFTS
# --- FTS model hyper-parameters ---
#Fts order
order = 3
nsets = 9
window_size = 100
# Gather sample data
n_samples = 100
x = np.linspace(-np.pi, 10*np.pi, n_samples)
# Regime 1: shifted sine (noise term deliberately left commented out)
data = 3 + np.sin(x) # + np.random.normal(0, 0.1, len(x))
# Regime 2: sine plus linear trend; regime 3: level shift of the base series (+35)
data2 = 3 + np.sin(x) + x
data = np.concatenate((data, data2, data+35))
t = np.arange(len(data))
lines = []
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
ax = plt.axes(xlim=(-50, 310), ylim=(-5, 50))
line, = ax.plot([], [], lw=2)
line2, = ax.plot([], [], lw=2)
# lines[0] = observed series, lines[1] = forecast; one artist per fuzzy set
# is appended later by init().
lines.append(line)
lines.append(line2)
# ifts = StreamAdaptiveWindowFTS(nsets=nsets, order=order, bound_type='mu-sigma', update_type='retrain', deletion=False)
ifts = IncMuSigmaRuleDeletionFTS(nsets=nsets, order=order, deletion=True, bound_type='mu-sigma')
def init():
for i in range(nsets):
nline, = ax.plot([], [], lw=1)
lines.append(nline)
for l in lines:
l.set_data([], [])
return lines
# Align series for plotting: forecast at step k is drawn against the
# observation one step earlier.
x = t[1:]
y = data[:-1]
ifts_forecast = []
partitions = []
samples_so_far = 0
count = 1
for d in data:
    print('{} of {}'.format(count, len(data)))
    count = count+1
    samples_so_far = samples_so_far + 1
    # One-step-ahead forecast for the incoming sample.
    ifts_forecast.append(ifts.predict(d))
    # Snapshot the model's current fuzzy partitions (empty before training).
    if ifts.partitions:
        partitions.append(ifts.partitions)
    else:
        partitions.append([])
    # NOTE(review): ts is rebuilt every iteration; only its final value is
    # actually used by animate() below.
    ts = np.arange(1, samples_so_far+1)
# animation function. This is called sequentially
def animate(i):
    """Draw frame i: the raw series, the forecast so far and the fuzzy partitions.

    Args:
        i: frame index supplied by FuncAnimation.

    Returns:
        Tuple of all artists, so blitting redraws only what changed.
    """
    lines[0].set_data(x, y)
    lines[1].set_data(ts[:i], ifts_forecast[:i])
    if partitions[i]:
        dt = pf.plot_partitions_data(partitions[i])
        xs = dt[0]
        ys = dt[1]
        # Fix: use a separate loop variable — the original inner loop reused
        # `i`, shadowing the frame index parameter.
        for k in range(nsets):
            lines[k + 2].set_data(ys[k], xs[k])
    return tuple(lines)
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=len(x), interval=20, blit=True)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
# NOTE(review): hard-coded Windows ffmpeg path below — adjust per machine.
plt.rcParams['animation.ffmpeg_path'] = 'C:/Program Files/FFmpeg/ffmpeg-20190102-54109b1-win64-static/bin/ffmpeg'
FFwriter = animation.FFMpegWriter(fps=30, extra_args=['-vcodec', 'libx264'])
anim.save('ifts_v4.mp4', writer=FFwriter)
| true |
aa6fdc80e820b53b87bb8022ce923c8a3f1b4ecd | Python | miraedbswo/DMS-Backend | /Server/tests/v1/views/admin/account/test_account_control.py | UTF-8 | 3,058 | 2.609375 | 3 | [
"MIT"
] | permissive | from tests.v1.views import TCBase
from app.models.account import SignupWaitingModel
class TestDeleteAdminAccount(TCBase):
    """
    TC about admin account deletion

    This TC tests
    * DELETE /admin/account-control
    """
    def setUp(self):
        """
        - Before Test

        Register a throwaway admin account with id 'deleteme'
        * POST /admin/new-account
        """
        TCBase.setUp(self)
        # ---
        self.id_for_delete = 'deleteme'
        signup_payload = {'id': self.id_for_delete, 'pw': 'pw', 'name': 'test'}
        self.request(
            self.client.post,
            '/admin/new-account',
            signup_payload,
            self.admin_access_token
        )

    def tearDown(self):
        """
        - After Test
        """
        SignupWaitingModel.objects.delete()
        # ---
        TCBase.tearDown(self)

    def test(self):
        """
        - Test

        First deletion of 'deleteme' must succeed (status 200); deleting the
        now-missing account a second time must report no content (status 204).
        """
        delete_payload = {'id': self.id_for_delete}

        # -- Test: delete an existing account --
        first_resp = self.request(
            self.client.delete,
            '/admin/account-control',
            delete_payload,
            self.admin_access_token
        )
        self.assertEqual(first_resp.status_code, 200)

        # -- Exception Test: delete an already-deleted account --
        second_resp = self.request(
            self.client.delete,
            '/admin/account-control',
            delete_payload,
            self.admin_access_token
        )
        self.assertEqual(second_resp.status_code, 204)
class TestLoadStudentSignStatus(TCBase):
    """
    TC about student's sign status loading

    This TC tests
    * GET /student-sign-status
    """
    def tearDown(self):
        """
        - After Test
        """
        SignupWaitingModel.objects.delete()
        # ---
        TCBase.tearDown(self)

    def test(self):
        """
        - Test

        Load the student sign status and verify that exactly one student has
        signed up and none are pending.
        * Validation
        (1) status code : 200
        (2) payload is a dict with exactly the two expected counters
        """
        resp = self.request(
            self.client.get,
            '/admin/student-sign-status',
            {},
            self.admin_access_token
        )
        # (1)
        self.assertEqual(resp.status_code, 200)

        # (2)
        data = self.get_response_data(resp)
        self.assertIsInstance(data, dict)
        self.assertEqual(len(data), 2)
        expected_counts = {
            'unsigned_student_count': 0,
            'signed_student_count': 1
        }
        self.assertDictEqual(data, expected_counts)
| true |
908a4cfd870fc703d75ab3f0c484cd305a402029 | Python | skyyi1126/leetcode | /1192.critical-connections-in-a-network.py | UTF-8 | 1,219 | 3.1875 | 3 | [] | no_license | #
# @lc app=leetcode id=1192 lang=python3
#
# [1192] Critical Connections in a Network
#
# @lc code=start
import collections
class Solution:
    def criticalConnections(self, n: int, connections):
        """Return all bridges ("critical connections") of an undirected graph.

        Tarjan's bridge-finding algorithm: a DFS assigns each vertex a
        discovery index and a low-link value (the smallest discovery index
        reachable from the vertex's subtree using at most one back edge).
        A tree edge (parent, v) is a bridge iff low[v] == index[v], i.e. v's
        subtree has no back edge climbing above v.

        Bug fixed vs. the original: back edges never lowered low[v] (the
        `else` branch below was missing), so edges that lie on a cycle could
        be falsely reported as bridges.

        Args:
            n: number of vertices labelled 0..n-1. The DFS starts from vertex
               0 only, so the graph is assumed connected (n itself is not
               otherwise validated or used).
            connections: undirected edges as [u, v] pairs.

        Returns:
            List of bridges as [parent, child] pairs in DFS completion order.
        """
        graph = collections.defaultdict(list)
        for v1, v2 in connections:
            graph[v1].append(v2)
            graph[v2].append(v1)

        index = {}  # discovery order of each visited vertex
        low = {}    # low-link value of each visited vertex
        counter = 0
        res = []

        def dfs(v, parent):
            nonlocal counter
            index[v] = low[v] = counter
            counter += 1
            for u in graph[v]:
                if u == parent:
                    continue  # skip the tree edge we arrived through
                if u not in index:
                    dfs(u, v)
                    low[v] = min(low[v], low[u])
                else:
                    # Back edge: v reaches an already-visited vertex directly.
                    low[v] = min(low[v], index[u])
            if parent is not None and low[v] == index[v]:
                res.append([parent, v])

        dfs(0, None)
        return res
# @lc code=end
# Smoke tests (printed, not asserted). The implementation never validates
# against n, so vertex labels larger than n-1 (e.g. 5 and 6 below) still run.
print(Solution().criticalConnections(5, [[0,1], [0, 2], [1, 3], [2, 3], [2, 5], [5, 6], [3,4]]))
print(Solution().criticalConnections(5, [[0,1], [0, 2], [1, 2], [0, 3], [3, 4]]))
print(Solution().criticalConnections(4, [[0,1], [1, 2], [2, 3]]))
print(Solution().criticalConnections(5, [[0,1], [1, 2], [2, 0], [1, 3], [1, 4], [1, 6], [3, 5], [4, 5]]))
| true |
f42280ea431710c79d2e45f15da7cb2ab5fe5a03 | Python | e2fyi/py-utils | /e2fyi/utils/aws/s3_resource.py | UTF-8 | 12,402 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | """
Provides `S3Resource` to represent resources in S3 buckets.
"""
import io
import json
import os.path
from uuid import uuid4
from typing import Union, Generic, TypeVar, Callable, Optional
import boto3
from e2fyi.utils.aws.s3_stream import S3Stream
T = TypeVar("T")
StringOrBytes = TypeVar("StringOrBytes", bytes, str)
class S3Resource(Generic[StringOrBytes]):
    """
    `S3Resource` represents a resource in S3 currently or a local resource that will
    be uploaded to S3. `S3Resource` constructor will automatically attempts to convert
    any inputs into a `S3Stream`, but for more granular control `S3Stream.from_any`
    should be used instead to create the `S3Stream`.
    `S3Resource` is a readable stream - i.e. it has `read`, `seek`, and `close`.
    Example::
        import boto3
        from e2fyi.utils.aws import S3Resource, S3Stream
        # create custom s3 client
        s3client = boto3.client(
            's3',
            aws_access_key_id=ACCESS_KEY,
            aws_secret_access_key=SECRET_KEY
        )
        # creates a local copy of s3 resource with S3Stream from a local file
        obj = S3Resource(
            # full path shld be "prefix/some_file.json"
            filename="some_file.json",
            prefix="prefix/",
            # bucket to download from or upload to
            bucketname="some_bucket",
            # or "s3n://" or "s3://"
            protocol="s3a://",
            # uses default client if not provided
            s3client=s3client,
            # attempts to convert to S3Stream if input is not a S3Stream
            stream=S3Stream.from_file("./some_path/some_file.json"),
            # addition kwarg to pass to `s3.upload_fileobj` or `s3.download_fileobj`
            # methods
            Metadata={"label": "foo"}
        )
        print(obj.key) # prints "prefix/some_file.json"
        print(obj.uri) # prints "s3a://some_bucket/prefix/some_file.json"
        # will attempt to fix prefix and filename if incorrect filename is provided
        obj = S3Resource(
            filename="subfolder/some_file.json",
            prefix="prefix/"
        )
        print(obj.filename) # prints "some_file.json"
        print(obj.prefix) # prints "prefix/subfolder/"
    Saving to S3::
        from e2fyi.utils.aws import S3Resource
        # creates a local copy of s3 resource with some python object
        obj = S3Resource(
            filename="some_file.txt",
            prefix="prefix/",
            bucketname="some_bucket",
            stream={"some": "dict"},
        )
        # upload obj to s3 bucket "some_bucket" with the key "prefix/some_file.json"
        # with the json string content.
        obj.save()
        # upload to s3 bucket "another_bucket" instead with a metadata tag.
        obj.save("another_bucket", MetaData={"label": "foo"})
    Reading from S3::
        from e2fyi.utils.aws import S3Resource
        from pydantic import BaseModel
        # do not provide a stream input to the S3Resource constructor
        obj = S3Resource(
            filename="some_file.json",
            prefix="prefix/",
            bucketname="some_bucket",
            content_type="application/json"
        )
        # read the resource like a normal file object from S3
        data = obj.read()
        print(type(data)) # prints <class 'str'>
        # read and load json string into a dict or list
        # for content_type == "application/json" only
        data_obj = obj.load()
        print(type(data_obj)) # prints <class 'dict'> or <class 'list'>
        # read and convert into a pydantic model
        class Person(BaseModel):
            name: str
            age: int
        # automatically unpack the dict
        data_obj = obj.load(lambda name, age: Person(name=name, age=age))
        # alternatively, do not unpack
        data_obj = obj.load(lambda data: Person(**data), unpack=False)
        print(type(data_obj)) # prints <class 'Person'>
    """
    def __init__(
        self,
        filename: str,
        content_type: str = "",
        bucketname: str = "",
        prefix: str = "",
        protocol: str = "s3a://",
        stream: S3Stream[StringOrBytes] = None,
        s3client: boto3.client = None,
        stats: dict = None,
        **kwargs
    ):
        """
        Creates a new instance of S3Resource, which will use
        `boto3.s3.transfer.S3Transfer` under the hood to download/upload the s3
        resource.
        See
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/customizations/s3.html#boto3.s3.transfer.S3Transfer
        Args:
            filename (str): filename of the object.
            content_type (str, optional): mime type of the object. Defaults to "".
            bucketname (str, optional): name of the bucket the obj is or should be.
                Defaults to "".
            prefix (str, optional): prefix to be added to the filename to get the s3
                object key. Defaults to "application/octet-stream".
            protocol (str, optional): s3 client protocol. Defaults to "s3a://".
            stream (S3Stream[StringOrBytes], optional): data stream. Defaults to None.
            s3_client (boto3.client, optional): s3 client to use to retrieve
                resource. Defaults to None.
            Metadata (dict, optional): metadata for the object. Defaults to None.
            **kwargs: Any additional args to pass to `boto3.s3.transfer.S3Transfer`
                function.
        """
        # random name if filename is not provided
        filename = filename or uuid4().hex
        # Fold any directory component of the filename into the prefix so the
        # final key is always "<prefix><bare filename>".
        # NOTE(review): os.path.sep terminates the prefix — on Windows this
        # yields "\\", which is unusual in an S3 key; confirm intended.
        dirname = os.path.dirname(filename)
        if dirname:
            filename = filename[len(dirname) + 1 :]
            prefix = os.path.join(prefix, dirname) + os.path.sep
        if stream:
            # Coerce arbitrary input (dict, str, file-like, ...) into a S3Stream.
            if not isinstance(stream, S3Stream):
                stream = S3Stream.from_any(stream, content_type)
            if content_type:
                stream.content_type = content_type
        self.filename = filename
        self._content_type = content_type
        self.bucketname = bucketname
        self.prefix = prefix
        self.protocol = protocol
        self._stream: Optional[S3Stream[StringOrBytes]] = stream
        self.extra_args = kwargs
        self.s3client = s3client
        # last_resp holds the return value of the most recent boto3 call.
        self.last_resp = None
        self.stats = stats
    @property
    def content_type(self) -> str:
        """mime type of the resource"""
        # Prefer the stream's own content type when a stream is present.
        if self._stream and hasattr(self._stream, "content_type"):
            return self._stream.content_type
        return self._content_type or "application/octet-stream"
    @property
    def key(self) -> str:
        """Key for the resource."""
        if not self.filename:
            raise ValueError("filename cannot be empty.")
        return "%s%s" % (self.prefix, self.filename)
    @property
    def uri(self) -> str:
        """URI to the resource."""
        if not self.bucketname:
            raise ValueError("bucketname cannot be empty.")
        return "%s%s/%s" % (self.protocol, self.bucketname, self.key)
    @property
    def stream(self) -> S3Stream[StringOrBytes]:
        """data stream for the resource."""
        if self._stream:
            return self._stream
        # Lazily download the object from S3 on first access and cache it.
        if self.bucketname:
            stream = io.BytesIO()
            s3client = self.s3client or boto3.client("s3")
            self.last_resp = s3client.download_fileobj(
                self.bucketname, self.key, stream, ExtraArgs=self.extra_args
            )
            stream.seek(0) # reset to initial counter
            self._stream = S3Stream(stream, self._content_type)
            # overwrite infered mime if provided
            if self._content_type:
                self._stream.content_type = self._content_type
            return self._stream
        raise RuntimeError("S3Resource does not have a stream.")
    def read(self, size=-1) -> StringOrBytes:
        """duck-typing for a readable stream."""
        return self.stream.read(size) # type: ignore
    def seek(self, offset: int, whence: int = 0) -> int:
        """duck-typing for readable stream.
        See https://docs.python.org/3/library/io.html
        Change the stream position to the given byte offset. offset is interpreted
        relative to the position indicated by whence. The default value for whence
        is SEEK_SET. Values for whence are:
        SEEK_SET or 0 – start of the stream (the default); offset should be zero
            or positive
        SEEK_CUR or 1 – current stream position; offset may be negative
        SEEK_END or 2 – end of the stream; offset is usually negative
        Return the new absolute position.
        """
        return self.stream.seek(offset, whence) # type: ignore
    def close(self) -> "S3Resource":
        """Close the resource stream."""
        self.stream.close()
        return self
    def get_value(self) -> StringOrBytes:
        """Retrieve the entire contents of the S3Resource."""
        self.seek(0)
        return self.read()
    def load(
        self, constructor: Callable[..., T] = None, unpack: bool = True
    ) -> Union[dict, list, T]:
        """
        load the content of the stream into memory using `json.loads`. If a
        `constructor` is provided, it will be used to create a new object. Setting
        `unpack` to be true will unpack the content when creating the object
        with the `constructor` (i.e. * for list, ** for dict)
        Args:
            constructor (Callable[..., T], optional): A constructor function.
                Defaults to None.
            unpack (bool, optional): whether to unpack the content when passing
                it to the constructor. Defaults to True.
        Raises:
            TypeError: [description]
        Returns:
            Union[dict, list, T]: [description]
        """
        # Only JSON payloads can be deserialized here.
        if self.content_type != "application/json":
            raise TypeError(
                "Content type is '%s' instead 'application/json'." % self.content_type
            )
        self.seek(0) # reset buffer offset
        result = json.loads(self.read())
        if not constructor:
            return result # type: ignore
        if unpack:
            if isinstance(result, dict):
                return constructor(**result)
            if isinstance(result, list):
                return constructor(*result)
        return constructor(result)
    def save(
        self, bucketname: str = None, s3client: boto3.client = None, **kwargs
    ) -> "S3Resource":
        """
        Saves the current S3Resource to the provided s3 bucket (in constructor or
        in arg). Extra args can be pass to `boto3.s3.transfer.S3Transfer` via
        keyword arguments of the same name.
        See
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/customizations/s3.html#boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS
        Args:
            bucketname (str, optional): bucket to save the resource to. Overwrites
                the bucket name provided in the constructor. Defaults to None.
            s3client (boto3.client, optional): custom s3 client to use. Defaults to
                None.
            **kwargs: additional args to pass to `boto3.s3.transfer.S3Transfer`.
        Raises:
            ValueError: "S3 bucket name must be provided."
        Returns:
            S3Resource: S3Resource object.
        """
        bucketname = bucketname or self.bucketname
        if not bucketname:
            raise ValueError("S3 bucket name must be provided.")
        # Peek at the stream to decide whether it yields str (must be utf-8
        # encoded before upload, since boto3 expects a binary stream) or bytes.
        self.stream.seek(0)
        sample = self.stream.read(10)
        self.stream.seek(0)
        if isinstance(sample, str):
            stream = io.BytesIO(self.stream.read().encode("utf-8"))
        else:
            stream = self.stream
        s3client = s3client or self.s3client or boto3.client("s3")
        self.last_resp = s3client.upload_fileobj(
            stream,
            bucketname,
            self.key,
            ExtraArgs={"ContentType": self.content_type, **self.extra_args, **kwargs},
        )
        self.stream.seek(0)
        return self
    def __str__(self) -> str:
        """String representation of a S3Resource."""
        try:
            return self.uri
        except ValueError:
            # Fall back to the bare key when bucketname is not set.
            return self.key
| true |
# BOJ 1182 (subset sum): count the subsequences of `lst` whose elements sum to s
eadcab4a53de592224b6390847a811b099bbce12 | Python | Skyrich2000/Algorithm | /4_BruteForce/b_1182.py | UTF-8 | 500 | 2.78125 | 3 | [] | no_license | n, s = map(int, input().split())
lst = list(map(int, input().split()))
# Global tally of matching subsets; incremented inside check().
count = 0
def check(pick, newflag):
    """Mark element `newflag` as chosen, count the subset if its sum equals s,
    then recurse over every element to the right of `newflag`.

    `pick` is a 0/1 mask over `lst`; the module-level `count` accumulates hits.
    """
    global count
    chosen = list(pick)
    chosen[newflag] = 1
    subset_sum = sum(lst[idx] * chosen[idx] for idx in range(n))
    if subset_sum == s:
        count += 1
    # Extend only rightwards so each subset is generated exactly once
    # (comment translated from Korean).
    for nxt in range(newflag + 1, n):
        check(chosen, nxt)
# Start a subset at every index; check() then extends it only rightwards,
# so every non-empty subset is enumerated exactly once.
_pick = [0 for _ in range(n)]
for i in range(n):
    check(_pick, i)
print(count)
| true |
0a5d0e8e8d54a46ce55d71d14295c0396a595f3b | Python | nabeen/AtCoder | /abc/abc028/a.py | UTF-8 | 334 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://atcoder.jp/contests/abc028/tasks/abc028_a
def main() -> None:
    """Read an exam score from stdin and print its ABC028 verdict."""
    score = int(input())
    # Inclusive upper bounds for each verdict, checked in ascending order.
    for bound, verdict in ((59, 'Bad'), (89, 'Good'), (99, 'Great')):
        if score <= bound:
            print(verdict)
            return
    print('Perfect')


if __name__ == '__main__':
    main()
| true |
92bbd5cb58f62f019bfadc8597cbf515f93d3ae8 | Python | Fernweh-yang/TUM-Assigments | /Mobile Robotik in der Intralogitik/Python Vorlagen Neu/01_Kinematik/20200930_Kinematik_Aufgabe3_Studentenversion.py | UTF-8 | 14,563 | 3.171875 | 3 | [] | no_license | from __future__ import print_function
from __future__ import division
import time as t
import brickpi3
import math
import numpy as np
inf = math.inf #Unendlich definieren
#Test for infinity: x = inf; math.isinf(x)
########################################
#BrickPi und dessen Anschlüsse definieren
#----------------------------------------
# BrickPi instanziieren
BP = brickpi3.BrickPi3()
#Motoranschlüsse definieren
motor_l = BP.PORT_D
motor_r = BP.PORT_A
# Sensoranschlüsse definieren
sensor_distance = BP.PORT_1 #Abstandssensor an Anschluss 1
sensor_gyro = BP.PORT_2 #Gyrosensor an Anschluss 2
#Sensoranschlüsse definieren
BP.set_sensor_type(BP.PORT_1, BP.SENSOR_TYPE.EV3_ULTRASONIC_CM) #Abstand an Anschluss 1
BP.set_sensor_type(BP.PORT_2, BP.SENSOR_TYPE.EV3_GYRO_ABS) #Gyro an Anschluss 2
#Motorlimit NICHT ENTFERNEN!!!
#Motoren begrenzt auf 90% der Nennleistung
BP.set_motor_limits(motor_l, 90, 0)
BP.set_motor_limits(motor_r, 90, 0)
########################################
#Weltkoordinaten
new_position = [0,0,0,0] #x in m, y in m, Winkel in °, Winkel aus Gyro in °
coordinates = [0,0,0] #x in m, y in m, Winkel in °
########################################
#Fahrzeuggeometrie
# Eingabe der Fahrzeugparameter und Berechnung weiterer Werte
# z_motor [-]: Anzahl Zähne am Zahnrad auf der Motorseite
# z_wheel [-]: Anzahl Zähne am Zahnrad auf der Radseite
# d_wheel [mm]: Raddurchmesser
# track_inside [mm]: Abstand der Innenflächen der Räder
# track_outside [mm]: Abstand der Außenflächen der Räder
# track_compensation [-]: Korrekturfaktor für den nicht mittigen Radaufstand
# ratio [-]: Getriebeübersetzung; Berechnung aus den Zähnezahlen
# track [mm]: Spurbreite; Berechnung aus den Radabständen mit dem Korrekturfaktor
z_motor = 8
z_wheel = 24
d_wheel = 72 #Raddurchmesser (in mm)
track_inside = 132 #innerer Radabstand (in mm)
track_outside = 204 #äußerer Radabstand (in mm)
track_compensation = 0.94 #Kompensationsfunktion, weil wir den Radabstand überschätzen (Rad steht nicht genau mittig auf)
#Geometrieberechnung
ratio = z_wheel / z_motor #Getriebeübersetzung
track = ((track_inside + (track_outside - track_inside)/2)/1000)*track_compensation #Spurbreite (in m)
########################################
########################################
# Fahrfunktion
########################################
# Beschreibung
#---------------------------------------
# Ausführen von Fahrbefehlen je nach Eingabe:
# Geradeausfahren: v = Robotergeschwindigkeit
# radius = math.inf
# omega = 0
# Kurvenfahren: v = Robotergeschwindigkeit
# radius = Kurvenradius (> 0 --> Linkskurve; < 0 Rechtskurve)
# omega = 0
# Drehen auf der Stelle: v = 0
# radius = 0
# omega = Drehgeschwindigkeit des Roboters
########################################
# Eingangsparameter
#---------------------------------------
# v [m/s]: Geschwindigkeit
# radius [m]: Kurvenradius
# omega [°/s]: Winkelgeschwindigkeit für Drehung auf der Stelle
# last_call [s]: Zeitpunkt des letzten Methodenaufrufs
# last_position [m,m,°]: Position beim letzten Funktionsaufruf im globalen Koordinatensystem
# last_speed [m/s]: Geschwindigkeit beim letzten Funktionsaufruf
# last_radius [m]: Kurvenradius beim letzten Funktionsaufruf
# last_omega [°/s]: Winkelgeschwindigkeit beim letzten Funktionsaufruf
########################################
# Parameter innerhalb der Funktion
#---------------------------------------
# current_call [s]: aktueller Zeitpunkt
# delta_t [s]: vergangene Zeit seit dem letzten Aufruf
# last_v_r, last_v_l [m/s]: Geschwindigkeit am rechten/linken Rad beim letzten Funktionsaufruf
# new_position[x,y,theta,theta(aus gyro)] [m,m,°,°] : Vektor mit der aktuellen Position des Roboters
# omega_z [rad/s] : Rotationsgeschwindigkeit des Roboters um die eigene z-Achse
# omega_r, omega_l [rad/s] : Drehgeschwindigkeit des rechten/linken Rades
# dps_r, dps_l [°/s] : Drehgeschwindigkeit des rechten/linken Motors
########################################
# Funktionsausführung
def Fahren(v, radius, omega, last_call, last_position, last_speed, last_radius, last_omega): #Einheiten: m/s; m; °/s; s; [m,m,°]; m/s; m, °/s
#Odometriedaten seit dem letzten Aufruf berechnen
current_call = t.perf_counter()
delta_t = current_call - last_call
#print(delta_t)
if math.isinf(last_radius): # Geradeausfahren, d. h. radius = unendlich
last_omega_z = 0
last_v_r = last_speed
last_v_l = last_speed
elif last_radius != 0: #Kurve fahren um den Momentanpol
if last_radius==0:
last_omega_z=0
else:
last_omega_z = (last_speed / last_radius)*180/math.pi #Drehgeschwindigkeit berechnen
last_v_r = last_speed + (track/2)*last_omega_z
last_v_l = last_speed - (track/2)*last_omega_z
elif last_speed==0 and last_radius==0: #Drehen auf der Stelle
last_omega_z = last_omega
last_v_r = (track/2)*last_omega_z
last_v_l = - (track/2)*last_omega_z
#Odometrieberechnung
#new_position = [x-Position, y-Position, Orientierung (z-Winkel)], Orientierung (z-Winkel aus Gyro)]
new_position[0] = last_position[0] + last_speed*delta_t*math.cos(last_position[2] + ((((last_v_r - last_v_l)/track) * delta_t) / 2)) #x-Position
new_position[1] = last_position[1] + last_speed*delta_t*math.sin(last_position[2] + ((((last_v_r - last_v_l)/track) * delta_t) / 2)) #y-Position
new_position[2] = last_position[2] + (((last_v_r - last_v_l)/track) * delta_t) #Orientierung in °
new_position[3] = -1*BP.get_sensor(sensor_gyro) #Winkel aus Gyro in °
#print(new_position)
coordinates[0] = coordinates[0] + last_speed*delta_t*math.cos((coordinates[2]*math.pi) / 180)# + ((((last_v_r - last_v_l)/track) * delta_t) / 2)) #x-Position
coordinates[1] = coordinates[1] + last_speed*delta_t*math.sin((coordinates[2]*math.pi) / 180)# + ((((last_v_r - last_v_l)/track) * delta_t) / 2)) #y-Position
coordinates[2] = coordinates[2] + (((last_v_r - last_v_l)/track) * delta_t) #Orientierung in °
#neue Geschwindigkeiten der Motoren berechnen
if v==0 and radius==0: # auf der Stelle drehen
omega_z = omega * math.pi / 180 #Umrechnung von °/s in rad/s
omega_r = ((track / (d_wheel/1000))*omega_z) #Ergebnis in rad/s
omega_l = -((track / (d_wheel/1000))*omega_z) #Ergebnis in rad/s
dps_r = (ratio * omega_r * 360) / (2*math.pi) #Umrechnung von rad/s in °/s
dps_l = (ratio * omega_l * 360) / (2*math.pi) #Umrechnung von rad/s in °/s
elif math.isinf(radius) and omega == 0 and v != 0: #Geradeaus fahren
omega_r = ((2 / (d_wheel/1000))*v) #Ergebnis in rad/s
omega_l = ((2 / (d_wheel/1000))*v) #Ergebnis in rad/s
dps_r = (ratio * omega_r * 360) / (2*math.pi) #Umrechnung von rad/s in °/s
dps_l = (ratio * omega_l * 360) / (2*math.pi) #Umrechnung von rad/s in °/s
elif radius != 0 and v != 0: #Kurve fahren, negatives Omega: Rechtskurve, positives Omega: Linkskurve
omega_z = v / radius #Drehgeschwindigkeit berechnen
omega_r = ((2 / (d_wheel/1000))*v) + ((track / (d_wheel/1000))*omega_z) #Ergebnis in rad/s
omega_l = ((2 / (d_wheel/1000))*v) - ((track / (d_wheel/1000))*omega_z) #Ergebnis in rad/s
dps_r = (ratio * omega_r * 360) / (2*math.pi) #Umrechnung von rad/s in °/s
dps_l = (ratio * omega_l * 360) / (2*math.pi) #Umrechnung von rad/s in °/s
# Geschwindigkeiten der Motoren setzen
BP.set_motor_dps(motor_r, dps_r)
BP.set_motor_dps(motor_l, dps_l)
# Rückgabe der neu berechneten Position
return new_position
########################################
# Stoppfunktion
########################################
# Beschreibung
#---------------------------------------
# setzt die Drehzahl der Motoren auf 0
########################################
def stop():
BP.set_motor_power(motor_r, 0)
BP.set_motor_power(motor_l, 0)
########################################
########################################
# Main routine
########################################
# Description
#---------------------------------------
# Lets the robot drive through any number of waypoints given as a matrix.
# It always drives in a straight line from point to point and turns on the spot.
########################################
# Variables used below
#---------------------------------------
# point_start = [x,y] [m,m]  : robot start point, set to [0,0]
# points = [[x1,y1],...]     : matrix of all waypoints
# number_of_points           : size of the points matrix
# Distances                  : list of computed distances between waypoints
# Angles                     : list of signed turn angles at the waypoints
# position_end               : target value of a drive command (distance or angle)
# determinante               : 2D cross product used to pick the turn direction
# direction1, direction2     : direction vectors of the previous and next segment
# dot, length1, length2      : dot product and lengths of the two current vectors
# angle                      : angle between the two current vectors
# new_speed [m/s], new_radius [m], new_omega [deg/s] : commanded motion values
# i, k, n, m                 : loop counters
########################################
# Execution
try:
    t.sleep(5)  # wait until the sensor is ready
    ########################################
    # Waypoint definition: one [x, y] row per waypoint (metres).
    point_start = np.array([0, 0])
    # FIX: the original left this assignment blank (SyntaxError). The square
    # path below is a placeholder — replace with the course you want to drive.
    points = np.array([[0.5, 0.0], [0.5, 0.5], [0.0, 0.5], [0.0, 0.0]])
    ########################################
    # Pre-compute segment lengths and the turn angle at every waypoint.
    number_of_points = points.shape
    Distances = np.zeros(number_of_points[0])
    Angles = np.zeros(number_of_points[0])
    position_end = 0
    determinante = 0
    for i in range(0, number_of_points[0]):
        if i == 0:
            direction1 = np.array([1, 0])  # initial robot heading
            direction2 = points[i, :]      # position vector of the first point
        else:
            direction2 = points[i, :] - points[i - 1, :]  # current -> next point
        # FIX: the original left dot/length1/length2/angle blank (SyntaxError).
        # Angle between the previous and next driving direction:
        dot = np.dot(direction1, direction2)
        length1 = np.linalg.norm(direction1)
        length2 = np.linalg.norm(direction2)
        if length1 == 0 or length2 == 0:
            angle = 0
        else:
            # Clamp to [-1, 1] so float round-off cannot push acos() out of domain.
            angle = math.acos(max(-1.0, min(1.0, dot / (length1 * length2))))
        # The sign of the 2D cross product decides left vs. right turn.
        determinante = direction1[0] * direction2[1] - direction1[1] * direction2[0]
        if determinante < 0:
            angle = -1 * angle
        Distances[i] = length2   # drive command: segment length
        direction1 = direction2  # remember the heading for the next step
        Angles[i] = angle        # turn command: signed angle
    ColumnsRows = Distances.shape
    ########################################
    i = 0
    n = 0
    m = 0
    last_position = [0, 0, 0, 0]  # x, y, theta, theta(gyro)
    print(last_position)
    # FIX: the original wrapped this in "while True:", which busy-looped forever
    # after the course was finished and made the cleanup below unreachable.
    while i < 2 * ColumnsRows[0]:  # visit every entry of the command tables
        compare = 0
        k = i + 1
        if k % 2 != 0:
            # --- odd step: turn on the spot by Angles[m] ---
            last_position = [0, 0, 0, 0]  # reset odometry: x, y, theta, theta(gyro)
            last_call = t.perf_counter()
            last_speed = 0
            last_radius = 0
            last_omega = 0
            position_end = Angles[m] * 180 / math.pi
            new_speed = 0   # m/s; turning in place, no forward motion
            new_radius = 0  # m
            # deg/s; sign selects the turn direction
            new_omega = -10 if position_end < 0 else 10
            m = m + 1
            winkel = 1
        else:
            # --- even step: drive straight for Distances[n] metres ---
            last_position = [0, 0, 0, 0]
            last_call = t.perf_counter()
            last_speed = 0
            last_radius = 0
            last_omega = 0
            position_end = Distances[n]
            new_speed = 0.1         # m/s; v < 0.15 m/s; v < 0 would be reverse
            new_radius = math.inf   # straight-line driving
            new_omega = 0           # deg/s; only needed for turning in place
            n = n + 1
            winkel = 0
        i = i + 1
        distance = math.inf  # FIX: was unbound if the first sensor read raised
        while abs(compare) < abs(position_end):  # until the segment is finished
            t.sleep(0.1)  # smaller values (0.001) caused I/O problems
            try:
                distance = BP.get_sensor(sensor_distance)
            except brickpi3.SensorError as error:
                print(error)
            if distance < 10:
                stop()  # obstacle closer than 10 cm: abort this segment
                break
            else:
                # On the first pass, start the delta-t clock.
                if compare == 0:
                    last_call = t.perf_counter()
                # Call the driving function (defined elsewhere in this file).
                last_position = Fahren(new_speed, new_radius, new_omega, last_call,
                                       last_position, last_speed, last_radius, last_omega)
                # Progress metric: heading angle for turns, distance otherwise.
                if winkel != 0:
                    compare = last_position[2]
                else:
                    compare = math.sqrt(last_position[0] * last_position[0]
                                        + last_position[1] * last_position[1])
                # Remember current values and time for the next call.
                last_speed = new_speed
                last_radius = new_radius
                last_omega = new_omega
                last_call = t.perf_counter()
        stop()
        # FIX: the original printed 'new_position' and 'coordinates', neither of
        # which is defined anywhere in this file (NameError at runtime).
        print(last_position)
    stop()
    BP.reset_all()
except KeyboardInterrupt:
    BP.reset_all()
| true |
8958ac4b6eb27410f6f4c2f9da144e53f7c17762 | Python | arshharkial/Stock-Price-Prediction | /closing_price.py | UTF-8 | 2,481 | 3.140625 | 3 | [] | no_license | #Importind necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import LSTM, Dense
from sklearn.preprocessing import MinMaxScaler
import h5py
#Importing dataset
dataset = pd.read_csv("NSE-Tata-Global-Beverages-Limited.csv")
dataset.head()
#Converting Date from string to Python Date Time
dataset.Date = pd.to_datetime(dataset.Date, format = "%Y-%m-%d")
dataset.index = dataset.Date
dataset = dataset.sort_index(ascending = True, axis = 0)
#Plotting closing points
plt.plot(dataset.Close, label = "Close Price History")
#Create new_dataset from Date and Close columns
new_dataset = dataset[['Date', 'Close']].copy()
#Normalise data
train_size = 0.8
split = len(dataset) * train_size
scaler = MinMaxScaler()
train_data = new_dataset.iloc[0 : int(split), : ].values
test_data = new_dataset.iloc[int(split) : , : ].values
# NOTE(review): the scaler is fit on the FULL series (train + test), which
# leaks test-set min/max into training — fit on the training slice only.
scaled_dataset = scaler.fit_transform(new_dataset.iloc[:, 1].values.reshape(-1, 1))
new_dataset.drop("Date", axis = 1, inplace = True)
# Build sliding 60-step windows: each sample is the previous 60 scaled closes,
# the label is the next close.
x_train,y_train = [], []
for i in range(60,len(train_data)):
    x_train.append(scaled_dataset[i-60 : i, 0])
    y_train.append(scaled_dataset[i , 0])
x_train, y_train = np.array(x_train) , np.array(y_train)
# LSTM expects (samples, timesteps, features).
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
#Create model
model = Sequential([
    LSTM(units = 50, return_sequences = True, input_shape = (x_train.shape[1], 1)),
    LSTM(units = 50),
    Dense(1)
])
model.compile(loss = "mean_squared_error", optimizer = "adam")
model.fit(x_train, y_train, epochs = 9, batch_size = 8, verbose = 2)
#Create dataset to make prediction using lstm model
# Include the last 60 training closes so the first test window is complete.
inputs_data = new_dataset[len(new_dataset) - len(test_data) - 60:].values
inputs_data = inputs_data.reshape(-1, 1)
inputs_data = scaler.transform(inputs_data)
X_test=[]
for i in range(60,inputs_data.shape[0]):
    X_test.append(inputs_data[i-60:i,0])
X_test=np.array(X_test)
X_test=np.reshape(X_test,(X_test.shape[0],X_test.shape[1],1))
#Create prediction
closing_price_prediction = model.predict(X_test)
# Undo the min-max scaling so predictions are in price units again.
closing_price_prediction = scaler.inverse_transform(closing_price_prediction)
#Plot Predicted stock costs vs actual stock costs
train_data=new_dataset[ : int(split)]
test_data=new_dataset[int(split) : ]
# NOTE(review): assigning into a slice triggers pandas' SettingWithCopyWarning;
# consider test_data = test_data.copy() first.
test_data['Predictions'] = closing_price_prediction
plt.plot(train_data["Close"])
plt.plot(test_data[['Close',"Predictions"]])
a9b84ae36ac10e4fd4747f8f96769fcadaa0414b | Python | Hnton/math318 | /Project10.py | UTF-8 | 1,047 | 3.390625 | 3 | [] | no_license | # Mikael Hinton
# Project 10
def eucx(d0, d1):
    """Extended Euclidean algorithm.

    Returns (g, x, y) with g = gcd(d0, d1) and x*d0 + y*d1 == g.
    """
    r_prev, r_cur = d0, d1
    x_prev, x_cur = 1, 0
    y_prev, y_cur = 0, 1
    while r_cur != 0:
        q = r_prev // r_cur
        # Simultaneously advance the remainder and both Bezout coefficients.
        r_prev, r_cur = r_cur, r_prev - q * r_cur
        x_prev, x_cur = x_cur, x_prev - q * x_cur
        y_prev, y_cur = y_cur, y_prev - q * y_cur
    return (r_prev, x_prev, y_prev)
def modexp(base, exponent, mod):
    """Right-to-left binary exponentiation: base ** exponent mod mod."""
    acc = 1
    b = base
    e = exponent
    while e > 0:
        if e & 1:  # this bit of the exponent is set
            acc = (acc * b) % mod
        b = (b * b) % mod  # square for the next bit
        e >>= 1
    return acc
# RSA demo: key generation, encryption and decryption with the helpers above.
p=10174093
q=10176827
print()
print("p:",p)
print("q:",q)
#Find the M
m = p*q
print("m:",m)
#Find the phi
phi = (p-1)*(q-1)
print("phi:",phi)
#Pick an E
e = 1009
print("e:",e)
#Find decryption Exponent
s = eucx(e, phi)
print()
print("Euclidian Algorithm Extended:",s)
#middle number of eucx, make sure to get a 1 and if you dont then change e
# NOTE(review): d is hard-coded from a previous eucx run; it must satisfy
# (e * d) % phi == 1 or decryption below will not recover the message.
d = 5130820812289
msg = 321321321
print("    Message:", msg)
#encode msg
cipher = modexp(msg, e, m)
print("  Encrypted:", cipher)
#decrypt msg
decr = modexp(cipher, d, m)
print("  Decrypted:", decr)
print()

e6c3459e711c9a2c0a40c8cd12414eb4bc9f0ef5 | Python | popexizhi/client2app | /httper.py | UTF-8 | 1,664 | 2.515625 | 3 | [] | no_license | #-*- coding:utf8 -*-
import urllib, urllib2
import cookielib
import json
class httper():
    # NOTE(review): this class is Python 2 code (urllib2); port to
    # urllib.request / requests for Python 3.
    def __init__(self, http_ip = "192.168.1.43"):
        # Base IP of the admin API server; port 18080 is appended per request.
        self.http_ip = http_ip
    def register_app_server(self, url_id, name, key, serial):
        "post id+key+serial /api/admin/register_app_server"
        self.add_licenses = "http://"+ self.http_ip +":18080/api/admin/register_app_server"
        data = json.dumps({"app_server_id":url_id, "customer_name":name, "license_key":key, "serial":serial})
        return self._send_data(self.add_licenses, data)
    def add_appserver_lic(self, name):
        # Request a new app_server license for the given customer.
        self.add_licenses = "http://"+ self.http_ip +":18080/api/biz/licenses"
        data = json.dumps({"customer_name": name,"product_name":"app_server"})
        return self._send_data(self.add_licenses, data)
    def add_dev_lic(self, applications_name, email = "testdev@senlime.com"):
        # Request a developer PIN for the given applications.
        self.add_dev_licenses = "http://"+ self.http_ip +":18080/api/admin/pin"
        data = json.dumps({"email": email,"applications":applications_name})
        return self._send_data(self.add_dev_licenses, data)
    def _send_data(self, url, data):
        # POST the JSON payload and return the decoded JSON response.
        req = urllib2.Request(url, data, {'Content-Type': 'application/json'}) # attach the JSON content-type header
        f = urllib2.urlopen(req)
        get_data = f.read()
        f.close()
        return json.loads(get_data)
if __name__ == "__main__":
    # Smoke test: create a license, then register an app server with it.
    # NOTE(review): Python 2 print statements below.
    a = httper()
    name = "xd"
    res = a.add_appserver_lic(name)
    print res
    assert res["result"] == 0 # result 0 means the request succeeded
    print a.register_app_server(32, name, res["key"], res["serial"])
    #print a.add_dev_lic([2])
a90e189a44d0a80ba51b80d4ce19d72031a91196 | Python | Sasium/randomstuff | /processor.py | UTF-8 | 4,828 | 3.25 | 3 | [] | no_license |
def matrix_reader(a, b):
    """Prompt with message b, then read a matrix of string tokens from stdin.

    a is the size line ("rows [cols]"); only the row count is used.
    """
    print(b)
    row_count = int(a.split()[0])
    return [input().split() for _ in range(row_count)]
def main_transpose(a):
    """Transpose the matrix over its main diagonal."""
    return [list(column) for column in zip(*a)]
def side_transpose(a):
    """Transpose the matrix over its side (anti-)diagonal."""
    # Flipping the rows, transposing, then flipping again equals the
    # anti-diagonal reflection.
    return [list(column) for column in zip(*a[::-1])][::-1]
def vertical_transpose(a):
    """Mirror every row left-to-right (reflection about the vertical axis)."""
    return [row[::-1] for row in a]
def horizontal_transpose(a):
    """Reverse the row order (reflection about the horizontal axis)."""
    # Each returned row is a fresh copy, as in the original element-wise build.
    return [list(row) for row in reversed(a)]
def multiply(a, c):
    """Return matrix a scaled by c; entries are coerced to float first."""
    return [[c * float(entry) for entry in row] for row in a]
def addition(a, b):
    """Element-wise sum of two equally sized matrices (entries coerced to float)."""
    n_rows = len(a)
    n_cols = len(a[0])
    total = [[0] * n_cols for _ in range(n_rows)]
    for r in range(n_rows):
        for c in range(n_cols):
            total[r][c] = float(a[r][c]) + float(b[r][c])
    return total
def multiply_matrices(a, b):
    """Standard matrix product a @ b (entries coerced to float)."""
    n_rows = len(a)
    n_cols = len(b[0])
    inner = len(b)
    product = [[0] * n_cols for _ in range(n_rows)]
    for r in range(n_rows):
        for c in range(n_cols):
            acc = 0
            for k in range(inner):
                acc += float(a[r][k]) * float(b[k][c])
            product[r][c] = acc
    return product
def determinant(a):
    """Determinant: hard-coded formulas up to 3x3, Laplace expansion beyond."""
    n = len(a)
    if n == 1:
        return float(a[0][0])
    if n == 2:
        return float(a[0][0]) * float(a[1][1]) \
            - float(a[0][1]) * float(a[1][0])
    if n == 3:
        # Rule of Sarrus.
        return float(a[0][0]) * float(a[1][1]) * float(a[2][2]) \
            + float(a[0][1]) * float(a[1][2]) * float(a[2][0]) \
            + float(a[0][2]) * float(a[1][0]) * float(a[2][1]) \
            - float(a[0][2]) * float(a[1][1]) * float(a[2][0]) \
            - float(a[0][0]) * float(a[1][2]) * float(a[2][1]) \
            - float(a[0][1]) * float(a[1][0]) * float(a[2][2])
    # Cofactor expansion along the first row.
    total = 0
    for col in range(n):
        minor = [row[:col] + row[col + 1:] for row in a[1:]]
        total += float(a[0][col]) * (-1) ** (2 + col) * determinant(minor)
    return total
def minor_calc(a, b, c):
    """Return matrix a with column b and row c removed (the (c, b) minor)."""
    remaining_rows = a[:c] + a[c + 1:]
    return [row[:b] + row[b + 1:] for row in remaining_rows]
def inverse(a):
    """Inverse via the adjugate; entries rounded to 3 decimals, -0.0 shown as 0.

    Returns an error string when the matrix is singular.
    """
    det = determinant(a)
    if det == 0:
        return "This matrix doesn't have an inverse."
    size = len(a)
    width = len(a[0])
    # Matrix of cofactors: signed determinants of each (r, c) minor.
    cofactors = [[0 for _ in range(width)] for _ in range(size)]
    for r in range(size):
        for c in range(size):
            minor = [row[:c] + row[c + 1:] for row in (a[:r] + a[r + 1:])]
            cofactors[r][c] = (-1) ** (2 + r + c) * determinant(minor)
    adjugate = main_transpose(cofactors)
    result = multiply(adjugate, 1 / det)
    # Round for display and replace negative zeros with a plain 0.
    for r in range(size):
        for c in range(width):
            result[r][c] = round(result[r][c], 3)
            if result[r][c] == 0.0:
                result[r][c] = 0
    return result
# Interactive menu loop: read an operation, read the operand matrices from
# stdin, dispatch to the helpers above and print the result.
while True:
    option = int(input('''1. Add matrices
2. Multiply matrix by a constant
3. Multiply matrices
4. Transpose matrix
5. Calculate a determinant
6. Inverse matrix
0. Exit
Your choice:'''))
    if option == 1 or option == 3:
        # Binary operations need two input matrices.
        matrix_1 = matrix_reader(input("Enter size of first matrix:"),
                                 "Enter first matrix:")
        matrix_2 = matrix_reader(input("Enter size of second matrix:"),
                                 "Enter second matrix:")
        if option == 1:
            result_matrix = addition(matrix_1, matrix_2)
        else:
            result_matrix = multiply_matrices(matrix_1, matrix_2)
    elif option == 2:
        matrix = matrix_reader(input("Enter size of matrix:"), "Enter matrix:")
        result_matrix = multiply(matrix,
                                 float(input("Enter constant:")))
    elif option == 4:
        # Four transposition variants, chosen by a sub-menu.
        sub_option = int(input('''1. Main diagonal
2. Side diagonal
3. Vertical line
4. Horizontal line
Your choice:'''))
        matrix = matrix_reader(input("Enter size of matrix:"), "Enter matrix:")
        if sub_option == 1:
            result_matrix = main_transpose(matrix)
        elif sub_option == 2:
            result_matrix = side_transpose(matrix)
        elif sub_option == 3:
            result_matrix = vertical_transpose(matrix)
        else:
            result_matrix = horizontal_transpose(matrix)
    elif option == 5:
        matrix = matrix_reader(input("Enter size of matrix:"), "Enter matrix:")
        det = determinant(matrix)
    elif option == 6:
        matrix = matrix_reader(input("Enter size of matrix:"), "Enter matrix:")
        result_matrix = inverse(matrix)
    else:
        # Option 0 (or anything unrecognised) exits the loop.
        break
    print("The result is:")
    if option == 6 and isinstance(result_matrix, str):
        # inverse() returns an error string for singular matrices.
        print(result_matrix)
    elif option != 5:
        # Pad every cell to the widest entry so columns line up.
        col_width = max(len(str(word)) for row in result_matrix for word in row) + 2
        for row in result_matrix:
            print(' '.join(str(word).ljust(col_width) for word in row))
    else:
        print(det)
| true |
3fbbf063764ee7a14e3881d3f6b083e6b1f5cbf5 | Python | agermain/Codewars | /6-kyu/are-they-the-"same"?/python/solution.py | UTF-8 | 323 | 3.140625 | 3 | [] | no_license | import math
def comp(array1, array2):
print(array1, array2)
if array1 is None or array2 is None:
return False
array1 = [abs(x) for x in array1]
for num in array2:
if math.sqrt(num) in array1:
array1.remove(math.sqrt(num))
else:
return False
return True
| true |
17de7975359b1d1107e660652cf15fa66a77cab3 | Python | raechen1007/ga_synthesiser | /individual.py | UTF-8 | 3,713 | 3.109375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import sqrt
import numpy as np
from numpy.random import choice
from operators.mutations import uniformMutation
'''
Initial individuals generating functions
'''
def uniform(data):
    """One synthetic row per data row, drawn uniformly from that row's unique values."""
    width = data.shape[1]
    sampled_rows = [choice(np.unique(row), size=width, replace=True) for row in data]
    return np.vstack(sampled_rows)
def univariate(data):
    """One synthetic row per data row, bootstrap-resampled from that row's entries."""
    width = data.shape[1]
    sampled_rows = [choice(row, size=width, replace=True) for row in data]
    return np.vstack(sampled_rows)
def mutate(data, pm=.3):
    # Thin wrapper: delegate to the project's uniform mutation operator with
    # per-entry mutation probability pm.
    return uniformMutation(data, pm)
'''
Scalar for fitness variable
'''
def fitSca(fit_var, weight):
    """Scalarise a fitness vector: weighted root-mean-square of the objectives.

    weight = list of objective weights [w1, w2, w3, ...] aligned with fit_var.
    """
    total = sum((weight[k] * fit_var[k]) ** 2 for k in range(len(weight)))
    return sqrt(total) / sqrt(len(weight))
'''
Individual class:
Individual class stores original data, individual, m, shape, original full
contingency table, weights of objectives, objectives, fitness variable,
fitness scalar.
'''
class Individual(object):
    """One GA candidate: a synthetic data set plus its scoring context.

    The stored features of the original data are:
      * m / shape       -- number of rows and per-variable category counts,
                           e.g. shape (2, 3, 5) for binary/3-cat/5-cat variables
      * ori_full_table  -- full contingency table of the original data
      * obj / weight    -- fitness objective functions and their weights
    """

    def __init__(self, syn_data, ori_data, m, shape, ori_full_table,
                 obj, weight, fitvar=None, fitsca=None):
        """Store the candidate and its evaluation context.

        fitvar / fitsca may carry pre-computed fitness values; they stay None
        until fitnessVariable() / fitnessScalar() fill them in.
        """
        self.syn_data = syn_data
        self.ori_data = ori_data
        self.m = m
        self.shape = shape
        self.ori_full_table = ori_full_table
        self.obj = obj
        self.weight = weight
        # Fix: the original accepted fitvar/fitsca but silently dropped them.
        self.fitvar = fitvar
        self.fitsca = fitsca

    def initialise(self, generator, *args):
        """(Re)create the synthetic data with one of the generator functions."""
        self.syn_data = generator(self.ori_data, *args)

    def evenWeights(self):
        """Validate and normalise self.weight against the number of objectives.

        Fix: the original called list(self.weight) BEFORE the None check, so
        weight=None (the documented default) raised TypeError instead of
        falling back to equal weights.
        """
        if self.obj is None:
            raise ValueError('Objective is required.')
        number_obj = len(self.obj)
        if self.weight is None:
            # Default: equal weight for every objective.
            self.weight = [1 / number_obj] * number_obj
            return
        if type(self.weight) != list:
            self.weight = list(self.weight)
        if len(self.weight) != number_obj:
            # Wrong length: fall back to equal weights.
            self.weight = [1 / number_obj] * number_obj
        else:
            # Normalise so the weights sum to 1 (sum hoisted out of the loop).
            total = sum(self.weight)
            self.weight = [value / total for value in self.weight]

    def fitnessVariable(self):
        """Evaluate every objective; store the score vector in self.fitvar."""
        fitvar = []
        for fun in self.obj:
            fitvar.append(fun(ori_data=self.ori_data, syn_data=self.syn_data,
                              m=self.m, shape=self.shape,
                              ori_full_table=self.ori_full_table))
        self.fitvar = fitvar

    def fitnessScalar(self):
        """Collapse self.fitvar into a single scalar fitness in self.fitsca."""
        if self.weight == 1:
            # A scalar weight of 1 means "single objective": pass through.
            self.fitsca = self.fitvar
        else:
            self.fitsca = fitSca(self.fitvar, self.weight)
| true |
1e87ded83040317823fc56cf89ab99fb9a55d7ce | Python | peterdsharpe/AeroSandbox | /aerosandbox/dynamics/point_mass/point_1D/horizontal.py | UTF-8 | 1,894 | 2.859375 | 3 | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-other-permissive"
] | permissive | from aerosandbox.dynamics.point_mass.point_3D.cartesian import DynamicsPointMass3DCartesian
from aerosandbox.weights.mass_properties import MassProperties
import aerosandbox.numpy as np
from typing import Union, Dict, Tuple
class DynamicsPointMass1DHorizontal(DynamicsPointMass3DCartesian):
    """
    Dynamics instance:
    * simulating a point mass
    * in 1D, oriented horizontally (i.e., the .add_gravity() method will have no effect)

    Implemented as a restriction of the 3D Cartesian point-mass dynamics:
    all y/z state and force components are pinned to zero, and only the
    x-components are exposed through `state` / `control_variables`.

    State variables:
        x_e: x-position, in Earth axes. [meters]
        u_e: x-velocity, in Earth axes. [m/s]

    Control variables:
        Fx_e: Force along the Earth-x axis. [N]
    """

    def __init__(self,
                 mass_props: MassProperties = None,
                 x_e: Union[float, np.ndarray] = 0,
                 u_e: Union[float, np.ndarray] = 0,
                 ):
        # Initialize state variables
        self.mass_props = MassProperties() if mass_props is None else mass_props
        self.x_e = x_e
        # y/z position and velocity are fixed at zero: motion is 1D along x.
        self.y_e = 0
        self.z_e = 0
        self.u_e = u_e
        self.v_e = 0
        self.w_e = 0

        # Initialize indirect control variables (orientation angles, all zero in 1D)
        self.alpha = 0
        self.beta = 0
        self.bank = 0

        # Initialize control variables (only Fx_e is ever varied)
        self.Fx_e = 0
        self.Fy_e = 0
        self.Fz_e = 0

    @property
    def state(self) -> Dict[str, Union[float, np.ndarray]]:
        # Only the x-axis degrees of freedom are part of the reduced state.
        return {
            "x_e": self.x_e,
            "u_e": self.u_e,
        }

    @property
    def control_variables(self) -> Dict[str, Union[float, np.ndarray]]:
        return {
            "Fx_e": self.Fx_e,
        }

    def state_derivatives(self) -> Dict[str, Union[float, np.ndarray]]:
        # Compute the full 3D derivatives, then keep only the 1D state's keys.
        derivatives = super().state_derivatives()
        return {
            k: derivatives[k] for k in self.state.keys()
        }
if __name__ == '__main__':
    # Smoke test: construct a default 1D dynamics instance.
    dyn = DynamicsPointMass1DHorizontal()
| true |
739a784cf65f6169cc5d7abcef6ca5d02a734b04 | Python | MRonald/exercicios-python | /Heranca/Amigo.py | UTF-8 | 624 | 3.03125 | 3 | [] | no_license | from Heranca.Contato import Contato
from Heranca.Pessoa import Pessoa
class Amigo(Pessoa, Contato):
    """A friend: a Pessoa with Contato details, tracked in a class-wide registry."""

    # NOTE: class attribute — every Amigo instance shares this single list.
    todos_amigos = []

    def __init__(self, nome, idade, sexo, email, linkedin, github):
        self.nome = nome
        self.idade = idade
        self.sexo = sexo
        self.email = email
        self.linkedin = linkedin
        self.github = github
        resumo = f"Nome: {self.nome} | Idade: {self.idade} | Sexo: {self.sexo} | Email: {self.email}"
        self.todos_amigos.append(resumo)

    def getAllAmigos(self):
        """Print every registered friend between a header and a footer line."""
        print("---- LA ----")
        for registro in self.todos_amigos:
            print(registro)
        print("------------")
| true |
15252a6666d41cc1ff9aa6099518214563b0e3b9 | Python | ucsb-coast-lab/area_from_svo | /plot_results.py | UTF-8 | 4,120 | 3 | 3 | [] | no_license | #!/usr/bin/python3
import numpy as np
from numpy import genfromtxt
import os, sys
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy.stats import norm
# We're going to open the results csv file, and write all the valid results to another
# data file
def import_data_from_csv(filename):
    """Load a 2-column CSV, dropping malformed rows first.

    Rows that do not split into exactly two comma-separated fields (e.g.
    interleaved error messages) are written out to "filtered_<filename>",
    which is then parsed. Returns (col0, col1) as numpy arrays.
    """
    filtered_name = "filtered_" + filename
    print(filtered_name)
    with open(filtered_name and filename, "r") as source:
        raw_lines = source.readlines()
    with open(filtered_name, "w") as sink:
        for raw in raw_lines:
            if len(raw.split(',')) == 2:
                sink.write(raw)
    table = genfromtxt(filtered_name, delimiter=',')
    return table[:, 0], table[:, 1]
def build_plots(x,y):
    """Show a histogram (with fitted normal curve) and a per-frame bar chart
    of the estimated target areas, after dropping outlier frames."""
    # Start to build the histogram
    #first_edge, last_edge = y.min(), y.max()
    #n_equal_bins = 300
    #bin_edges = np.linspace(start=first_edge, stop=25000,num=n_equal_bins + 1, endpoint=True)
    #hist, bin_edges = np.histogram(y)
    # plot the histogram
    #n, bins, patches = plt.hist(y, n_equal_bins, facecolor='#0504aa', alpha=0.7,rwidth=0.85)
    #n, bins, patches = plt.hist(y[y>250], bins='auto', facecolor='blue', alpha=0.5,rwidth=0.85) # auto-define number of bins
    # K means has a tendency to create some outliers in our area data, so we filter out results above a certain threshold since we can
    # guess that they will probably not be accurate values
    data_size = len(x);
    max_area = 350000;
    deleted_total = 0
    bad_indices = [];
    # Collect indices of frames whose area exceeds the plausibility threshold.
    for i in range(0,data_size-1):
        if y[i] > max_area:
            bad_indices.append(i);
            deleted_total +=1;
    x = np.delete(x,bad_indices);
    y = np.delete(y,bad_indices);
    print("deleted_total",deleted_total)
    print("data size was {},now should be {}".format(data_size,len(x)))

    # Left panel: area histogram with a fitted normal density overlaid.
    plt.subplot(1,2,1)
    n, bins, patches = plt.hist(y[y>250], bins='auto', facecolor='blue', alpha=0.5,rwidth=0.85) # auto-define number of bins
    # NOTE(review): mu/sigma are fitted on ALL remaining y, while the histogram
    # only shows y > 250 — the overlay may not match the displayed bars.
    (mu, sigma) = norm.fit(y)
    curve = norm.pdf(bins,mu,sigma)
    # Ad-hoc scaling of the pdf so the curve is visible against raw counts.
    plt.plot(bins, curve*bins[round(len(bins)/2)], 'b--', linewidth=2)
    plt.xlabel('Value')
    plt.ylabel('Frequency')
    #plt.xlim(0,40000)
    plt.title('Distribution of Estimated Target Area $\mu={:.3}$, $\sigma={:.3}$'.format(mu,sigma))
    plt.ylabel('Binned Frames')
    plt.xlabel('Estimated Target Area (mm^2)')

    # Right panel: per-frame estimated area with a reference line.
    plt.subplot(1,2,2)
    plt.bar(x, y, width=0.8, bottom=None, align='center', data=None)
    plt.scatter(x,y,marker='.',color='black',alpha=0.6,s=1)
    #plt.ylim(0,25000)
    plt.axhline(y=150000, color='b', linestyle='--')
    plt.ylabel('Estimated Target Area (mm^2)')
    plt.xlabel("Frame Number")
    plt.title('Estimated Frame Target Area')
    plt.show()
def select_subset_from_total_data(frames, x, y):
    """Pick the (x, y) pairs whose frame number appears in `frames`.

    Returns two numpy arrays aligned with `frames`; entries with no matching
    frame in x remain 0.
    """
    subset_x = np.zeros([len(frames)])
    subset_y = np.zeros([len(frames)])
    for slot, frame in enumerate(frames):
        print("Find the values in frame ", frame)
        for idx in range(len(x)):
            if x[idx] == frame:
                subset_x[slot] = frame
                subset_y[slot] = y[idx]
                print("x = ", frame, "y = ", y[idx])
                break
    return subset_x, subset_y
def main():
    # Load the (frame, area) results from the CSV named on the command line,
    # plot everything, then plot only a hand-picked subset of frames.
    #x,y = import_data_from_csv('results.csv')
    x,y = import_data_from_csv(sys.argv[1])
    build_plots(x,y);
    # Hand-selected frames of interest (e.g. frames containing the target).
    frames = [308,310,311,312,323,325,326,327,328,329,330,331,345,346,347,348,349,350,351,352,353,354,354,355,356,357,358,359,360,361,362,363,364,365,366,436,666,672,694,695,696,697,698,699,700,701,702,703,704,705,726,727,728,729,730,731,763,773,774,775,776,777,778,779,780,781,782,783,784,785,786,787,788,789,790,791,792,793,794,795,796,797,798,799,800,801,802,803,804,805,806,807,808,809,810,811,812,813,814,815,816,817,818,819,820,821,822,823,824,825,826,827];
    #frames = [308,310,311];
    subset_x,subset_y = select_subset_from_total_data(frames,x,y);
    build_plots(subset_x,subset_y)

if __name__ == "__main__":
    main()
| true |
d665433fe48b046af508245da8031887e9d8a6c4 | Python | b21quocbao/Competitive-Programming | /BGGCD.PY | UTF-8 | 516 | 2.609375 | 3 | [] | no_license | from __future__ import print_function
import sys
def eprint(*args, **kwargs):
    # print() that targets stderr, keeping stdout clean for judged output.
    print(*args, file=sys.stderr, **kwargs)
import os.path
import math
# Redirect stdin/stdout to local files when they exist (offline testing setup).
if os.path.isfile("test.inp"):
    sys.stdin = open("test.inp", "r")
    sys.stdout = open("test.out", "w")
elif os.path.isfile("BGGCD.inp"):
    sys.stdin = open("BGGCD.inp", "r")
    # FIX: the original rebound sys.stdin (not sys.stdout) to the output file,
    # clobbering the just-opened input stream and writing nothing to BGGCD.out.
    sys.stdout = open("BGGCD.out", "w")
# For each input pair print gcd and lcm, until the "0 0" sentinel.
while (1):
    a, b = map(int, input().split())
    if a == 0 and b == 0:
        sys.exit()
    g = math.gcd(a, b)  # hoisted: gcd was computed twice per line
    print(g, a * b // g)
df660c52545797c62fd4cec61d5dc2f4d2d4d982 | Python | Orlha/TLS | /TLS/kuznyechik/block_cipher_mode.py | UTF-8 | 3,017 | 2.640625 | 3 | [] | no_license | import os
import sys
block_cipher_mode_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(block_cipher_mode_dir, '../../'))
from TLS.kuznyechik.block_cipher import *
class BlockCipherMode:
    """Base class for block-cipher modes of operation.

    The default encrypt/decrypt are identity transforms; concrete modes
    (e.g. CTR_ACPKM below) override them.
    """
    def __init__(self, block_cipher: BlockCipher):
        # The underlying primitive (exposes block_size/key_size/encrypt/set_key).
        self.cipher = block_cipher
    def encrypt(self, plain_text: bytes, key: bytes, **kwargs) -> bytes:
        # Identity placeholder; subclasses implement the real transform.
        return plain_text
    def decrypt(self, cipher_text: bytes, key: bytes, **kwargs) -> bytes:
        # Identity placeholder; subclasses implement the real transform.
        return cipher_text
class CTR_ACPKM(BlockCipherMode):
    """Counter mode with ACPKM key meshing.

    Looks like the CTR-ACPKM construction (cf. RFC 8645 / R 1323565.1.017-2018):
    the working key is re-derived after every `section_size` bytes of keystream
    so that no single key encrypts too much data. TODO confirm against the spec.
    """
    def __init__(self, block_cipher: BlockCipher, section_size: int, gamma_block_size: int):
        """
        section_size, gamma_block_size - parameters in bytes
        """
        # Structural constraints required by the construction: 256-bit key,
        # even block size, and the divisibility relations between key, block,
        # section and gamma-block sizes.
        assert(block_cipher.key_size == 32)
        assert(block_cipher.block_size % 2 == 0)
        assert(block_cipher.key_size % block_cipher.block_size == 0)
        assert(section_size % block_cipher.block_size == 0)
        assert(block_cipher.block_size % gamma_block_size == 0)
        BlockCipherMode.__init__(self, block_cipher)
        self.section_size = section_size
        self.gamma_block_size = gamma_block_size
    def _acpkm(self) -> bytes:
        # Key meshing: derive the next 32-byte working key by encrypting the
        # fixed constant bytes 0x80..0x9F block-by-block under the current key,
        # then install it in the cipher.
        key = list(range(128, 128 + 32))
        offset = 0
        while offset < len(key):
            next_offset = offset + self.cipher.block_size
            key[offset : next_offset] = self.cipher.encrypt(bytes(key[offset : next_offset]))
            offset = next_offset
        self.cipher.set_key(bytes(key))
    def _inc(self, vector: bytes) -> bytes:
        # Increment the counter as a big-endian integer (ripple-carry from the
        # last byte; wraps silently if every byte is 255).
        vector = list(vector)
        for i in range(len(vector)):
            if vector[-i - 1] == 255:
                vector[-i - 1] = 0
            else:
                vector[-i - 1] += 1
                break
        return bytes(vector)
    def _xor(self, u: bytes, v: bytes) -> bytes:
        # Byte-wise XOR, truncated to the shorter argument.
        res = []
        for a, b in zip(u, v):
            res.append(a ^ b)
        return bytes(res)
    def encrypt(self, plain_text: bytes, key: bytes, initialization_vector: bytes) -> bytes:
        # The IV is half a block; the initial counter is IV || zero-padding.
        assert(len(initialization_vector) == self.cipher.block_size / 2)
        cipher_text = []
        offset = 0
        # Number of key-meshing sections and of gamma blocks per section.
        section_count = (len(plain_text) + self.section_size - 1) // self.section_size
        gamma_block_count = self.section_size // self.gamma_block_size
        ctr = initialization_vector + bytes([0 for _ in initialization_vector])
        self.cipher.set_key(key)
        for i in range(section_count):
            for j in range(gamma_block_count):
                # XOR the next keystream chunk (possibly short at the tail)
                # into the plaintext; the counter advances per gamma block.
                block_size = min(self.gamma_block_size, len(plain_text) - offset)
                cipher_text += self._xor(plain_text[offset : offset + block_size], self.cipher.encrypt(ctr)[:block_size])
                offset += block_size
                ctr = self._inc(ctr)
            # Re-key after every full section.
            self._acpkm()
        return bytes(cipher_text)
    def decrypt(self, cipher_text: bytes, key: bytes, initialization_vector: bytes) -> bytes:
        # CTR is an involution: decryption is the same keystream XOR.
        return self.encrypt(cipher_text, key, initialization_vector)
| true |
5f3aa291e91fd4a91aa36d9b95ffde92bab9cb6f | Python | link-money-dev/robot | /tasks/issue_assets.py | UTF-8 | 7,556 | 2.65625 | 3 | [
"MIT"
] | permissive | # -*- coding: UTF-8 -*-
# 该模块给出了一个发行数字资产的例子
# 发行资产需要两个账户:一个是发行账户,一个是分发账户,产生的资产在分发账户里
# 对于本例,产生的资产咋子distributor这个账户里
# 发行账户即:
# 分发账户即:
import wrapper.client as CLIENT
import CONSTANT
constant=CONSTANT.Constant('public')
issuer_private_key_for_LINK='SDCCOAL6ILCJXWQPDZLRNHTMAHLZHC2IIHGPSQMLSIPHRTLNCFNT4A66'
issuer_address_for_LINK='GCA3SBI2Y6AYHLAW2GBTS7C5HTSFW6OTZACHOVJGBQ6JENTE3ZXPNNSL'
distributor_private_key_for_LINK='SCWDXYXEJL6GQWXUADDGFGFPF64ORWHXA7R2FNV4UVS6VIBIQCVD53JH'
distributor_address_for_LINK='GCONR7JZN7VUSFI54BS76VQJRWGUZDLQFPTB7DXHNP6E5KZECUW77VFL'
issuer_private_key_for_CNY='SBWATTQW5UDSVNZ7BVKX3DPR4EJFZCRUBYKRG5SMDESRSD5QWG7VSDQH'
issuer_address_for_CNY='GCNYF4V6CUY2XENJGRHLNB3AQE3RZIOWYHUN6YU5T34N3ZSK4KGCB7DD'
distributor_private_key_for_CNY='SCZVR6ZS3UKV3YHTK5YJJ3E7WD6RLWKJDPRTUNYQQ54BDFAYEQ4JDZ6S'
distributor_address_for_CNY='GB552GC4YLN7O7Z6DDDFOO7ZPK6374H4YZGZ4YJMWQW6HBRRAWNSIIQW'
issuer_private_key_for_OTHERS='SC53U46XXITEIDTDHLKUJAETF2JSLRQ6GKLMEYUAL3YO32EOKPXNFM44'
issuer_address_for_OTHERS='GBTCF6RETMMKZ6NKXIIAL5X3JZEZY2DPIIW77IZ6NWTNIUSAY37EQWAR'
distributor_private_key_for_OTHERS='SCZVR6ZS3UKV3YHTK5YJJ3E7WD6RLWKJDPRTUNYQQ54BDFAYEQ4JDZ6S'
distributor_address_for_OTHERS='GB552GC4YLN7O7Z6DDDFOO7ZPK6374H4YZGZ4YJMWQW6HBRRAWNSIIQW'
# issue LINK
issuer=CLIENT.Client(private_key=issuer_private_key_for_LINK, api_server=constant.API_SERVER)
distributor=CLIENT.Client(private_key=distributor_private_key_for_LINK, api_server=constant.API_SERVER)
result=issuer.issue_asset(distributor.private_key,asset_code='LINK',amount=1000000000)
print(result)
#
# # issue CNY
# issuer=CLIENT.Client(private_key=issuer_private_key_for_CNY, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_CNY, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='CNY',amount=10000000)
# print(result)
#
# # issue FX
# issuer=CLIENT.Client(private_key=issuer_private_key_for_CNY, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_CNY, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='FX',amount=21000000)
# print(result)
# issue Others
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='BTC',amount=21000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='ETH',amount=104000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='LTC',amount=84000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='XRP',amount=100000000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='BCH',amount=21000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='EOS',amount=1000000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='XLM',amount=100000000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='USD',amount=2580000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='ADA',amount=31000000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='XMR',amount=17000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='IOTA',amount=2700000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='DASH',amount=18900000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='TRX',amount=99000000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='NEO',amount=100000000)
# print(result)
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='ETC',amount=107000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='XEM',amount=8999999999)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='ZEC',amount=5000000)
# print(result)
#
# issuer=CLIENT.Client(private_key=issuer_private_key_for_OTHERS, api_server=constant.API_SERVER)
# distributor=CLIENT.Client(private_key=distributor_private_key_for_OTHERS, api_server=constant.API_SERVER)
# result=issuer.issue_asset(distributor.private_key,asset_code='MKR',amount=1000000)
# print(result) | true |
9cc9256022dd5d8aea2e7d25f5b3c3c3d0b88dbb | Python | sagarnikam123/learnNPractice | /codingBat/python/string1/frontAgain.py | UTF-8 | 868 | 3.203125 | 3 | [
"MIT"
] | permissive | #######################################################################################################################
#
# frontAgain
#
# Given a string, return true if the first 2 chars in the string also appear
# at the end of the string, such as with "edited"
#
#######################################################################################################################
#
# frontAgain("edited") → true
# frontAgain("edit") → false
# frontAgain("ed") → true
# frontAgain("jj") → true
# frontAgain("jjj") → true
# frontAgain("jjjj") → true
# frontAgain("jjjk") → false
# frontAgain("x") → false
# frontAgain("") → false
# frontAgain("java") → false
# frontAgain("javaja") → true
#
#######################################################################################################################
| true |
01e7958ca2aa13644fd628351c7df0d47a6f9837 | Python | stellaluminary/Baekjoon | /2110.py | UTF-8 | 452 | 2.890625 | 3 | [] | no_license | # n, c= 5,3
# l = sorted([1,2,8,4,9])
n,c = map(int, input().split())
l = sorted([int(input()) for _ in range(n)])
start = 1
end = l[n - 1] - l[0]
while start <= end:
cnt = 1
mid = (start + end) // 2
current = l[0]
for x in l:
if current + mid <= x:
cnt += 1
current = x
if cnt >= c:
start = mid + 1
ans = mid
else:
end = mid - 1
print(ans)
print(start, end, mid, cnt) | true |
417e6e9ac20164065cd88aca183e161919c88db6 | Python | hinriksnaer/Basic-Transformer | /trainer.py | UTF-8 | 5,212 | 2.640625 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import pytorch_lightning as pl
from model import TransformerEncoder, PositionalEncoding
from scheduler import CosineWarmupScheduler
class TransformerPredictor(pl.LightningModule):
    """Generic per-element sequence classifier built on a Transformer encoder.

    Subclasses must implement ``training_step``/``validation_step``/``test_step``.
    All constructor arguments are captured into ``self.hparams`` via
    ``save_hyperparameters()``.
    """
    def __init__(self, input_dim, model_dim, num_classes, num_heads, num_layers, lr, warmup, max_iters, dropout=0.0, input_dropout=0.0):
        """
        Inputs:
            input_dim - Hidden dimensionality of the input
            model_dim - Hidden dimensionality to use inside the Transformer
            num_classes - Number of classes to predict per sequence element
            num_heads - Number of heads to use in the Multi-Head Attention blocks
            num_layers - Number of encoder blocks to use.
            lr - Learning rate in the optimizer
            warmup - Number of warmup steps. Usually between 50 and 500
            max_iters - Number of maximum iterations the model is trained for. This is needed for the CosineWarmup scheduler
            dropout - Dropout to apply inside the model
            input_dropout - Dropout to apply on the input features
        """
        super().__init__()
        # Lightning: stores every __init__ argument under self.hparams.
        self.save_hyperparameters()
        self._create_model()
    def _create_model(self):
        """Build input projection, positional encoding, encoder stack and head."""
        # Input dim -> Model dim
        self.input_net = nn.Sequential(
            nn.Dropout(self.hparams.input_dropout),
            nn.Linear(self.hparams.input_dim, self.hparams.model_dim)
        )
        # Positional encoding for sequences
        self.positional_encoding = PositionalEncoding(d_model=self.hparams.model_dim)
        # Transformer (project-local encoder; feedforward width = 2 * model_dim)
        self.transformer = TransformerEncoder(num_layers=self.hparams.num_layers,
                                              input_dim=self.hparams.model_dim,
                                              dim_feedforward=2*self.hparams.model_dim,
                                              num_heads=self.hparams.num_heads,
                                              dropout=self.hparams.dropout)
        # Output classifier per sequence element
        self.output_net = nn.Sequential(
            nn.Linear(self.hparams.model_dim, self.hparams.model_dim),
            nn.LayerNorm(self.hparams.model_dim),
            nn.ReLU(inplace=True),
            nn.Dropout(self.hparams.dropout),
            nn.Linear(self.hparams.model_dim, self.hparams.num_classes)
        )
    def forward(self, x, mask=None, add_positional_encoding=True):
        """
        Inputs:
            x - Input features of shape [Batch, SeqLen, input_dim]
            mask - Mask to apply on the attention outputs (optional)
            add_positional_encoding - If True, we add the positional encoding to the input.
                                      Might not be desired for some tasks.
        Returns per-element class logits of shape [Batch, SeqLen, num_classes].
        """
        x = self.input_net(x)
        if add_positional_encoding:
            x = self.positional_encoding(x)
        x = self.transformer(x, mask=mask)
        x = self.output_net(x)
        return x
    @torch.no_grad()
    def get_attention_maps(self, x, mask=None, add_positional_encoding=True):
        """
        Function for extracting the attention matrices of the whole Transformer for a single batch.
        Input arguments same as the forward pass. Runs without gradient tracking.
        """
        x = self.input_net(x)
        if add_positional_encoding:
            x = self.positional_encoding(x)
        attention_maps = self.transformer.get_attention_maps(x, mask=mask)
        return attention_maps
    def configure_optimizers(self):
        """Adam optimizer with a cosine-warmup LR schedule stepped per batch."""
        optimizer = optim.Adam(self.parameters(), lr=self.hparams.lr)
        # Apply lr scheduler per step
        lr_scheduler = CosineWarmupScheduler(optimizer,
                                             warmup=self.hparams.warmup,
                                             max_iters=self.hparams.max_iters)
        return [optimizer], [{'scheduler': lr_scheduler, 'interval': 'step'}]
    def training_step(self, batch, batch_idx):
        # Task-specific; must be implemented by subclasses.
        raise NotImplementedError
    def validation_step(self, batch, batch_idx):
        # Task-specific; must be implemented by subclasses.
        raise NotImplementedError
    def test_step(self, batch, batch_idx):
        # Task-specific; must be implemented by subclasses.
        raise NotImplementedError
class ReversePredictor(TransformerPredictor):
    """Transformer task head that learns to reverse a sequence of categories."""

    def _calculate_loss(self, batch, mode="train"):
        """Compute cross-entropy loss and per-element accuracy for one batch,
        log both under ``{mode}_loss`` / ``{mode}_acc``, and return them."""
        tokens, targets = batch
        # Turn integer categories into one-hot feature vectors for the model.
        features = F.one_hot(tokens, num_classes=self.hparams.num_classes).float()
        logits = self.forward(features, add_positional_encoding=True)
        flat_logits = logits.view(-1, logits.size(-1))
        loss = F.cross_entropy(flat_logits, targets.view(-1))
        acc = (logits.argmax(dim=-1) == targets).float().mean()
        self.log(f"{mode}_loss", loss)
        self.log(f"{mode}_acc", acc)
        return loss, acc

    def training_step(self, batch, batch_idx):
        train_loss, _ = self._calculate_loss(batch, mode="train")
        return train_loss

    def validation_step(self, batch, batch_idx):
        _ = self._calculate_loss(batch, mode="val")

    def test_step(self, batch, batch_idx):
        _ = self._calculate_loss(batch, mode="test")
| true |
1007d77674650f04d823163fbf8ce056e07ac436 | Python | adamcath/mailr | /mailr | UTF-8 | 3,060 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
import argparse
import csv
import tempfile
import os
import sys
def fail(msg, errno):
    """Print *msg* to stderr and terminate the process with exit code *errno*."""
    print(msg, file=sys.stderr)
    sys.exit(errno)
def cli():
    """Parse the command line and dispatch to the selected sub-command."""
    parser = argparse.ArgumentParser(description="Send builk emails")
    commands = parser.add_subparsers()

    # `build` renders one .email file per CSV row.
    build_cmd = commands.add_parser(
        "build", help="Create the emails but don't send them")
    build_cmd.add_argument(
        "--subject", required=True, help="Subject line for all emails")
    build_cmd.add_argument(
        "--from", required=True, help="From (use 'Name <address>' format)")
    build_cmd.add_argument(
        "--to-field", required=True,
        help="Which column of data to use as the destination email (should be in Name <address> format)")
    build_cmd.add_argument(
        "template", help="Template file (see README for docs)")
    build_cmd.add_argument(
        "csv", help="Data file in CSV format. Must have header row")
    build_cmd.set_defaults(func=build)

    # `send` pipes previously built emails through sendmail.
    send_cmd = commands.add_parser(
        "send", help="Send the emails that you built previously")
    send_cmd.set_defaults(func=send)

    parsed = parser.parse_args()
    parsed.func(parsed)
def build(args):
    """Render one ``emails/<row>.email`` file per CSV row.

    Each output file has From/Subject/To headers followed by the template
    populated with the row's columns. Exits via ``fail()`` (codes 1-6) on
    any error; never returns a value.
    """
    # Create project dir
    try:
        os.mkdir("emails")
    except Exception as e:
        fail("Failed to create project directory './emails': " + str(e), 1)

    # Read template (with-block guarantees the handle is closed)
    try:
        with open(args.template, "r") as template_fd:
            template = template_fd.read()
    except Exception as e:
        fail("Failed to read template file: " + str(e), 2)

    # Read data file
    try:
        with open(args.csv, "r") as csv_fd:
            csv_lines = csv_fd.readlines()
    except Exception as e:
        fail("Failed to read data file: " + str(e), 3)

    # Parse CSV (first row is the header, per the CLI contract)
    try:
        rows = list(csv.DictReader(csv_lines))
    except Exception as e:
        fail("Failed to parse CSV file: " + str(e), 4)

    # Create each email
    for i in range(len(rows)):
        try:
            email_buf = template.format(**rows[i])
        except Exception as e:
            # BUG FIX: the original did `"... %d: %s" % i` (TypeError at the
            # moment an error should be reported) and passed str(e) as the
            # exit code; format both values and use a numeric code.
            fail("Template population failed at row %d: %s" % (i, str(e)), 6)

        email_filename = os.path.join("emails", str(i) + ".email")
        try:
            # vars(args)["from"] because `from` is a keyword and cannot be
            # accessed as an attribute.
            with open(email_filename, "w") as email_fd:
                email_fd.write("From: " + vars(args)["from"] + "\n")
                email_fd.write("Subject: " + args.subject + "\n")
                email_fd.write("To: " + rows[i][args.to_field] + "\n\n")
                email_fd.write(email_buf)
        except Exception as e:
            fail("Failed to write email " + email_filename + ": " + str(e), 5)
def send(args):
    """Pipe every ``emails/*.email`` file through ``sendmail -vt``.

    Exits via ``fail()`` (codes 10-11) when the directory cannot be read or
    sendmail reports a non-zero status.
    """
    try:
        entries = os.listdir("emails")
    except Exception as e:
        fail("Failed to load emails: " + str(e), 10)

    for entry in entries:
        # Only files produced by `build` are sent.
        if not entry.endswith(".email"):
            continue
        filename = os.path.join("emails", entry)
        if os.system("sendmail -vt < " + filename) != 0:
            fail("Failed to send email " + filename, 11)
cli()
| true |
660e7f5dd2047d82cabe1705b80101c36ce395b9 | Python | pedrozopayares/Data-Structure-and-Algorithms | /Python 3/BubbleSort.py | UTF-8 | 1,623 | 4.0625 | 4 | [] | no_license | '''
<< Bubble Sort Algorithm >>
Sometimes referred to as sinking sort,
is a simple sorting algorithm that repeatedly steps through the list,
compares adjacent elements and swaps them if they are in the wrong order.
The pass through the list is repeated until the list is sorted.
The algorithm, which is a comparison sort,
is named for the way smaller or larger elements "bubble" to the top of the list.
More info -> https://en.wikipedia.org/wiki/Bubble_sort.
'''
def bubble_sort(values):
    """Sort *values* in place in ascending order via bubble sort; return it.

    After pass ``i`` the ``i + 1`` largest elements have bubbled to the end,
    so each pass scans one element fewer.
    """
    n = len(values)
    for i in range(n - 1):
        for j in range(n - 1 - i):
            if values[j] > values[j + 1]:
                # Idiomatic tuple swap replaces the original aux variable.
                values[j], values[j + 1] = values[j + 1], values[j]
    return values

if __name__ == "__main__":
    number = input("Enter a number with several digits: ")
    digits = [int(ch) for ch in number]  # one list entry per digit
    bubble_sort(digits)
    print("Digits ordered from lower to greater:" + str(digits))
# See running: https://colab.research.google.com/drive/1bSSMi8pzMevl_xg34rkRU-_POSQXsADk#scrollTo=q4f7Jv2g6YRe&line=8&uniqifier=1
# See interesting discution about list elements interchanges in Python: https://mail.python.org/pipermail/python-es/2006-March/011779.html
| true |
992b562a47c5b3195f1c7db721776c4aefef2bfa | Python | billwestfall/python | /miscellaneous/sudoku/002_three.py | UTF-8 | 301 | 3.34375 | 3 | [] | no_license | import numpy as np
import numpy as np

def is_magic_square(grid, target=6):
    """Return True when every row and every column of square *grid* sums to *target*.

    BUG FIX: the original compared the 3-element row/column sums against
    ``a1 = [6, 6]`` (only 2 elements), so the check could never succeed.
    The expected vector is now sized from the grid itself.
    """
    expected = np.full(grid.shape[0], target)
    row_sums = np.sum(grid, axis=1)
    col_sums = np.sum(grid, axis=0)
    return bool(np.array_equal(row_sums, expected) and np.array_equal(col_sums, expected))

if __name__ == "__main__":
    arr = np.random.randint(1, 4, size=(3, 3))
    print('-----Generated Random Array----')
    print(arr)
    if is_magic_square(arr):
        print(np.sum(arr, axis=1))
    else:
        print("Not sudoku")
| true |
8b3cd836c3d666cb5a4d8127704089667e761b9e | Python | jose-myvalue/investment | /app/value/fundamentals.py | UTF-8 | 21,592 | 2.59375 | 3 | [] | no_license | from datetime import datetime
from typing import Dict
from app.value.forecast import ForecastLR
from app.value.utils import Utils
import pandas as pd
import numpy as np
# Global display/behaviour tweaks: 2-decimal floats in pandas output,
# silence SettingWithCopyWarning, and plain fixed-point numpy printing.
pd.options.display.float_format = "{:.2f}".format
pd.options.mode.chained_assignment = None
np.set_printoptions(suppress=True, formatter={"float_kind": "{:f}".format}, precision=2)
class Fundamentals:
    """Derive valuation fundamentals -- trailing-twelve-month (TTM) figures,
    historical series and linear-regression forecasts -- from a pre-fetched
    fundamentals payload of nested dicts (EOD-style API response).
    NOTE(review): the TTM series are built with ``rolling(4).sum().shift(-3)``
    over the quarterly statements, which presumes those frames are ordered
    newest-first -- confirm against the upstream data feed.
    """
    # Class-level placeholders; every one of these is rebound per instance
    # in __init__ (kept as-is for backward compatibility).
    df_income_statement_quarterly = []
    df_balance_sheet_quarterly = []
    df_highlights = []
    df_income_statement_yearly = []
    df_balance_sheet_yearly = []
    df_stocks = []
    df_shares_stats = []
    df_historical_eps = []
    df_historical_net_income = []
    def __init__(self, data: Dict):
        """Split the raw *data* mapping into one DataFrame per statement.
        Statement dicts are transposed so rows are indexed by period date."""
        self.ticker = data["General"]["Code"]
        self.df_highlights = pd.DataFrame(data["highlights"], index=[0])
        self.df_valuation = pd.DataFrame(data["valuation"], index=[0])
        self.df_income_statement_quarterly = pd.DataFrame(data["quarters_income_statement"]).T
        self.df_balance_sheet_quarterly = pd.DataFrame(data["quarters_balance_sheet"]).T
        self.df_cash_flow_quarterly = pd.DataFrame(data["quarters_cash_flow"]).T
        self.df_income_statement_yearly = pd.DataFrame(data["yearly_income_statement"]).T
        self.df_balance_sheet_yearly = pd.DataFrame(data["yearly_balance_sheet"]).T
        self.df_stocks = pd.DataFrame(data["Stocks"]).T
        self.df_shares_stats = pd.DataFrame(data["shares_stats"], index=[0])
        self.df_outstanding_shares_annual = pd.DataFrame(data["outstanding-shares-annual"]).T
        self.df_outstanding_shares_quarterly = pd.DataFrame(data["outstanding-shares-quarterly"]).T
        # self.df_balance_sheet_quarterly.reset_index(inplace=True)
        # self.df_stocks.reset_index(inplace=True)
    # STOCK PRICE
    def get_stock_price(self):
        """Return the most recent close price rounded to 2 decimals, or 0.0
        when the price data cannot be rounded (TypeError)."""
        try:
            df_stocks_flipped = self.df_stocks["close"].iloc[::-1]
            return round(df_stocks_flipped.iloc[0], 2)
        except TypeError:
            return 0.0
    def get_historical_stock_price(self):
        """Return ``{'close_price': {date: price, ...}}`` oldest-first."""
        df_stocks_flipped = self.df_stocks["close"].iloc[::-1].to_frame()
        df_stocks_flipped.reset_index(inplace=True)
        df_stocks_flipped.rename(columns={"index": "date"}, inplace=True)
        df_stocks_flipped.rename(columns={"close": "close_price"}, inplace=True)
        # NOTE(review): self-assignment below is a no-op -- possibly a date
        # conversion was intended; left unchanged.
        df_stocks_flipped["date"] = df_stocks_flipped["date"]
        df_stocks_flipped.set_index("date", inplace=True)
        return df_stocks_flipped.to_dict()
    def get_nopat_2_ttm(self, ticker):
        """Print net income TTM and dividends-paid TTM for *ticker*.
        NOTE(review): debug-only -- prints to stdout and returns None;
        looks unfinished (NOPAT itself is never computed)."""
        net_income_value = self.get_net_income_ttm()
        df_dividends_paid_ttm = (
            self.df_cash_flow_quarterly["dividendsPaid"].rolling(4).sum().shift(-3)
        )
        df_dividends_paid_ttm = df_dividends_paid_ttm.dropna().to_frame()
        df_dividends_paid_ttm = df_dividends_paid_ttm.reset_index()
        df_dividends_paid_ttm.rename(columns={"index": "date"}, inplace=True)
        dividends_paid_last_date_available = df_dividends_paid_ttm["date"].max()
        dividends_paid = df_dividends_paid_ttm.loc[
            df_dividends_paid_ttm["date"] == dividends_paid_last_date_available
        ]
        try:
            dividends_paid_value = dividends_paid["dividendsPaid"].iloc[0]
        except IndexError:
            # No dividends rows available -> treat as zero.
            dividends_paid_value = 0.0
        print(ticker)
        print("Net Income Value: " + str(net_income_value))
        print("Dividends paid: " + str(dividends_paid_value))
    # EQUITY
    def get_equity(self):
        """Return the latest total stockholder equity as a float."""
        return float(self.df_balance_sheet_quarterly["totalStockholderEquity"].dropna().iloc[0])
    # EBITDA
    def get_ebitda(self):
        """Return the provider-reported EBITDA highlight, or 0.0 on bad data."""
        try:
            return round(self.df_highlights["EBITDA"].iloc[0], 2)
        except TypeError:
            return 0.0
    def __get_ebitda_historical(self):
        """Build a TTM EBITDA frame as operating income + depreciation,
        joined on the shared quarterly date index."""
        df_operating_income_ttm = (
            self.df_income_statement_quarterly["operatingIncome"].rolling(4).sum().shift(-3)
        )
        df_deprecations_ttm = self.df_cash_flow_quarterly["depreciation"].rolling(4).sum().shift(-3)
        df_operating_income_ttm = df_operating_income_ttm.to_frame()
        df_deprecations_ttm = df_deprecations_ttm.to_frame()
        df_operating_income_ttm = df_operating_income_ttm[
            df_operating_income_ttm["operatingIncome"].notna()
        ]
        df_deprecations_ttm = df_deprecations_ttm[df_deprecations_ttm["depreciation"].notna()]
        df_ebitda_ttm = df_operating_income_ttm.merge(
            df_deprecations_ttm, left_index=True, right_index=True
        )
        df_ebitda_ttm["ebitda"] = df_ebitda_ttm["operatingIncome"] + df_ebitda_ttm["depreciation"]
        return df_ebitda_ttm
    def get_ebitda_historical(self):
        """Return the TTM EBITDA history as a plain dict."""
        return self.__get_ebitda_historical().to_dict()
    def get_ebitda_forecast(self):
        """Return [{'year', 'quarter', 'ebitda'}, ...]: the latest actual
        value followed by linear-regression forecasts for future quarters."""
        df_ebitda = self.__get_ebitda_historical()
        df_ebitda.reset_index(inplace=True)
        df_ebitda.rename(columns={"index": "date"}, inplace=True)
        df_ebitda.rename(columns={0: "ebitda"}, inplace=True)
        df_ebitda.reset_index(inplace=True)
        x = df_ebitda["index"].to_numpy()
        y = df_ebitda["ebitda"].to_numpy()
        y_future = ForecastLR.get_forecast(x, y)
        def __get_year(date):
            return pd.Timestamp(date).year
        def __get_quarter(date):
            return pd.Timestamp(date).quarter
        quarter = __get_quarter(df_ebitda["date"].iloc[0])
        year = __get_year(df_ebitda["date"].iloc[0])
        # First entry is the latest observed value, not a forecast.
        forecasting_dict = dict(
            {
                "year": year,
                "quarter": quarter,
                "ebitda": float(round(df_ebitda["ebitda"].iloc[0], 2)),
            }
        )
        forecast_list = list()
        forecast_list.append(forecasting_dict)
        for j in range(0, y_future.size):
            quarter, year = Utils.get_next_quarter(quarter, year)
            forecasting_dict = dict(
                {"year": year, "quarter": quarter, "ebitda": float(round(y_future[j][0], 2)),}
            )
            forecast_list.append(forecasting_dict)
        return forecast_list
    # MARKETCAP
    def get_marketcap(self):
        """Return the provider-reported market capitalization, or 0.0 on bad data."""
        try:
            return round(self.df_highlights["MarketCapitalization"].iloc[0], 2)
        except TypeError:
            return 0.0
    # TODO Review
    def get_marketcap_historical(self):
        """Return a DataFrame of close * outstanding shares per (year, quarter).
        NOTE(review): share counts are shifted forward one quarter via the
        __get_*_shares helpers -- presumably to align reporting lag; verify."""
        df_stocks = self.df_stocks[["date", "close"]].iloc[::-1]
        df_shares = self.get_outstanding_shares()
        df_stocks["close"] = pd.to_numeric(df_stocks["close"])
        df_shares["shares"] = pd.to_numeric(df_shares["shares"])
        def __get_year(date):
            return pd.Timestamp(date).year
        def __get_quarter(date):
            return pd.Timestamp(date).quarter
        def __get_quarter_shares(date):
            # Shift the share count into the following quarter (Q4 wraps to Q1).
            quarter = pd.Timestamp(date).quarter
            if quarter in range(1, 4):
                return quarter + 1
            elif quarter == 4:
                return 1
        def __get_year_shares(date):
            # Companion to __get_quarter_shares: Q4 wraps into the next year.
            year = pd.Timestamp(date).year
            quarter = pd.Timestamp(date).quarter
            if quarter in range(1, 4):
                return year
            elif quarter == 4:
                return year + 1
        df_shares["year"] = df_shares["dateFormatted"].apply(__get_year_shares)
        df_shares["quarter"] = df_shares["dateFormatted"].apply(__get_quarter_shares)
        df_stocks["year"] = df_stocks["date"].apply(__get_year)
        df_stocks["quarter"] = df_stocks["date"].apply(__get_quarter)
        df_marketcap = df_stocks.merge(
            df_shares, how="inner", left_on=["year", "quarter"], right_on=["year", "quarter"]
        )
        df_marketcap["marketcap"] = df_marketcap["close"] * df_marketcap["shares"]
        return df_marketcap
    # OUTSTANDING SHARES
    def get_outstanding_shares(self):
        """Return the quarterly outstanding-shares frame (date + share count)."""
        df_outstanding_shares = self.df_outstanding_shares_quarterly[["dateFormatted", "shares"]]
        return df_outstanding_shares
    # CASH
    def get_cash(self):
        """Return the most recent quarterly cash balance as a float."""
        return float(self.df_balance_sheet_quarterly["cash"].dropna().iloc[0])
    def get_cash_historical(self):
        """Return a DataFrame of quarterly cash balances with year/quarter columns."""
        df_cash = self.df_balance_sheet_quarterly["cash"].to_frame()
        df_cash.reset_index(inplace=True)
        df_cash.rename(columns={"index": "date"}, inplace=True)
        df_cash["cash"] = pd.to_numeric(df_cash["cash"])
        df_cash = df_cash.dropna()
        def __get_year(date):
            return pd.Timestamp(date).year
        def __get_quarter(date):
            return pd.Timestamp(date).quarter
        df_cash["year"] = df_cash["date"].apply(__get_year)
        df_cash["quarter"] = df_cash["date"].apply(__get_quarter)
        return df_cash
    def get_cash_forecast(self):
        """Return [{'year', 'quarter', 'cash'}, ...]: latest actual cash then
        linear-regression forecasts for future quarters."""
        df_cash = self.get_cash_historical()
        df_cash.reset_index(inplace=True)
        x = df_cash["index"].to_numpy()
        y = df_cash["cash"].to_numpy()
        y_future = ForecastLR.get_forecast(x, y)
        def __get_year(date):
            return pd.Timestamp(date).year
        def __get_quarter(date):
            return pd.Timestamp(date).quarter
        quarter = __get_quarter(df_cash["date"].iloc[0])
        year = __get_year(df_cash["date"].iloc[0])
        # First entry is the latest observed value, not a forecast.
        forecasting_dict = dict(
            {"year": year, "quarter": quarter, "cash": float(round(df_cash["cash"].iloc[0], 2)),}
        )
        forecast_list = list()
        forecast_list.append(forecasting_dict)
        for j in range(0, y_future.size):
            quarter, year = Utils.get_next_quarter(quarter, year)
            forecasting_dict = dict(
                {"year": year, "quarter": quarter, "cash": float(round(y_future[j][0], 2)),}
            )
            forecast_list.append(forecasting_dict)
        return forecast_list
    # TOTAL DEBT
    def get_total_debt(self):
        """Return latest short-term + long-term debt (missing parts count as 0)."""
        df_short_term_deb = self.df_balance_sheet_quarterly["shortTermDebt"]
        df_short_term_deb = df_short_term_deb.reset_index()
        df_short_term_deb.rename(columns={"index": "date"}, inplace=True)
        short_term_deb_last_date_available = df_short_term_deb["date"].max()
        short_term_deb = df_short_term_deb.loc[
            df_short_term_deb["date"] == short_term_deb_last_date_available
        ]
        try:
            short_term_deb_value = short_term_deb["shortTermDebt"].iloc[0]
        except IndexError:
            short_term_deb_value = 0.0
        df_long_term_deb = self.df_balance_sheet_quarterly["longTermDebtTotal"]
        df_long_term_deb = df_long_term_deb.reset_index()
        df_long_term_deb.rename(columns={"index": "date"}, inplace=True)
        long_term_deb_last_date_available = df_long_term_deb["date"].max()
        long_term_deb = df_long_term_deb.loc[
            df_long_term_deb["date"] == long_term_deb_last_date_available
        ]
        try:
            long_term_deb_value = long_term_deb["longTermDebtTotal"].iloc[0]
        except IndexError:
            long_term_deb_value = 0.0
        # Provider may report None instead of a number -- normalize to 0.0.
        if short_term_deb_value is None:
            short_term_deb_value = 0.0
        if long_term_deb_value is None:
            long_term_deb_value = 0.0
        total_debt = float(short_term_deb_value) + float(long_term_deb_value)
        return round(total_debt, 2)
    def get_total_debt_historical(self):
        """Return a DataFrame of short + long term debt per (year, quarter)."""
        df_short_term_deb = self.df_balance_sheet_quarterly["shortTermDebt"].fillna(0).to_frame()
        df_long_term_deb = self.df_balance_sheet_quarterly["longTermDebtTotal"].fillna(0).to_frame()
        df_short_term_deb["shortTermDebt"] = pd.to_numeric(df_short_term_deb["shortTermDebt"])
        df_long_term_deb["longTermDebtTotal"] = pd.to_numeric(
            (df_long_term_deb["longTermDebtTotal"])
        )
        df_short_term_deb.reset_index(inplace=True)
        df_long_term_deb.reset_index(inplace=True)
        df_short_term_deb.rename(columns={"index": "date"}, inplace=True)
        df_long_term_deb.rename(columns={"index": "date"}, inplace=True)
        def __get_year(date):
            return pd.Timestamp(date).year
        def __get_quarter(date):
            return pd.Timestamp(date).quarter
        df_short_term_deb["year"] = df_short_term_deb["date"].apply(__get_year)
        df_short_term_deb["quarter"] = df_short_term_deb["date"].apply(__get_quarter)
        df_long_term_deb["year"] = df_long_term_deb["date"].apply(__get_year)
        df_long_term_deb["quarter"] = df_long_term_deb["date"].apply(__get_quarter)
        df_total_debt = df_long_term_deb.merge(
            df_short_term_deb,
            how="inner",
            left_on=["year", "quarter"],
            right_on=["year", "quarter"],
        )
        df_total_debt["total_debt"] = (
            df_total_debt["longTermDebtTotal"] + df_total_debt["shortTermDebt"]
        )
        df_total_debt = df_total_debt.dropna()
        return df_total_debt
    def get_total_debt_forecast(self):
        """Return [{'year', 'quarter', 'total_debt'}, ...]: latest actual value
        then linear-regression forecasts for future quarters."""
        df_total_debt = self.get_total_debt_historical()
        df_total_debt.reset_index(inplace=True)
        x = df_total_debt["index"].to_numpy()
        y = df_total_debt["total_debt"].to_numpy()
        y_future = ForecastLR.get_forecast(x, y)
        def __get_year(date):
            return pd.Timestamp(date).year
        def __get_quarter(date):
            return pd.Timestamp(date).quarter
        # "date_y" is the short-term-debt date column produced by the merge.
        quarter = __get_quarter(df_total_debt["date_y"].iloc[0])
        year = __get_year(df_total_debt["date_y"].iloc[0])
        forecasting_dict = dict(
            {
                "year": year,
                "quarter": quarter,
                "total_debt": float(round(df_total_debt["total_debt"].iloc[0], 2)),
            }
        )
        forecast_list = list()
        forecast_list.append(forecasting_dict)
        for j in range(0, y_future.size):
            quarter, year = Utils.get_next_quarter(quarter, year)
            forecasting_dict = dict(
                {"year": year, "quarter": quarter, "total_debt": float(round(y_future[j][0], 2)),}
            )
            forecast_list.append(forecasting_dict)
        return forecast_list
    # EBIT
    def get_ebit_ttm(self):
        """Return the most recent trailing-twelve-month EBIT as a float."""
        # getting net income TTM dataframe
        df_ebit_ttm = self.df_income_statement_quarterly["ebit"].rolling(4).sum().shift(-3)
        df_ebit_ttm = df_ebit_ttm.dropna().to_frame()
        df_ebit_ttm = df_ebit_ttm.reset_index()
        df_ebit_ttm.rename(columns={"index": "date"}, inplace=True)
        ebit_ttm_last_date_available = df_ebit_ttm["date"].max()
        ebit_ttm = df_ebit_ttm.loc[df_ebit_ttm["date"] == ebit_ttm_last_date_available]
        ebit_ttm_value = ebit_ttm["ebit"].iloc[0]
        return float(ebit_ttm_value)
    def get_ebit_historical(self):
        """Return a DataFrame of TTM EBIT per quarterly date."""
        # getting net income TTM dataframe
        df_ebit_ttm = self.df_income_statement_quarterly["ebit"].rolling(4).sum().shift(-3)
        df_ebit_ttm = df_ebit_ttm.dropna().to_frame()
        df_ebit_ttm = df_ebit_ttm.reset_index()
        df_ebit_ttm.rename(columns={"index": "date"}, inplace=True)
        return df_ebit_ttm
    def get_ebit_forecast(self):
        """Return [{'year', 'quarter', 'ebit'}, ...]: latest actual value then
        linear-regression forecasts for future quarters."""
        df_ebit = self.get_ebit_historical()
        df_ebit.reset_index(inplace=True)
        x = df_ebit["index"].to_numpy()
        y = df_ebit["ebit"].to_numpy()
        y_future = ForecastLR.get_forecast(x, y)
        def __get_year(date):
            return pd.Timestamp(date).year
        def __get_quarter(date):
            return pd.Timestamp(date).quarter
        quarter = __get_quarter(df_ebit["date"].iloc[0])
        year = __get_year(df_ebit["date"].iloc[0])
        forecasting_dict = dict(
            {"year": year, "quarter": quarter, "ebit": float(round(df_ebit["ebit"].iloc[0], 2)),}
        )
        forecast_list = list()
        forecast_list.append(forecasting_dict)
        for j in range(0, y_future.size):
            quarter, year = Utils.get_next_quarter(quarter, year)
            forecasting_dict = dict(
                {"year": year, "quarter": quarter, "ebit": float(round(y_future[j][0], 2)),}
            )
            forecast_list.append(forecasting_dict)
        return forecast_list
    # Free Cash Flow FCF
    def get_fcf_historical(self):
        """Return a TTM free-cash-flow frame computed as
        EBITDA - capex - interest expense - income tax expense."""
        df_capex_ttm = self.df_cash_flow_quarterly["capitalExpenditures"].rolling(4).sum().shift(-3)
        df_interest_expense_ttm = (
            self.df_income_statement_quarterly["interestExpense"].rolling(4).sum().shift(-3)
        )
        df_income_tax_expense_ttm = (
            self.df_income_statement_quarterly["incomeTaxExpense"].rolling(4).sum().shift(-3)
        )
        df_ebitda_ttm = self.__get_ebitda_historical()
        df_capex_ttm = df_capex_ttm.to_frame()
        df_interest_expense_ttm = df_interest_expense_ttm.to_frame()
        df_income_tax_expense_ttm = df_income_tax_expense_ttm.to_frame()
        df_capex_ttm = df_capex_ttm[df_capex_ttm["capitalExpenditures"].notna()]
        df_interest_expense_ttm = df_interest_expense_ttm[
            df_interest_expense_ttm["interestExpense"].notna()
        ]
        df_income_tax_expense_ttm = df_income_tax_expense_ttm[
            df_income_tax_expense_ttm["incomeTaxExpense"].notna()
        ]
        # Inner-join everything on the shared quarterly date index.
        df_capex_interest_expense = df_capex_ttm.merge(
            df_interest_expense_ttm, left_index=True, right_index=True
        )
        df_ebitda_capex_interest_expense = df_capex_interest_expense.merge(
            df_ebitda_ttm, left_index=True, right_index=True
        )
        df_fcf_ttm = df_ebitda_capex_interest_expense.merge(
            df_income_tax_expense_ttm, left_index=True, right_index=True
        )
        df_fcf_ttm["fcf"] = (
            df_fcf_ttm["ebitda"]
            - df_fcf_ttm["capitalExpenditures"]
            - df_fcf_ttm["interestExpense"]
            - df_fcf_ttm["incomeTaxExpense"]
        )
        df_fcf_ttm.reset_index(inplace=True)
        df_fcf_ttm.rename(columns={"index": "date"}, inplace=True)
        return df_fcf_ttm
    def get_fcf(self):
        """Return the most recent TTM free cash flow rounded to 2 decimals."""
        return round(float(self.get_fcf_historical()["fcf"].iloc[0]), 2)
    def get_fcf_forecast(self):
        """Return [{'year', 'quarter', 'fcf'}, ...]: latest actual value then
        linear-regression forecasts for future quarters."""
        df_fcf = self.get_fcf_historical()
        df_fcf.reset_index(inplace=True)
        x = df_fcf["index"].to_numpy()
        y = df_fcf["fcf"].to_numpy()
        y_future = ForecastLR.get_forecast(x, y)
        def __get_year(date):
            return pd.Timestamp(date).year
        def __get_quarter(date):
            return pd.Timestamp(date).quarter
        quarter = __get_quarter(df_fcf["date"].iloc[0])
        year = __get_year(df_fcf["date"].iloc[0])
        forecasting_dict = dict(
            {"year": year, "quarter": quarter, "fcf": float(round(df_fcf["fcf"].iloc[0], 2)),}
        )
        forecast_list = list()
        forecast_list.append(forecasting_dict)
        for j in range(0, y_future.size):
            quarter, year = Utils.get_next_quarter(quarter, year)
            forecasting_dict = dict(
                {"year": year, "quarter": quarter, "fcf": float(round(y_future[j][0], 2)),}
            )
            forecast_list.append(forecasting_dict)
        return forecast_list
    # NET INCOME
    def get_net_income_ttm(self):
        """Return the latest TTM net income.
        NOTE(review): on an empty history this returns a 3-tuple
        (EarningsShare, 0.0, 0.0) while the normal path returns a scalar --
        inconsistent return types; callers must handle both."""
        df_net_income_ttm = self.get_net_income_historical()
        if df_net_income_ttm.empty:
            return self.df_highlights["EarningsShare"].iloc[0], 0.0, 0.0
        else:
            return self.get_net_income_historical()["netIncome"].iloc[0]
    def get_net_income_historical(self):
        """Return a TTM net-income frame; falls back to
        netIncomeApplicableToCommonShares (renamed to netIncome) when the
        plain netIncome column yields no rows."""
        # getting net income TTM dataframe
        df_net_income_ttm = (
            self.df_income_statement_quarterly["netIncome"].rolling(4).sum().shift(-3)
        )
        df_net_income_ttm = df_net_income_ttm.dropna().to_frame()
        df_net_income_ttm = df_net_income_ttm.reset_index()
        df_net_income_ttm.rename(columns={"index": "date"}, inplace=True)
        if df_net_income_ttm.empty:
            df_net_income_ttm = (
                self.df_income_statement_quarterly["netIncomeApplicableToCommonShares"]
                .rolling(4)
                .sum()
                .shift(-3)
            )
            df_net_income_ttm = df_net_income_ttm.dropna().to_frame()
            df_net_income_ttm = df_net_income_ttm.reset_index()
            df_net_income_ttm.rename(columns={"index": "date"}, inplace=True)
            df_net_income_ttm.rename(
                columns={"netIncomeApplicableToCommonShares": "netIncome"}, inplace=True
            )
            return df_net_income_ttm
        else:
            return df_net_income_ttm
    def get_net_income_forecast(self):
        """Return [{'year', 'quarter', 'netIncome'}, ...]: latest actual value
        then linear-regression forecasts for future quarters."""
        df_net_income_historical = self.get_net_income_historical()
        x = df_net_income_historical["date"].to_numpy()
        y = df_net_income_historical["netIncome"].to_numpy()
        y_future = ForecastLR.get_forecast(x, y)
        fecha = df_net_income_historical["date"].iloc[0]
        quarter = pd.Timestamp(fecha).quarter
        year = pd.Timestamp(fecha).year
        # quarter, year = fecha.quarter, fecha.year
        forecast_list = list()
        forecasting_dict = dict(
            {
                "year": year,
                "quarter": quarter,
                "netIncome": float(round(df_net_income_historical["netIncome"].iloc[0], 2)),
            }
        )
        forecast_list.append(forecasting_dict)
        for j in range(0, y_future.size):
            quarter, year = Utils.get_next_quarter(quarter, year)
            forecasting_dict = dict(
                {"year": year, "quarter": quarter, "netIncome": float(round(y_future[j][0], 2)),}
            )
            forecast_list.append(forecasting_dict)
        return forecast_list
| true |
412eba15b19d85b9e018fe0e8d11b01239bbe539 | Python | aganpython/laopo3.5 | /python/Socket&Twisted/T_TCPClient.py | UTF-8 | 402 | 2.65625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import socket

# Simple line-oriented TCP client: read a line from the user, send it to the
# server, print the server's reply. An empty input line or an empty reply
# ends the session.
SERVER_ADDRESS = ('localhost', 10001)
RECV_BUFFER = 1024

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(SERVER_ADDRESS)
while True:
    outgoing = input('> ')
    if not outgoing:
        break
    client.send(outgoing.encode())
    reply = client.recv(RECV_BUFFER).decode()
    if not reply:
        # Server closed the connection.
        break
    print(reply)
client.close()
| true |
904a893b7495a25fc43bb14f22448d6a894f30aa | Python | yongkwangshin/tensorflow | /cnn.py | UTF-8 | 3,250 | 3.3125 | 3 | [] | no_license | # cnn.py: MNIST image recognition with CNN
# written by Sung Kyu Lim
# limsk@ece.gatech.edu
# 12/18/2017
# B1: import tensorflow
# NOTE(review): this is TensorFlow 1.x graph-mode code (tf.Session,
# tf.placeholder); it will not run unmodified on TF 2.x.
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# B2: MNIST data set up
# NOTE(review): tensorflow.examples.tutorials was removed in TF2; the
# dataset is downloaded to ./MNIST_data on first run.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# B3: weight initialization with truncated normal distribution
def weight_variable(shape):
    """Create a trainable weight tensor initialized ~N(0, 0.1), truncated."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
# B4: initialize bias with 0.1
def bias_variable(shape):
    """Create a trainable bias tensor initialized to the constant 0.1."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# B5: convolution with common setting
def conv2d(x, W):
    """2-D convolution, stride 1, SAME padding (output keeps spatial size)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# B6: pooling with common setting
def max_pool_2x2(x):
    """2x2 max pooling, stride 2 (halves each spatial dimension)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
        strides=[1, 2, 2, 1], padding='SAME')
# B7: input image reshaping to 4D tensor for CNN
# format: [batch, height, width, channels]
x = tf.placeholder(tf.float32, shape=[None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])
# B8: first CNN layer: CONV -> RELU -> POOL
# We use 5x5 patch, accept 1 channel, and produce 32.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# B9: second CNN layer: CONV -> RELU -> POOL
# We use 5x5 patch, accept 32 channel, and produce 64.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# B10: fully connected layer with 1024 neurons: FC -> RELU
# Images are reduced to 7x7 and reshaped.
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# B11: neuron dropout to avoid overfitting
# 'keep' contains keep rate
keep = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep)
# B12: readout layer using softmax regression
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# B13: loss function and optimizer
# NOTE(review): log(softmax) computed manually is numerically unstable;
# tf.nn.softmax_cross_entropy_with_logits would be the safe form.
y_ = tf.placeholder(tf.float32, shape=[None, 10])
loss = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), 1))
opt = tf.train.AdamOptimizer(0.001).minimize(loss)
# B14: accuracy calculation for printing
right = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
acc = tf.reduce_mean(tf.cast(right, tf.float32))
# B15: session run
# NOTE(review): keep=0.1 retains only 10% of activations during training --
# unusually aggressive (0.5 is typical); verify this is intentional.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(1000):
    batch = mnist.train.next_batch(50)
    sess.run(opt, feed_dict={x: batch[0], y_: batch[1], keep: 0.1})
    if i % 100 == 0:
        # Evaluate on the current batch with dropout disabled (keep=1.0).
        check = sess.run(acc, feed_dict={x:batch[0], y_: batch[1], keep: 1.0})
        print("step %d, training accuracy %.2f" % (i, check))
# B16: model accuracy with MNIST test set
images = mnist.test.images
labels = mnist.test.labels
final = sess.run(acc, feed_dict={x: images, y_: labels, keep: 1.0})
print("final test accuracy %g" % final)
| true |
c295b6a3c141e4f8b97e044c2d86d663bb74abf5 | Python | loggar/py | /py-core/io/io_file_position.py | UTF-8 | 366 | 3.5625 | 4 | [] | no_license | #!/usr/bin/python3
# Open a file for reading and writing without truncating it; the with-block
# guarantees the handle is closed even if a read/seek raises (the original
# leaked the handle on any exception before fo.close()).
with open("./dist/foo.txt", "r+") as fo:
    s = fo.read(10)
    print("Read String is : ", s)
    # Check current position (tell() reports the byte offset after the read)
    position = fo.tell()
    print("Current file position : ", position)
    # Reposition pointer at the beginning once again (seek returns the new offset)
    position = fo.seek(0, 0)
    s2 = fo.read(10)
    print("Again read String is : ", s2)
| true |
7c7b56f08a053ccde1ea3fe8398cb9e70634365a | Python | Aasthaengg/IBMdataset | /Python_codes/p02855/s871657124.py | UTF-8 | 1,317 | 2.796875 | 3 | [] | no_license | H, W, K = map(int, input().split())
# Read the H x W cake grid as a list of character lists
# ('.' = empty cell, '#' = strawberry).
L = []
for _ in range(H):
    s = input()
    row = []
    for j in range(len(s)):
        row.append(s[j])
    L.append(row)
# print(L)
# ans[i][j] holds the piece number assigned to cell (i, j); -1 marks an
# entire row with no strawberry (filled in later by copying a neighbour row).
ans = [[0] * W for _ in range(H)]
# ans[0][1] = 1
# print(ans[0][1])
color = 1
count = 0
for i in range(H):
    if '#' not in L[i]:
        # Strawberry-free row: defer, mark every cell with -1.
        for j in range(W):
            ans[i][j] = -1
        continue
    else:
        # Sweep the row left to right; bump the piece number right after a
        # strawberry only when another strawberry remains further right, so
        # each piece contains exactly one strawberry.
        idx = 0
        while idx < W:
            count += 1
            # print(ans[i][idx])
            if L[i][idx] == '.':
                # print(ans[i][idx])
                ans[i][idx] = color
                # print(ans, i, idx)
                idx += 1
            else:
                # print(ans[i][idx])
                ans[i][idx] = color
                # print(ans, i, idx)
                if ('#' in L[i][idx + 1:]):
                    color += 1
                idx += 1
        # Start a fresh piece number for the next strawberry-bearing row.
        color += 1
# Replace each run of -1 rows with a copy of the nearest strawberry-bearing
# row below it; trailing -1 rows copy the last assigned row above instead.
new_ans = []
count = 0
for v in ans:
    if -1 in v:
        count += 1
        continue
    if count != 0 and -1 not in v:
        new_ans.append(v)
        while count > 0:
            new_ans.append(v)
            count -= 1
    elif count == 0 and -1 not in v:
        new_ans.append(v)
if count != 0:
    while count > 0:
        new_ans.append(new_ans[-1])
        count -= 1
for i in new_ans:
    print(*i)
| true |
660cc397886fb61f763d77f7e3fe135a22e321a0 | Python | 553672759/xxgit | /python/old/exercise/testDir/zidian.py | UTF-8 | 469 | 3.171875 | 3 | [] | no_license | #coding:utf8
'''
Created on 2016-10-22
@author: xx
'''
'''
Dictionaries: a mapping type (key -> value)
'''
# Different ways to create a dictionary (Python 2 syntax: print statements)
aInfo={'aaa':3000,"bbb":5000}
info=[('aaa',3000),('bbb',5000)]
bInfo=dict(info)
cInfo=dict([('aaa',3000),('bbb',5000)])
dInfo=dict(aaa=3000,bbb=5000)
fInfo=dict((('aaa',3000),('bbb',5000)))
print fInfo
aDict={}.fromkeys(('aaa','bbb','ccc'),8000)# map every listed key to 8000
aaDict={}.fromkeys(['aaa','bbb','ccc'],8000)
print sorted(aDict)# sorted() over a dict yields a list of its keys
| true |
6b7825de617ca9dbbbccd3f2cf4354856a002991 | Python | rentfrow/DAQ_Logger | /DAQ_errors.py | UTF-8 | 23,147 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python3
# SCPI Errors
# -100 to -200 Command Errors
# -200 to -299 Execution Errors
# -300 to -399 SCPI Specified Device-Specific Errors
# -400 to -800 Query and System Errors
# 100 to 200 PNA-specific Errors
# http://na.support.keysight.com/pna/help/latest/Support/SCPI_Errors.htm
# -100 to -200 Command Errors
# A command error indicates that the test set's GPIB parser has detected an IEEE 488.2 syntax error.
# When one of these errors is generated, the command error bit in the event status register is set.
SCPI_error = {-100, ["std_command", "Command - This event bit (Bit 5) indicates a syntax error, or a semantic error, or a GET command was entered, see IEEE 488.2, 11.5.1.1.4.
-101
std_invalidChar
Invalid character - Indicates a syntactic elements contains a character which is invalid for that type.
-102
std_syntax
Syntax - Indicates that an unrecognized command or data type was encountered. For example, a string was received when the device does not accept strings.
-103
std_invalidSeparator
Invalid separator - The parser was expecting a separator and encountered an illegal character. For example, the semicolon was omitted after a program message unit.
-104
std_wrongParamType
Data type -The parser recognized a data element different than one allowed. For example, numeric or string data was expected but block data was encountered.
-105
std_GETNotAllowed
GET not allowed - Indicates a Group Execute Trigger was received within a program message. Correct the program so that the GET does not occur within the program code.
-108
std_tooManyParameters
Parameter not allowed - Indicates that more parameters were received than expected for the header. For example, *ESE common command only accepts one parameter, so *ESE 0,1 is not allowed.
-109
std_tooFewParameters
Missing parameter - Indicates that less parameters were received than required for the header. For example, *ESE requires one parameter, *ESE is not allowed.
-110
std_cmdHeader
Command header - Indicates an error was detected in the header. This error is used when the device cannot detect the more specific errors -111 through -119.
-111
std_headerSeparator
Header separator - Indicates that a character that is not a legal header separator was encountered while parsing the header.
-112
std_IDTooLong
Program mnemonic too long - Indicates that the header contains more that twelve characters, see IEEE 488.2, 7.6.1.4.1.
-113
std_undefinedHeader
Undefined header - Indicates the header is syntactically correct, but it is undefined for this specific device. For example, *XYZ is not defined for any device.
-114
std_suffixOutOfRange
Header suffix out of range - Indicates the value of a header suffix attached to a program mnemonic makes the header invalid.
-120
std_numericData
Numeric data - This error, as well as errors
-121
std_invalidCharInNumber
Invalid character in number - Indicates an invalid character for the data type being parsed was encountered. For example, an alpha in a decimal numeric or a "9" in octal data.
-123
std_exponentTooLarge
Exponent too large - Indicates the magnitude of an exponent was greater than 32000, see IEEE 488.2, 7.7.2.4.1.
-124
std_decimalTooLong
Too many digits - Indicates the mantissa of a decimal numeric data element contained more than 255 digits excluding leading zeros, see IEEE 488.2, 7.7.2.4.1.
-128
std_numericNotAllowed
Numeric data not allowed - Indicates that a legal numeric data element was received, but the device does not accept one in this position for the header.
-130
std_suffix
Suffix - This error, as well as errors -131 through -139, are generated when parsing a suffix. This particular error message is used if the device cannot detect a more specific error.
-131
std_badSuffix
Invalid suffix - Indicates the suffix does not follow the syntax described in IEEE 488.2, 7.7.3.2, or the suffix is inappropriate for this device.
-134
std_suffixTooLong
Suffix too long - Indicates the suffix contain more than 12 characters, see IEEE 488.2, 7.7.3.4.
-138
std_suffixNotAllowed
Suffix not allowed - Indicates that a suffix was encountered after a numeric element that does not allow suffixes.
-140
std_charData
Character data - This error, as well as errors
-141
std_invalidCharData
Invalid character data - Indicates that the character data element contains an invalid character or the particular element received is not valid for the header.
-144
std_charDataTooLong
Character data too long - Indicates the character data element contains more than twelve characters, see IEEE 488.2, 7.7.1.4.
-148
std_charNotAllowed
Character data not allowed - Indicates a legal character data element was encountered where prohibited by the device.
-150
std_stringData
String data - This error, as well as errors
-151
std_stringInvalid
Invalid string data - Indicates that a string data element was expected, but was invalid, see IEEE 488.2, 7.7.5.2. For example, an END message was received before the terminal quote character.
-158
std_stringNotAllowed
String data not allowed - Indicates that a string data element was encountered but was not allowed by the device at this point in parsing.
-160
std_blockData
Block data - This error, as well as errors -161 through -169, are generated when parsing a block data element. This particular error message is used if the device cannot detect a more specific error.
-161
std_badBlock
Invalid block data - Indicates a block data element was expected, but was invalid, see IEEE 488.2, 7.7.6.2. For example, and END message was received before the end length was satisfied.
-168
std_blockNotAllowed
Block data not allowed - Indicates a legal block data element was encountered, but not allowed by the device at this point in parsing.
-170
std_expr
Expression - This error, as well as errors -171 through -179, are generated when parsing an expression data element. This particular error message is used if the device cannot detect a more specific error.
-171
std_invalidExpression
Invalid expression - Indicates the expression data element was invalid, see IEEE 488.2, 7.7.7.2. For example, unmatched parentheses or an illegal character.
-178
std_exprNotAllowed
Expression data not allowed - Indicates a legal expression data was encountered, but was not allowed by the device at this point in parsing.
-180
std_macro
Macro - This error, as well as error -181 through -189, are generated when defining a macro or execution a macro. This particular error message is used if the device cannot detect a more specific error.
-181
std_validOnlyInsideMacro
Invalid outside macro definition - Indicates that a macro parameter place holder was encountered outside of a macro definition.
-183
std_invalidWithinMacro
Invalid inside macro definition - Indicates that the program message unit sequence, sent with a *DDT or a *DMC command, is syntactically invalid, see IEEE 488.2, 10.7.6.3.
-184
std_macroParm
Macro parameter - Indicates that a command inside the macro definition had the wrong number or type of parameters.
-200 to -299 Execution Errors
These errors are generated when something occurs that is incorrect in the current state of the instrument. These errors may be generated by a user action from either the remote or the manual user interface
-200
std_execGen
Execution - This event bit (Bit 4) indicates a PROGRAM DATA element following a header was outside the legal input range or otherwise inconsistent with the device's capabilities, see IEEE 488.2, 11.5.1.1.5.
-201
std_invalidWhileInLocal
Invalid while in local
-202
std_settingsLost
Settings lost due to rtl
-203
std_commandProtected
Command protected - Indicates that a legal password-protected program command or query could not be executed because the command was disabled.
-210
std_trigger
Trigger
-211
std_triggerIgnored
Trigger ignored
-212
std_armIgnored
Arm ignored
-213
std_initIgnored
Init ignored
-214
std_triggerDeadlock
Trigger deadlock
-215
std_armDeadlock
Arm deadlock
-220
std_parm
Parameter - Indicates that a program data element related error occurred.
-221
std_settingsConflict
Settings conflict - Indicates that a legal program data element was parsed but could not be executed due to the current device state.
-222
std_dataOutOfRange
Data out of range - Indicates that a legal program data element was parsed but could not be executed because the interpreted value was outside the legal range defined by the devices
-223
std_tooMuchData
Too much data - Indicates that a legal program data element of block, expression, or string type was received that contained more data than the device could handle due to memory or related device-specific requirements.
-224
std_illegalParmValue
Illegal parameter value - Indicates that the value selected was not part of the list of values given.
-225
std_noMemoryForOp
Out of memory - The device has insufficient memory to perform the requested operation.
-226
std_listLength
Lists not same length - Attempted to use LIST structure having individual LIST's of unequal lengths.
-230
std_dataCorruptOrStale
Data corrupt or stale - Indicates invalid data, a new reading started but not completed since the last access.
-231
std_dataQuestionable
Data questionable - Indicates that measurement accuracy is suspect.
-232
std_invalidFormat
Invalid format
-233
std_invalidVersion
Invalid version - Indicates that a legal program data element was parsed but could not be executed because the version of the data is incorrect to the device. For example, a not supported file version, a not supported instrument version.
-240
std_hardware
Hardware - Indicates that a legal program command or query could not be executed because of a hardware problem in the device.
-241
std_hardwareMissing
Hardware missing - Indicates that a legal program command or query could not be executed because of missing device hardware. For example, an option was not installed.
-250
std_massStorage
Mass storage - Indicates that a mass storage error occurred. The device cannot detect the more specific errors described for errors -251 through -259.
-251
std_missingMassStorage
Missing mass storage - Indicates that a legal program command or query could not be executed because of missing mass storage.
-252
std_missingMedia
Missing media - Indicates that a legal program command or query could not be executed because of missing media. For example, no disk.
-253
std_corruptMedia
Corrupt media - Indicates that a legal program command or query could not be executed because of corrupt media. For example, bad disk or wrong format.
-254
std_mediaFull
Media full- Indicates that a legal program command or query could not be executed because the media is full. For example, there is no room left on the disk.
-255
std_directoryFull
Directory full - Indicates that a legal program command or query could not be executed because the media directory was full.
-256
std_fileNotFound
File name not found - Indicates that a legal program command or query could not be executed because the file name was not found on the media.
-257
std_fileName
File name - Indicates that a legal program command or query could not be executed because the file name on the device media was in error. For example, an attempt was made to read or copy a nonexistent file.
-258
std_mediaProtected
Media protected - Indicates that a legal program command or query could not be executed becuse the media was protected. For example, the write-protect switch on a memory card was set.
-260
std_expression
Expression
-261
std_math
Math in expression
-270
std_macroExecution
Macro - Indicates that a macro related execution error occurred.
-271
std_macroSyntax
Macro syntax - Indicates that a syntactically legal macro program data sequence, according to IEEE 488.2, 10.7.2, could not be executed due to a syntax error within the macro definition.
-272
std_macroExec
Macro execution - Indicates that a syntactically legal macro program data sequence could not be executed due to some error in the macro definition, see IEEE 488.2, 10.7.6.3.
-273
std_badMacroName
Illegal macro label - Indicates that the macro label was not accepted, it did not agree with the definition in IEEE 488.2, 10.7.3
-274
std_macroPlaceholderMa
cro parameter - Indicates that the macro definition improperly used a macro parameter placeholder, see IEEE 4882, 10.7.3.
-275
std_macroTooLong
Macro definition too long - Indicates that a syntactically legal macro program data sequence could not be executed because the string of block contents were too long for the device to handle, IEEE 488.2, 10.7.6.1.
-276
std_macroRecursion
Macro recursion - Indicates that a syntactically legal macro program data sequence count not be executed because it would be recursive, see IEEE 488.2, 10.7.6.6.
-277
std_cantRedefineMacro
Macro redefinition not allowed - Indicates that redefining an existing macro label, see IEEE 488.2, 10.7.6.4.
-278
std_macroNotFound
Macro header not found - Indicates that a legal macro label in the *GMS?, see IEEE 488.2, 10.13, could not be executed because the header was not previously defined.
-280
std_program
Program
-281
std_cantCreateProgram
Cannot create program
-282
std_illegalProgramName
Illegal program name
-283
std_illegalVarName
Illegal variable name
-284
std_programRunning
Program currently running
-285
std_programSyntax
Program syntax
-286
std_programRuntime
Program runtime
-290
std_memoryUse
Memory use
-291
std_execOutOfMemory
Out of memory
-292
std_nameNotFound
Referenced name does not exist
-293
std_nameAlreadyExists
Referenced name already exists
-294
std_incompatibleType
Incompatible type
-300 to -399 SCPI Specified Device-Specific Errors
A device-specific error indicates that the instrument has detected an error that occurred because some operations did not properly complete, possibly due to an abnormal hardware or firmware condition. For example, an attempt by the user to set an out of range value will generate a device specific error. When one of these errors is generated, the device specific error bit in the event status register is set.
-300
std_deviceSpecific
Device specific - This event bit (Bit 3) indicates that a device operation did not properly complete due to some condition, such as overrange see IEEE 488.2, 11.5.1.1.6.
-310
std_system
System
-311
std_memory
Memory - Indicates some physical fault in the devices memory, such as a parity error.
-312
std_PUDmemoryLost
PUD memory lost - Indicates protected user data saved by the *PUD command has been lost, see IEEE 488.2, 10.27.
-313
std_calMemoryLost
Calibration memory lost - Indicates that nonvolatile calibration data used by the *CAL? command has been lost, see IEEE 488.2, 10.2.
-314
std_savRclMemoryLost
Save/recall memory lost - Indicates that the nonvolatile data saved by the *SAV command has been lost, see IEEE 488.2, 10.33.
-315
std_configMemoryLost
Configuration memory lost - Indicates that nonvolatile configuration data saved by the device has been lost.
-320
std_storageFault
Storage fault - Indicates that the firmware detected a fault when using data storage. This is not an indication of physical damage or failure of any mass storage element.
-321
std_outOfMemory
Out of memory - An internal operation needed more memory than was available
-330
std_selfTestFailed
Self-test failed - Indicates a problem with the device that is not covered by a specific error message. The device may require service.
-340
std_calFailed
Calibration failed - Indicates a problem during calibration of the device that is not covered by a specific error.
-350
std_queueOverflow
Queue overflow - Indicates that there is no room in the queue and an error occurred but was not recorded. This code is entered into the queue in lieu of the code that caused the error.
-360
std_comm
Communication - This is the generic communication error for devices that cannot detect the more specific errors described for error -361 through -363.
-361
std_parity
Parity in program message - Parity bit not correct when data received for example, on a serial port.
-362
std_framing
Framing in program message - A stop bit was not detected when data was received for example, on a serial port (for example, a baud rate mismatch).
-363
std_inputBufferOverrun
Input buffer overrun - Software or hardware input buffer on serial port overflows with data caused by improper or nonexistent pacing.
-400 to -800 Query and System Errors
A Query error is generated either when data in the instrument's GPIB output queue has been lost, or when an attempt is being made to read data from the output queue when no output is present or pending.
-400
std_queryGen
Query - This event bit (Bit 2) indicates that an attempt to read data from the Output Queues when no output is present or pending, to data in the Output Queue has been lost see IEEE488.2, 11.5.1.1.7.
-410
std_interrupted
Query INTERRUPTED - Indicates the test set has been interrupted by a new program message before it finishes sending a RESPONSE MESSAGE see IEEE 488.2, 6.3.2.3.
-420
std_unterminated
Query UNTERMINATED - Indicates an incomplete Query in the program see IEEE 488.2, 6.3.2.2.
-430
std_deadlocked
Query DEADLOCKED - Indicates that the Input Buffer and Output Queue are full see IEEE 488.2, 6.3.1.7.
-440
std_responseNotAllowed
Query UNTERMINATED after indefinite response - Indicates that a query was received in the same program message after a query requesting an indefinite response was executed see IEEE 488.2, 6.5.7.5.
-500
std_powerOn
Power on
-600
std_userRequest
User request
-700
std_requestControl
Request control
-800
std_operationComplete
Operation complete
.
Analyzer-Specific (Positive) SCPI Errors
100
dupWindNum
"Duplicate window number"
101
windNumNotFound
"Window number not found"
102
failedWindCreate
"Window creation failed"
103
noCalcParamSelection
"CALC measurement selection set to none"
See CALC:PAR:SEL
104
dupMeasName
"Duplicate measurement name"
105
dataNotFound
"Requested data not available"
106
measNotFound
"Requested measurement not found"
107
traceNotFound
"Requested trace not found"
108
notImplemented
"Mnemonic not yet implemented"
109
noDocument
"No measurement container found"
110
dupTraceNum
"Duplicate trace number"
111
titleStrTooLong
"Title string exceeds 50 characters"
112
memoryNotFound
"Requested memory not found"
113
exceedMaxTraces
"Exceeded the maximum number of traces per window"
114
SerNumNotFound
"The serial number was not found. Please store the serial number."
115
LoadFailed
"The state was not loaded. Please check the file name."
116
StoreFailed
"The state was not stored. Please check the file and path names."
117
File
"An in the File operation occurred. Please check file and path names."
118
measChanConflict
"Measurement does not belong to specified channel."
119
exceedMaxWindows
"Exceeded the maximum number of data windows"
120
markerNotFound
"The specified marker was not found."
121
diagnostic
"Diagnostic ."
122
channelNotFound
"The specified channel was not found."
123
exceedMaxMeasurements
"Exceeded the maximum number of allowed mesurements."
124
parameterOutOfRange
"The specified value was out of range."
125
userRangeNotValid
"The currently selected user range is not valid."
126
referenceMarkerNotFound
"The reference marker is not active."
127
sweepSegmentNotFound
"The sweep segment was not found."
128
markerNotDelta
"The specified marker is not a delta marker."
129
printoutFailed
"Attempt to output to a printer failed."
130
memory_trace_not_compatible
"Memory not compatible. Trace Math not applied."
131
trace_math_reset
"Memory not compatible. Trace Math turned off."
132
hw_read_failed
"Hardware read failed."
133
hw_write_failed
"Hardware write failed."
134
dsp_active
"Failed because DSP was not halted."
135
secure_memory
"Attempt to access secure memory region."
136
snum_protected
"The serial number is protected."
137
snum_format_bad
"The serial number format is bad."
138
snum_already_set
"The serial number is already set."
139
hw_setting_failed
"Hardware setting failed."
140
cal_access_failed
"Calibration data access failed."
141
db_access_failed
"Database access failed."
142
memory_range_exceeded
"Command exceeds usable memory range."
143
lost_phase_lock
"Phase lock has been lost."
144
over_power
"Detected too much power at input."
145
ee_wrt_failed
"EEPROM write failed."
146
yig_cal_failed
"YTO calibration failed."
147
ramp_cal_failed
"Analog ramp calibration failed."
148
dspcom_bad
"DSP communication failed."
149
no_license_found
"Request failed. The required license was not found."
150
argLimited
"The argument was out of range
151
markerBWNotFound
"The Marker Bandwidth was not found."
153
peakNotFound
"The Peak was not found."
154
targetNotFound
"The Target search value was not found."
155
calNotImpl
"The Calibration feature requested is not implemented."
156
calClassNotValidForCalType
"SENS:CORR:CCH measurement selection set to none"
158
calNotValidForConfidenceChe
"Selected measurement does not have a calibration valid for Confidence Check"
159
invalidPort
"Specified port is out of range"
160
invalidPortPath
"ROUT:PATH:DEF:PORT x, y does not match measurement; setting to defaults"
161
ioInvalidWrite
"Attempted I/O write while port set to read only."
162
ioInvalidRead
"Attempted I/O read from write only port."
163
calsetNotFound
"Requested Cal Set was not found in Cal Set Storage."
164
noCalSetSelected
"There is no Cal Set currently selected for the specified channel."
165
cantDeleteCalSetInUse
"Cannot delete a Cal Set while it is being used."
166
calsetStimChange
"Channel stimulus settings changed to match selected Cal Set."
167
exceedMaxCalSets
"Exceeded the maximum number of cal sets."
168
calCouldNotTurnOn
"A valid calibration is required before correction can be turned on."
169
standardMeasurementRequired
"The attempted operation can only be performed on a standard measurement type."
170
noDivisorBuffer
"A valid divisor buffer is required before normalization can be turned on."
171
InvalidReceiverPowerCalParagraph
"Receiver power cal requires the measurement to be of unratioed power."
172
ecalCouldNotConfigure
"Could not configure the Electronic Calibration system. Check to see if the module is plugged into the proper connector."
173
measHasNoMemoryAlg
"This measurement does not support memory operations"
174
measHasNoNormalizeAlg
"This measurement does not support normalize operations."
175
userCharacterizationNotFound
"User characterization was not found in the Electronic Calibration module."
176
measInvalidBufferSize
"The data provided has an invalid number of points. It could not be stored." | true |
ec266104bb8a2250332fc51644c80cff91742402 | Python | captainsoma/midi_websocket_chesscam | /Server/hub_alt.py | UTF-8 | 2,619 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python
# WS server example
import websockets
import concurrent
import json
from Server.chesscam import ChessCam
import asyncio
import config
class Hub:
    """Bridge between the ChessCam capture loop and a websocket client.

    Owns a ChessCam instance and pushes newly captured sequences to a
    connected websocket peer as JSON after a greeting handshake.
    """
    def __init__(self):
        # First we setup the chesscam
        self.cam = ChessCam()
        # True once the greeting handshake with the client has completed
        self.connected = False
        self.sequence_ready_to_send = False # is True if there is a sequence to be sent
        # last sequence captured by chesscam() (set there, not here)
        self.new_sequence = None
    async def get_new_sequence(self):
        # Wait for console input; on 's', trigger a new camera capture and
        # flag the result for sending.
        # NOTE(review): input() is a blocking call inside a coroutine - it
        # stalls the event loop until the user presses Enter; confirm intended.
        if input("For Sending press 's'\n") =="s": # if key 's' is pressed
            if not self.connected:
                print("client disconnected.")
            else:
                self.cam.run(user_trigger=True) #Getting new pictures, could be flagged as well
                if self.cam.new_sequence_captured:
                    self.sequence_ready_to_send=True
        return True
    async def handler(self, websocket, path):
        # Connection handler: before the handshake, wait for the client
        # greeting and reply; once connected, wait for a user-triggered
        # capture and send the tracked sequences as JSON.
        while True:
            try:
                if self.connected:
                    producer_task = asyncio.ensure_future(self.get_new_sequence())
                    await asyncio.gather(producer_task)
                    if self.sequence_ready_to_send:
                        if not websocket.open:
                            # NOTE(review): websockets.connect(...) returns an
                            # awaitable Connect object and is not awaited here,
                            # so this assignment does not actually reconnect -
                            # verify against the websockets API.
                            websocket=websockets.connect(config.client_connection)
                            print("Websocket closed")
                            self.connected=False
                            # NOTE(review): asyncio.Task.all_tasks() was removed
                            # in Python 3.9 (asyncio.all_tasks() replaces it) -
                            # confirm the targeted Python version.
                            for task in asyncio.Task.all_tasks():
                                task.cancel()
                            return
                        await websocket.send(json.dumps(self.cam.track.sequences.tolist()))
                        self.sequence_ready_to_send = False
                        print("New Sequence sent")
                else:
                    listener_task = asyncio.ensure_future(websocket.recv())
                    message1 = await asyncio.gather(listener_task)
                    # gather() returns a list; the greeting is its first element
                    if message1[0]==config.client_greeting:
                        print(f"{message1}")
                        await websocket.send(config.server_greeting)
                        self.connected = True
            except websockets.ConnectionClosed:
                print("Websocket closed.")
                self.connected=False
    def chesscam(self):
        # Run the camera until the board grid is captured, then stage the
        # first tracked sequence for sending.
        while not self.cam.grid_captured:
            self.cam.run()
        if self.cam.grid_captured:
            self.new_sequence=self.cam.track.sequences
            self.sequence_ready_to_send=True
            return True #we can open the server now
if __name__=="__main__":
    # NOTE(review): this only instantiates Hub - nothing here starts the
    # websocket server or calls hub.chesscam()/handler; confirm against
    # the full file (this chunk may be truncated).
    hub = Hub()
| true |
9bfb716a185cde5032832575bb8f0fd123ad98ca | Python | tws0002/helga | /helga/maya/arash/helga_rig_functionality/helga_rig_functionality.py | UTF-8 | 3,964 | 2.734375 | 3 | [] | no_license |
"""
helga_rig_functionality
==========================================
Rig helper functionality. This module separates the functionality of interacting with the Helga rigs
from any UI. It encompasses functions used with Maya modules.
There is no import of PySide in here. All UI tools that let you work with
the rigs import this module.
"""
#Import
#------------------------------------------------------------------
#python
import sys
import os
import logging
#maya
import pymel.core as pm
#Import variable
do_reload = True
#helga
#global_variables
from helga.general.setup.global_variables import global_variables
if(do_reload):reload(global_variables)
#global_functions
from helga.general.setup.global_functions import global_functions
if(do_reload):reload(global_functions)
#helga_rig_functionality
#helga_rig_globals
import helga_rig_globals as helga_rig_globals
if(do_reload): reload(helga_rig_globals)
#Globals
#------------------------------------------------------------------
#HelgaRigFunctionality class
#------------------------------------------------------------------
class HelgaRigFunctionality(object):
    """Programmatic helpers for interacting with the Helga rigs.

    Keeps track of a rig namespace and a per-class logger. The
    rig-interaction methods resolve the namespace to operate on but are
    still stubs awaiting implementation.
    """

    def __init__(self,
                 namespace=None,
                 logging_level=logging.DEBUG):
        """Store the rig namespace and configure the instance logger."""

        # bound superclass proxy, kept around and initialised explicitly
        self.parent_class = super(HelgaRigFunctionality, self)
        self.parent_class.__init__()

        # rig namespace this instance operates on
        self.namespace = namespace

        # class-named logger set to the requested verbosity
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logging_level = logging_level
        self.logger.setLevel(self.logging_level)

    # Methods
    # ------------------------------------------------------------------

    def reset_rig(self, namespace=None):
        """Reset the rig to T-Pose and complete factory defaults (stub).

        Setting a keyframe right after this call yields the valid
        preroll T-Pose.
        """

        # fall back to the instance namespace when none is given
        namespace = namespace or self.namespace

    def get_all_manipulators(self, namespace=None):
        """Return the manipulator/attribute default mapping for the rig (stub).

        Intended shape::

            {manipulator_name: {attribute_name: attribute_value, ...}, ...}

        covering every manipulator of the rig in the given namespace and
        each keyframeable attribute ('translateX', 'scaleY', ...) with
        its factory-default (T-Pose) value.
        """

        # fall back to the instance namespace when none is given
        namespace = namespace or self.namespace

    def get_dynamics_manipulators(self, namespace=None):
        """Same mapping as :meth:`get_all_manipulators`, restricted to
        manipulators that control dynamics attributes (stub)."""

        # fall back to the instance namespace when none is given
        namespace = namespace or self.namespace

    # Getter & Setter
    # ------------------------------------------------------------------

    def set_namespace(self, namespace):
        """Assign a new value to self.namespace."""
        self.namespace = namespace

    def get_namespace(self):
        """Return self.namespace."""
        return self.namespace
161765f67126ccdd5db4262f078bb6b846e0af38 | Python | benvizy/space-invaders | /enemy.py | UTF-8 | 1,885 | 3.84375 | 4 | [] | no_license | from turtle import Turtle
from bullet import Bullet
import random
MOVE_DISTANCE = 10
MOVE_INCREMENT = 10
ENEMY_NUMBER = 50
LEFT_B = -300
RIGHT_B = 300
def random_color():
    """Return a random RGB colour as a tuple of three ints in 1..255."""
    return tuple(random.randint(1, 255) for _ in range(3))
class EnemyGenerator():
    """Manage the fleet of invader turtles and their bullet pool."""
    def __init__(self):
        # turtle instances representing the enemies
        self.enemies = []
        # NOTE(review): self.speed is increased by faster() but every move
        # below uses the MOVE_DISTANCE constant, so faster() has no visible
        # effect - confirm whether speed should drive the forward() calls.
        self.speed = MOVE_DISTANCE
        # bullet pool, seeded with one pre-made bullet
        self.shots = [Bullet()]
    def create_enemies(self):
        # Create ENEMY_NUMBER square turtles, pen up, all starting at
        # (300, 300) and facing left (heading 180).
        for i in range(ENEMY_NUMBER):
            new_enemy = Turtle("square")
            new_enemy.pu()
            new_enemy.color(random_color())
            new_enemy.goto(300, 300)
            new_enemy.left(180)
            self.enemies.append(new_enemy)
    def shoot(self, index):
        # Fire from the enemy at *index*; the pool is capped at 7 bullets,
        # after which the most recently added bullet is reused.
        if len(self.shots) < 7:
            self.shots.append(Bullet())
        self.shots[-1].goto(self.enemies[index].xcor(), self.enemies[index].ycor())
        self.shots[-1].shoot(start_x=self.enemies[index].xcor(), start_y=self.enemies[index].ycor())
    def setup_enemies(self, index):
        # Step the enemy at *index* forward and bounce it off the side
        # boundaries (turn 90 degrees and step away twice as far).
        # TODO: Figure out where to put the for loop so the enemies go one by one!
        # advances only while within 50 px of the previous enemy -
        # presumably to release them one after another; confirm intent
        # (index 0 compares against the last enemy via index-1).
        if abs(self.enemies[index].xcor() - self.enemies[index-1].xcor()) < 50:
            self.enemies[index].forward(MOVE_DISTANCE)
        if self.enemies[index].xcor() < LEFT_B:
            self.enemies[index].left(90)
            self.enemies[index].forward(MOVE_DISTANCE*2)
        if self.enemies[index].xcor() > RIGHT_B:
            self.enemies[index].right(90)
            self.enemies[index].forward(MOVE_DISTANCE * 2)
    def move_enemies(self):
        # Advance every enemy one step; each has a 1-in-10 chance to shoot.
        for enemy in self.enemies:
            rando = random.randint(1, 10)
            enemy.forward(MOVE_DISTANCE)
            if rando == 10:
                self.shoot(self.enemies.index(enemy))
    def faster(self):
        # Increase the stored speed (see NOTE in __init__ - currently
        # unused by the movement code above).
        self.speed += MOVE_INCREMENT
| true |
ff01bf2e01b1492411ec8b380590ad054df409d8 | Python | sukku777/lists-in-python | /to find largest element in list.py | UTF-8 | 45 | 2.609375 | 3 | [] | no_license | d=[23,45,6,78,98,34,67]
k=max(d) # largest element of the list d (defined above)
print(k)
| true |
b29a6e9296e5bc850d972e623770cfb3446e7c29 | Python | JamesDanni/PythonSpiderStudy | /study09-SeleniumSpider/doubanmovie.py | UTF-8 | 2,154 | 3.21875 | 3 | [] | no_license | #coding=utf-8
from time import sleep as s
from urllib import parse
from selenium import webdriver
import csv
class DouBan():
    """Scrape Douban TV listings with Selenium and save name/rating pairs."""
    def __init__(self,page_count):
        self.dv = webdriver.Chrome()
        self.page_count = page_count # number of times to click "load more"
        # collected [name, rating] rows
        self.data = []
    # Fetch the listing page for one category and collect name/rating pairs
    def get_page_data(self,name):
        type_name = parse.quote(name)
        url = "https://movie.douban.com/tv/#!type=tv&tag={}&sort=recommend&page_limit=20&page_start=20".format(type_name)
        self.dv.get(url)
        s(3)
        # click the "load more" link page_count times, sleeping between clicks
        # ('加载更多' is the on-page link text and must stay in Chinese)
        for i in range(self.page_count):
            print(name)
            h = self.dv.find_element_by_partial_link_text('加载更多')
            h.click()
            s(5)
        # extract the page content: one anchor per show
        movie_list = self.dv.find_elements_by_xpath('//*[@id="content"]/div/div[1]/div/div[4]/div/a')
        for i in range(1,len(movie_list)+1):
            name_path = '//*[@id="content"]/div/div[1]/div/div[4]/div/a[{}]/div/img'.format(i)
            result_path = '//*[@id="content"]/div/div[1]/div/div[4]/div/a[{}]/p/strong'.format(i)
            # the show title lives in the poster image's alt attribute
            movie_name = self.dv.find_element_by_xpath(name_path).get_attribute("alt")
            if len(movie_name) == 0:
                continue
            movie_result = self.dv.find_element_by_xpath(result_path).text
            if len(movie_result) == 0:
                continue
            self.data.append([movie_name,str(movie_result)])
    # Write the collected rows to '<name>数据.csv' (utf-8-sig for Excel)
    def save_data(self,name):
        file_name = name + "数据" + ".csv"
        c_file = open(file_name,'w',newline="",encoding="utf-8-sig")
        f = csv.writer(c_file)
        # header row: 名称 = name, 评分 = rating (runtime strings, kept as-is)
        f.writerow(["名称","评分"])
        for i in self.data:
            f.writerow(i)
        # NOTE(review): c_file is never closed here - confirm whether a
        # close()/with-block was intended.
    def exit_chorm(self):
        # close the current browser window (method name typo kept: callers use it)
        self.dv.close()
if __name__ == '__main__':
    type_name = ["热门","美剧","英剧","韩剧"] # category tags (hot / US / UK / Korean dramas)
    page_count = 1 # how many "load more" pages to fetch per category
    for name in type_name:
        # One fresh browser session per category; pause 10s between runs.
        _spider = DouBan(page_count)
        _spider.get_page_data(name)
        _spider.save_data(name)
        _spider.exit_chorm()
        s(10)
6593c8ae5a60f6e8219d2fa0e67a8d29bd39d71b | Python | IMIO/imio.pyutils | /imio/pyutils/utils.py | UTF-8 | 7,362 | 3.046875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
#
# python utils methods
# IMIO <support@imio.be>
#
from collections import OrderedDict, defaultdict
from itertools import chain
from operator import methodcaller
import copy
import itertools
import logging
import time
import timeit
def all_of_dict_values(dic, keys, labels=(), sep=u'='):
    """Return the non-empty values of `dic` for the given `keys`, in order.

    :param dic: input dictionary
    :param keys: keys to look up, in the order they should appear
    :param labels: optional labels matching `keys`; each non-empty value is
        rendered as u'<label><sep><value>' (an empty label drops the sep)
    :param sep: separator inserted between a label and its value
    :return: list of values (or labelled values), skipping falsy ones
    :raises ValueError: if `labels` is given with a length differing from `keys`
    """
    # Default changed from the mutable [] to an immutable () -- identical
    # truthiness/len/indexing semantics, no shared-state risk.
    if labels and len(labels) != len(keys):
        raise ValueError(u'labels length must be the same as keys length')
    ret = []
    for i, key in enumerate(keys):
        if dic.get(key):
            ret.append(labels and u'{}{}{}'.format(labels[i], labels[i] and sep or u'', dic[key]) or dic[key])
    return ret
def append(lst, value):
    """Append `value` to `lst` and hand the value back (fluent helper)."""
    lst += [value]
    return value
def ftimed(f, nb=100, fmt='{:.7f}'):
    """Time `f` over `nb` runs; return (formatted mean duration, result)."""
    elapsed, result = timed(f, nb=nb)
    return fmt.format(elapsed), result
def get_clusters(numbers=(), separator=", "):
    """Format consecutive runs in `numbers` as 'a-b' clusters.

    For numbers=[1,2,3,5,6,8,9,10,15,17,20] the result is
    '1-3, 5-6, 8-10, 15, 17, 20'.  `numbers` must be sorted ascending.

    :param numbers: sorted iterable of ints (default changed from the
        mutable [] to an immutable () with identical semantics)
    :param separator: string joining the formatted clusters
    :return: the joined cluster string
    """
    # Consecutive values share a constant (value - running index), so grouping
    # by that difference splits the sequence into maximal consecutive runs.
    clusters = itertools.groupby(numbers, lambda n, c=itertools.count(): n - next(c))
    res = []
    for _, cluster in clusters:
        clust = list(cluster)
        if len(clust) > 1:
            res.append('{0}-{1}'.format(clust[0], clust[-1]))
        else:
            res.append('{0}'.format(clust[0]))
    return separator.join(res)
def insert_in_ordereddict(dic, value, after_key='', at_position=None):
    """Insert a (key, value) tuple into a copy of an OrderedDict.

    :param dic: the original OrderedDict (left untouched)
    :param value: a (key, value) tuple inserted at the chosen position
    :param after_key: key name after which the tuple is inserted
    :param at_position: fallback index used when after_key is absent/unset
    :return: a new OrderedDict, or None when no insertion point is defined
    """
    pos = None
    if after_key:
        # Inlined index lookup: one past the position of `after_key`, if present.
        for idx, existing in enumerate(dic):
            if existing == after_key:
                pos = idx + 1
                break
    if pos is None and at_position is not None:
        pos = at_position
    if pos is None:
        return None
    items = list(dic.items())
    if pos >= len(items):
        return OrderedDict(items + [value])
    rebuilt = []
    for idx, item in enumerate(items):
        if idx == pos:
            rebuilt.append(value)
        rebuilt.append(item)
    if not rebuilt:  # dic was empty
        rebuilt.append(value)
    return OrderedDict(rebuilt)
def iterable_as_list_of_list(lst, cols=1):
    """Chunk an iterable into sub-lists of `cols` items (last may be shorter).

    :param lst: any iterable
    :param cols: number of items per sub-list
    :return: list of lists
    """
    chunks = []
    current = []
    count = 0
    for item in lst:
        current.append(item)
        count += 1
        if count % cols == 0:
            chunks.append(current)
            current = []
    # Flush the trailing partial chunk, if any.
    if current:
        chunks.append(current)
    return chunks
def merge_dicts(dicts, as_dict=True):
    """Merge dicts by extending the values of identical keys.

    Useful when values are lists: merging {'a': [1]} and {'a': [2]} gives
    {'a': [1, 2]}.

    :param dicts: iterable of dicts whose values support list.extend
    :param as_dict: return a plain dict instead of the internal defaultdict
    :return: a single dict (or defaultdict when as_dict is False)
    """
    merged = defaultdict(list)
    # Iterate every (key, value) pair of every dict in one stream.
    dict_items = map(methodcaller('items'), dicts)
    for key, val in chain.from_iterable(dict_items):
        merged[key].extend(val)
    # Bug fix: the old `as_dict and dict(merged) or merged` returned the
    # defaultdict (not a plain dict) whenever the merged result was empty,
    # because an empty dict is falsy in the and/or chain.
    return dict(merged) if as_dict else merged
def odict_index(odic, key, delta=0):
    """Return the position of `key` in an ordered dict (+`delta`), else None."""
    position = 0
    for candidate in odic:
        if candidate == key:
            return position + delta
        position += 1
    return None
def odict_pos_key(odic, pos):
    """Return the key at index `pos` in an ordered dict, or None if invalid.

    Negative positions are rejected (None) rather than interpreted as
    from-the-end indices.  Fix: positions past the last key now also yield
    None instead of raising IndexError, mirroring odict_index's
    None-on-miss behaviour.
    """
    keys = list(odic)
    if pos < 0 or pos >= len(keys):
        return None
    return keys[pos]
def one_of_dict_values(dic, keys):
    """Return the first truthy value of `dic` among `keys`, or None."""
    return next((dic[key] for key in keys if dic.get(key)), None)
def replace_in_list(lst, value, replacement, generator=False):
    """Replace every occurrence of `value` in `lst` with `replacement`.

    :param lst: the iterable containing values to replace
    :param value: the value to be replaced
    :param replacement: the substitute value
    :param generator: when True, return a lazy generator instead of a list
    :return: a new list (or generator) with the occurrences replaced
    """
    def _swap(items):
        # Iterate a snapshot copy, exactly as the original implementation did.
        for item in list(items):
            yield replacement if item == value else item

    swapped = _swap(lst)
    return swapped if generator else list(swapped)
def safe_encode(value, encoding='utf-8'):
    """Converts a value to encoding, only when it is not already encoded."""
    # NOTE(review): Python 2 only -- `unicode` does not exist on Python 3,
    # where any call raises NameError.  Already-encoded (str) values and
    # non-string values are returned untouched.
    if isinstance(value, unicode):
        return value.encode(encoding)
    return value
def setup_logger(logger, replace=logging.StreamHandler, level=20):
    """Modify logger handler level
    :param logger: logger to modify
    :param replace: handler type to replace
    :param level: level to set
    """
    # Search for a StreamHandler on the logger first, then on its parent.
    # NOTE(review): the `replace` parameter is never used below; the search
    # is hard-coded to logging.StreamHandler.
    for i_c, container in enumerate((logger, logger.parent)):
        found = [i for i, hdl in enumerate(container.handlers) if isinstance(hdl, logging.StreamHandler)]
        if found:
            if i_c:
                # Handler lives on the parent: shallow-copy the parent so the
                # mutation below does not affect other loggers sharing it.
                logger.parent = container = copy.copy(logger.parent)
            break
    else:
        # No StreamHandler anywhere: nothing to adjust.
        return
    idx = found[0]
    # Duplicate the found handler and apply the requested level to the copy.
    osh = copy.copy(container.handlers[idx])
    osh.setLevel(level)
    # remove handler from original container handlers (often parent)
    container.handlers = [hdl for i, hdl in enumerate(container.handlers) if i != idx]
    # put handler in logger handlers
    logger.handlers.append(osh)
    logger.setLevel(level)
def sort_by_indexes(lst, indexes, reverse=False):
    """Reorder `lst` according to a parallel list of sort keys."""
    paired = sorted(zip(indexes, lst), key=lambda pair: pair[0], reverse=reverse)
    return [element for _, element in paired]
def timed(f, nb=100):  # TODO must be removed and replaced by timeit
    """Run `f` `nb` times; return (mean wall-clock duration, last result)."""
    started = time.time()
    for _ in range(nb):
        result = f()
    return (time.time() - started) / nb, result
def time_elapsed(start, cond=True, msg=u'', dec=3, min=0.0):
    """Print the time elapsed since `start` (a value from time_start).

    :param start: start time gotten from the time_start function
    :param cond: only print when truthy
    :param msg: label included in the printed line
    :param dec: decimal precision (default 3)
    :param min: minimal elapsed value worth printing

    Usage:
        from imio.pyutils.utils import time_elapsed, time_start
        start = time_start()
        ...
        time_elapsed(start, cond=obj.id=='myid', msg=u'myfunc')
    """
    if cond:
        elapsed = timeit.default_timer() - start
        if elapsed >= min:
            # Two-stage format: the first pass bakes the precision into the
            # template, the second fills in the message and the elapsed time.
            template = u"* {{}}: {{:.{}f}} seconds".format(dec)
            print(template.format(msg, elapsed))
def time_start():
    """Return a high-resolution timer value; pair with time_elapsed."""
    return timeit.default_timer()
| true |
60589bd98bc55e8a7366b9645d14f120128cf17b | Python | rafatio/cornetometro | /build_classifier.py | UTF-8 | 1,930 | 3.15625 | 3 | [] | no_license | import os
import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import stopwords
from numpy.random import randint
import collections
import pickle
# THIS FILE TAKES THE BEST APPROACH FROM sentiment_analysis.py AND PROVIDES A WAY TO SAVE THE CLASSIFIER IN A FILE
def get_classified_tweets(dataset_path):
    """Split a labelled dataset file into positive and negative tweet lists.

    Each line is expected to start with '###!good###' or '###!bad###'; the
    marker plus the following separator character is stripped from the
    returned text.  Lines without a marker are ignored.

    :param dataset_path: path of the labelled dataset file
    :return: (positive_tweets, negative_tweets) lists of strings
    """
    positive_tweets = []
    negative_tweets = []
    # Context manager replaces the manual open/close pair (which leaked the
    # handle on error) and avoids shadowing the `file` builtin.
    with open(dataset_path) as dataset:
        for tweet in dataset:
            if tweet[:11] == '###!good###':
                positive_tweets.append(tweet[12:])
            elif tweet[:10] == '###!bad###':
                negative_tweets.append(tweet[11:])
    return positive_tweets, negative_tweets
def get_features(tokenized_tweets, sentiment):
    """Pair each tokenized tweet with `sentiment` in NLTK feature format.

    Every token maps to True, yielding (token_dict, sentiment) tuples.
    """
    return [({token: True for token in tweet}, sentiment)
            for tweet in tokenized_tweets]
def tokenize_tweets(tweets):
    """Tokenize raw tweets and drop English stopwords.

    :param tweets: iterable of raw tweet strings
    :return: list of token lists, one per tweet
    """
    # Performance fix: the old code rebuilt stopwords.words('english') --
    # an O(n) list -- for every single word of every tweet, and tested
    # membership against that list.  Build the set once, test in O(1).
    stopword_set = set(stopwords.words('english'))
    tokenized_tweets = []
    for tweet in tweets:
        tokens = [word for word in nltk.word_tokenize(tweet)
                  if word not in stopword_set]
        tokenized_tweets.append(tokens)
    return tokenized_tweets
def classifier(dataset_path):
    """Train a NaiveBayes sentiment classifier from a labelled dataset file."""
    positive_tweets, negative_tweets = get_classified_tweets(dataset_path)
    positive_features = get_features(tokenize_tweets(positive_tweets), "positive")
    negative_features = get_features(tokenize_tweets(negative_tweets), "negative")
    # Returning directly avoids the original local that shadowed this
    # function's own name.
    return NaiveBayesClassifier.train(positive_features + negative_features)
def save_classifier(dataset_path, destination_path):
    """Train a classifier from `dataset_path` and pickle it to `destination_path`.

    Bug fix: pickle requires a binary file handle.  The old open(..., 'w')
    text mode raises TypeError on Python 3 and risks newline corruption on
    Windows; 'wb' plus a context manager also guarantees the handle closes.
    """
    nb_classifier = classifier(dataset_path)
    with open(destination_path, 'wb') as output:
        pickle.dump(nb_classifier, output, protocol=pickle.HIGHEST_PROTOCOL)
| true |
71acfb808427ca0b542b38a3ac758a1b1185ab36 | Python | Prdeeepg/Epitranscriptome-Analysis | /kmeancluster_orignal.py | UTF-8 | 2,111 | 2.859375 | 3 | [] | no_license | from scipy import stats
import numpy as np
from scipy.stats import fisher_exact
import matplotlib.pyplot as pyplot
from sklearn.cluster import KMeans
def kmeans(file_string,f,outfile):
    """Cluster 2-column gene data with k-means (f clusters) and save a scatter plot.

    :param file_string: input file; column 0 = gene name, columns 1-2 = the
        two features to cluster (first/header row skipped)
    :param f: number of clusters (at most 8, the length of the colour list)
    :param outfile: path of the figure written by pyplot.savefig
    """
    normed = np.loadtxt(file_string, usecols=(1, 2), dtype = 'float', skiprows=1)
    normed = normed.astype(float)
    # NOTE(review): gene_name is loaded but never used below.
    gene_name = np.loadtxt(file_string, usecols=(0,), skiprows=1, dtype = 'string')
    print np.shape(normed)
    print normed[0:10,]
    np.random.seed(5)
    ###Transpose your data according to your question
    #X=np.transpose(X)
    #print np.shape(X)
    ###Here we need to normalize data to range from 0 to 1 or by zscore
    kmeans = KMeans(n_clusters=f, random_state=0).fit(normed)
    labels = kmeans.labels_
    centroid = kmeans.cluster_centers_
    colors = ['r', 'g', 'b', 'magenta', 'orange', 'grey', 'm', 'c']
    ###Above, depending on your question, determine how many clusters you should use
    ###Here we need to work with scikitlearn.cluster.KMeans to determine how to plot clusters on the scatterplot below
    # NOTE(review): the model was already fitted above; this refit is redundant.
    kmeans.fit(normed)
    print np.shape(centroid)
    pyplot.figure()
    for i in range(0, f):
        # Indices of the samples assigned to cluster i, plotted in colour i.
        values = np.where(kmeans.labels_ == i)[0]
        print np.shape(values)
        print "0 value = ",normed[values][:,0]
        print "1 value = ",normed[values][:,1]
        pyplot.scatter(normed[values][:, 0], normed[values][:, 1], color=colors[i])
    pyplot.title(('Kmeans clustering of the genes with ' + str(f) + ' clusters'))
    labels = ["Human Liver Carcinoma", "Heatshock", "UV", "Hepatocyte GF", "Interferons", "Brain"]
    # Annotate the first six points with condition names -- assumes the file
    # has at least 6 rows (TODO confirm).
    for i in range(0,6):
        pyplot.annotate(labels[i],(normed[i,0], normed[i,1]))
    pyplot.xlabel("PC 1")
    pyplot.ylabel("PC 2")
    pyplot.savefig(outfile, bbox_inches='tight')
    #pyplot.figure()
    #pyplot.scatter(X[:,0],X[:,1])
    #pyplot.scatter(X[(np.where(kmeans.labels_=0)),0],X[(np.where(kmeans.labels_=0),1]),color='red')
    #pyplot.scatter(X[np.where(kmeans.labels_=1),0],X[np.where(kmeans.labels_=1),1],color='green')
    #pyplot.title('Kmeans clusters: ES vs. NPC')
    #pyplot.show()
    return
| true |
202382d1c96ada71601b423988fcccb9ffd091c0 | Python | Carlosriosch/FiltersForSelfies | /intento3 (Falta Pulir)/recolectar_gestos.py | UTF-8 | 3,042 | 2.578125 | 3 | [
"MIT"
] | permissive | #python intento3/recolectar_gestos.py --nombre fondo --dir C:\Users\carlo\Desktop\GitHub\FiltersForSelfies\intento3\capturas
# The commented command on the previous line shows how to invoke the program:
# it names the output class "fondo" and gives the directory where the
# captured face images are stored.
import cv2 as cv
import os
import numpy as np
import argparse
script_dir = os.path.dirname(os.path.realpath(__file__))
image_dir = os.path.join(script_dir, 'entrenamiento')
aparser = argparse.ArgumentParser()
required_arguments = aparser.add_argument_group('required arguments')
required_arguments.add_argument('--nombre',
                                help='nombre de la clase',
                                required=True)
required_arguments.add_argument('--dir',
                                help='directorio donde guardar las imagenes',
                                required=True)
required_arguments.add_argument('--cant',
                                help='cantidad de imagenes',
                                type=int,
                                default=200)
required_arguments.add_argument('--dimension',
                                help='cantidad de imagenes',
                                type=int,
                                default=300)
required_arguments.add_argument('--dimension-salida',
                                help='cantidad de imagenes',
                                type=int,
                                default=200)
args = aparser.parse_args()
detector_caras = cv.CascadeClassifier('../FiltersForSelfies/OpenCVFilter/haarcascade_frontalface_default.xml')
# capture frames from the webcam
cap = cv.VideoCapture(0)
CAPTURAR = False
imagenes_generadas = 0
while True:
    ret, img = cap.read() # read one webcam frame
    img = cv.flip( img, 1 ) # horizontal flip so the preview acts like a mirror
    img_gris=cv.cvtColor(img, cv.COLOR_RGB2GRAY)
    caras = detector_caras.detectMultiScale(img_gris, 1.3, 5)
    for i, (xc,yc,wc,hc) in enumerate(caras):
        ## TODO: fix the mask coordinates
        cv.rectangle(img, (xc,yc), (xc+wc,yc+hc), (200,55,32))
        img_h, img_w = img.shape[:2]
        # Keep the last detected face rectangle for the capture step below.
        # NOTE(review): x/y/w/h are undefined until the first face is seen --
        # enabling capture before any detection raises NameError.
        x = xc
        y = yc
        w = wc
        h = hc
    if CAPTURAR and imagenes_generadas <= args.cant:
        ejemplo = img[y:y+h, x:x+w] # crop the rectangle of interest
        isDir=os.path.isdir(os.path.join(args.dir, args.nombre))
        if not isDir:
            # NOTE(review): 755 here is decimal, not octal 0o755 -- the
            # resulting permissions are probably not what was intended.
            os.mkdir(os.path.join(args.dir, args.nombre), 755)
        filename = os.path.join(args.dir, args.nombre, f'{args.nombre}_{imagenes_generadas}.jpg')
        cv.imwrite(filename, ejemplo)
        imagenes_generadas += 1
        print(f'generadas {imagenes_generadas} imagenes')
        cv.rectangle(img, (x,y), (x+w, y+h), (0,0,255))
    cv.imshow('Ttulo de la ventana', img)
    k = cv.waitKey(30)
    if k == 27: # ESC (ASCII)
        break
    elif k == ord('f'):
        # NOTE(review): bitwise NOT toggles between False and -1 (truthy),
        # not between False and True -- it works, but `not CAPTURAR` was meant.
        CAPTURAR = ~CAPTURAR
cap.release()
cv.destroyAllWindows()
bd4052ef6502f65c427d46e6f3fe1da3321bee36 | Python | duybui2905/C4T-13 | /session9/turtle_color.py | UTF-8 | 159 | 3.53125 | 4 | [] | no_license | from turtle import *
# Palette of pen colours, one per drawn segment.
mau = ["blue", "green", "red", "orange"]
speed(-1)
shape("turtle")
# Draw one 50-step segment in each colour.
for i in range (len(mau)):
    color(mau[i])
    forward(50)
mainloop() | true |
54b336176e7db059a3cb3d72e377c74c13f4c7a6 | Python | DimitarKum/Python-Beginner-Project | /generateData/generateProject.py | UTF-8 | 951 | 3.53125 | 4 | [] | no_license | import random as rnd
# This is the code that was used to produce the matches.csv file.
# The code can be rerun to generate different matches data since the games are randomized.
def main():
    """Generate matches.csv with 200 random games between famous scientists.

    Each row pairs two distinct players with random scores in [0, maxScore].
    Re-running produces different data since the games are randomized (the
    RNG is deliberately left unseeded).
    """
    scientists = [
        "Grace Hopper", "Alan Turing", "Marie Curie",
        "Charles Darwin", "Nikola Tesla", "Gregor Mendel",
        "John von Neumann", "Ada Lovelace", "Georg Cantor",
        "David Hilbert"
    ]
    matchesCount = 200
    maxScore = 4
    # Context manager replaces the manual open/close pair so the file is
    # closed even if a write fails.
    with open("matches.csv", "w") as fileOut:
        fileOut.write("HomePlayer, AwayPlayer, HomeScore, AwayScore\n")
        for _ in range(matchesCount):
            homePlayer = rnd.randint(0, len(scientists) - 1)
            # Draw the away player from one fewer slot and shift past the
            # home index, guaranteeing away != home.
            awayPlayer = rnd.randint(0, len(scientists) - 2)
            if awayPlayer >= homePlayer:
                awayPlayer += 1
            homeScore = rnd.randint(0, maxScore)
            awayScore = rnd.randint(0, maxScore)
            fileOut.write("{}, {}, {}, {}\n".format(
                scientists[homePlayer], scientists[awayPlayer], homeScore, awayScore))
# Generate the dataset when the module is executed.
main()
09278ba1e2dbb378f416b8e7f0119fb5ed9037e6 | Python | MrDetectiv/fb-labs-2020 | /cp_2/Ivanchenkov_Melnychenko_Cp2/viginer_test.py | UTF-8 | 1,520 | 2.96875 | 3 | [] | no_license | import unittest
from viginer_lib import exclude_letters, crypt, decrypt, crypt_file, decrypt_file, algorithm1, shift_text, build_key
import numpy as np
key = 'абвгд'
class TestMethods(unittest.TestCase):
    """ Round-trip and key-recovery tests for the Vigenere cipher helpers """
    def test1(self):
        # Encrypt/decrypt round-trip on in-memory text must be the identity.
        with open('text1.txt', 'r', encoding='utf-8') as f:
            text = ''.join([exclude_letters(line) for line in f])
        self.assertEqual(text, decrypt(crypt(text, key), key))
    def test2(self):
        """ Encrypt/decrypt round-trip through files on disk """
        crypt_file('text1.txt', 'crypted1.txt', key)
        decrypt_file('crypted1.txt', 'decrypted1.txt', key)
        with open('text1.txt', 'r', encoding='utf-8') as f:
            text1 = ''.join([exclude_letters(line.lower()) for line in f])
        with open('decrypted1.txt', 'r', encoding='utf-8') as f:
            text2 = ''.join([exclude_letters(line) for line in f])
        self.assertEqual(text1, text2)
    def test3(self):
        """ Check that block splitting and key generation recover the key """
        Y_blocks = algorithm1('crypted.txt', 5)
        # Trim every block to the shortest block length so shapes match.
        len_y = min(np.array([len(Y_blocks[item]) for item in Y_blocks]))
        crypted_freq = []
        for item in Y_blocks:
            Y_blocks[item] = Y_blocks[item][0:len_y]
            crypted_freq.append(shift_text(Y_blocks[item]))
        self.assertEqual(key, build_key([2, 3, 4, 0, 1], crypted_freq))
77fffb5903d27b3143bbf6a89e7d08388aa29ac3 | Python | zckoh/cf_cw1 | /Q5/g-min-c.py | UTF-8 | 2,299 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
File : g-min-c.py
Author : zckoh
Date : Wed Feb 20 21:20:20 2019
Brief : shortsale-constrained portfolio
"""
import numpy as np
import pandas as pd
from datetime import timedelta
import matplotlib.pyplot as plt
import math
import scipy.io
from scipy.optimize import minimize
from numpy.linalg import inv
from cvxopt import solvers, matrix
solvers.options['show_progress'] = False
#%matplotlib inline
from function_rolling_samples import *
pd.set_option('display.max_columns', 10)
# Load in the historical data for the 3 stocks (MRW,HRGV,GLEN)
MRW = pd.read_csv('../Q4/30stocks/MRW Historical Data.csv', delimiter = ',',thousands = ',')
HRGV = pd.read_csv('../Q4/30stocks/HRGV Historical Data.csv', delimiter = ',',thousands = ',')
GLEN = pd.read_csv('../Q4/30stocks/GLEN Historical Data.csv', delimiter = ',',thousands = ',')
# Reverse the data frame so that first row is the earliest data.
MRW = MRW.iloc[::-1]
HRGV = HRGV.iloc[::-1]
GLEN = GLEN.iloc[::-1]
# Re-index the dataframes
MRW = MRW.reset_index(drop=True)
HRGV = HRGV.reset_index(drop=True)
GLEN = GLEN.reset_index(drop=True)
optm_weights_list = pd.DataFrame(index = [], columns = ['W_1','W_2','W_3'])
optm_weights_list = optm_weights_list.fillna(0) # with 0s rather than NaNs
# Perform the rolling-sample approach for every 20 working day
# (380-day lookback window, rebalanced every 20 days from day 380 to 750).
for n in range(380,750,20):
    # Pick out the subset of historical data to compute mean and covariance
    MRW_subset = MRW.loc[n-380:n-1,'Date':'Price']
    HRGV_subset = HRGV.loc[n-380:n-1,'Date':'Price']
    GLEN_subset = GLEN.loc[n-380:n-1,'Date':'Price']
    # Find the mean and covariance matrix for the subset
    (mean, cov_matrix) = compute_mean_covariances(MRW_subset,HRGV_subset,GLEN_subset)
    # Find the optimal weights using g-min-c
    optimal_weights = compute_g_min_c_weights(cov_matrix)
    # save the optimal_weights to plot later
    tmp_weights = pd.DataFrame(optimal_weights, columns=['W_1','W_2','W_3'])
    optm_weights_list = optm_weights_list.append(tmp_weights)
optm_weights_list = optm_weights_list.reset_index(drop=True)
plt.style.use('fivethirtyeight')
# NOTE(review): the label says "naive portfolio" but the weights plotted
# above come from the g-min-c (shortsale-constrained) computation.
print("Graph for Total Pos using naive portfolio")
optm_weights_list.plot(figsize=(10,8))
plt.show()
| true |
97b2ca54b99381ba46e0fbdbff41c9580179bfcc | Python | marcos-sb/hacker-rank | /algorithms/strings/two-strings/Solution.py | UTF-8 | 354 | 3.21875 | 3 | [
"Apache-2.0"
] | permissive | import sys
# HackerRank "Two Strings": for each of t test cases, print YES when the
# two input strings share at least one common character, NO otherwise.
t = int(sys.stdin.readline())
for _ in range(t):
    # dict used as a set of characters from the first string (O(1) lookup).
    chars = dict()
    a = sys.stdin.readline().strip()
    b = sys.stdin.readline().strip()
    for c in a:
        chars[c] = 1
    found = False
    for c in b:
        if c in chars:
            print('YES')
            found = True
            break
    if not found:
        print('NO')
| true |
f262343f70c0cef246cde4ecbb0f62e29cacc57a | Python | Retroflux/FallGuysStats | /classes/Episode.py | UTF-8 | 1,912 | 3.078125 | 3 | [] | no_license | # Episode = full round of games from start to finish
# Number of rounds
# Episode Number
# Number of Team Games
# Number of Solo Games
# Final Placement Score
class Episode:
    """A full Fall Guys episode, from the first round through elimination.

    Holds the number of rounds played (total / team / solo), the episode
    number, the final placement score and the list of round objects.
    """

    # Running count of Episode instances created (maintained by __init__).
    totalNumberOfEpisodes = 0

    def __init__(self, numberOfRounds, episodeNumber, finalPlayerScore, listOfRounds, numberOfTeamGames=0,
                 numberOfSoloGames=0):
        self._numberOfRounds = numberOfRounds
        self._episodeNumber = episodeNumber
        self._numberOfTeamGames = numberOfTeamGames
        self._numberOfSoloGames = numberOfSoloGames
        self._finalPlayerScore = finalPlayerScore
        self._listOfRounds = listOfRounds
        # Fix: this class-level counter existed but was never incremented.
        Episode.totalNumberOfEpisodes += 1

    @property
    def numberOfRounds(self):
        """Total number of rounds in the episode."""
        return self._numberOfRounds

    @numberOfRounds.setter
    def numberOfRounds(self, numberOfRounds):
        self._numberOfRounds = numberOfRounds

    @property
    def episodeNumber(self):
        """Sequential number identifying this episode."""
        return self._episodeNumber

    @episodeNumber.setter
    def episodeNumber(self, episodeNumber):
        self._episodeNumber = episodeNumber

    @property
    def numberOfTeamGames(self):
        """How many of the rounds were team games."""
        return self._numberOfTeamGames

    @numberOfTeamGames.setter
    def numberOfTeamGames(self, numberOfTeamGames):
        self._numberOfTeamGames = numberOfTeamGames

    @property
    def numberOfSoloGames(self):
        """How many of the rounds were solo games."""
        return self._numberOfSoloGames

    @numberOfSoloGames.setter
    def numberOfSoloGames(self, numberOfSoloGames):
        self._numberOfSoloGames = numberOfSoloGames

    @property
    def finalPlayerScore(self):
        """Placement score the player finished the episode with."""
        return self._finalPlayerScore

    @finalPlayerScore.setter
    def finalPlayerScore(self, finalPlayerScore):
        self._finalPlayerScore = finalPlayerScore

    @property
    def listOfRounds(self):
        """The round objects played in this episode."""
        return self._listOfRounds

    @listOfRounds.setter
    def listOfRounds(self, listOfRounds):
        self._listOfRounds = listOfRounds
| true |
b1a0b8bb2d8df073cf0645a34130e9c14d4b0254 | Python | protist/utils | /vwtags.py | UTF-8 | 2,292 | 2.875 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
help_text = """
Extracts tags from Vimwiki files. Useful for the Tagbar plugin.
Usage:
Install Tagbar (https://github.com/preservim/tagbar/). Then, put this file
anywhere and add the following to your .vimrc:
let g:tagbar_type_vimwiki = {
\ 'ctagstype':'vimwiki'
\ , 'kinds':['h:header']
\ , 'sro':'&&&'
\ , 'kind2scope':{'h':'header'}
\ , 'sort':0
\ , 'ctagsbin':'/path/to/vwtags.py'
\ , 'ctagsargs': 'default'
\ }
The value of ctagsargs must be one of 'default', 'markdown' or 'media',
whatever syntax you use. However, if you use multiple wikis with different
syntaxes, you can, as a workaround, use the value 'all' instead. Then, Tagbar
will show markdown style headers as well as default/mediawiki style headers,
but there might be erroneously shown headers.
"""
import sys
import re
if len(sys.argv) < 3:
    print(help_text)
    exit()
syntax = sys.argv[1]
filename = sys.argv[2]
# Header patterns: '= Title =' (default/mediawiki) and '# Title' (markdown),
# each allowing 1-6 marker characters to encode the nesting level.
rx_default_media = r"^\s*(={1,6})([^=].*[^=])\1\s*$"
rx_markdown = r"^\s*(#{1,6})([^#].*)$"
if syntax in ("default", "media"):
    rx_header = re.compile(rx_default_media)
elif syntax == "markdown":
    rx_header = re.compile(rx_markdown)
else:
    # 'all' (or anything else): accept both syntaxes in a single pattern.
    rx_header = re.compile(rx_default_media + "|" + rx_markdown)
file_content = []
try:
    with open(filename, "r") as vim_buffer:
        file_content = vim_buffer.readlines()
except:
    # NOTE(review): bare except silently exits on any read error.
    exit()
# state[i] holds the most recent header text seen at nesting level i+1,
# so the scope chain of the current header can be reconstructed.
state = [""]*6
for lnum, line in enumerate(file_content):
    match_header = rx_header.match(line)
    if not match_header:
        continue
    # Groups 1/2 come from the default pattern, 3/4 from the markdown one;
    # the `or` falls through when only the combined pattern is in use.
    match_lvl = match_header.group(1) or match_header.group(3)
    match_tag = match_header.group(2) or match_header.group(4)
    cur_lvl = len(match_lvl)
    cur_tag = match_tag.strip()
    cur_searchterm = "^" + match_header.group(0).rstrip("\r\n") + "$"
    cur_kind = "h"
    state[cur_lvl-1] = cur_tag
    # A new header invalidates any remembered deeper-level headers.
    for i in range(cur_lvl, 6):
        state[i] = ""
    scope = "&&&".join(
        [state[i] for i in range(0, cur_lvl-1) if state[i] != ""])
    if scope:
        scope = "\theader:" + scope
    # Emit one ctags-format line per header for Tagbar to parse.
    print('{0}\t{1}\t/{2}/;"\t{3}\tline:{4}{5}'.format(
        cur_tag, filename, cur_searchterm, cur_kind, str(lnum+1), scope))
| true |
97f3ea3324f4793f29e0c2960593140aa53e08ba | Python | capitao-red-beard/fluent_python | /part_2/listcomps.py | UTF-8 | 653 | 4.65625 | 5 | [] | no_license | # Example for why list comprehension is more readable than for loops.
# Build a list of Unicode codepoints from a string (1): manual loop version.
symbols = "$&@*^%"
codes = []
for symbol in symbols:
    codes.append(ord(symbol))
print(codes)
# Build a list of Unicode codepoints from a string (2): comprehension version.
symbols2 = "$&@*^%"
codes2 = [ord(symbol2) for symbol2 in symbols2]
print(codes2)
# Cartesian products using lists
colours = ["black", "white"]
sizes = ["S", "M", "L"]
# Resulting list is arranged as if the for loops were nested in the
# same order as they appear in the list comprehension.
tshirts = [(colour, size) for colour in colours for size in sizes]
print(tshirts)
| true |
e6435c5379a5c24bde3206e72f611f3eef3100d2 | Python | sebasrodas/DeepLearning | /AplicacionEjercicio1AND.py | UTF-8 | 2,103 | 2.796875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 2 17:10:54 2020
@author: SEBAS
"""
import numpy as np
import pandas as pn
import matplotlib.pyplot as plt
# Load the AND-gate training data: column 0 is the target label, the
# remaining columns are the input features.
dataSwiches = np.array(pn.read_csv('trainAND.csv'))
X_train = dataSwiches[:,1:]
Y_train = dataSwiches[:,0]
# NOTE(review): Z_train is assigned but never used afterwards.
Z_train = dataSwiches[:,0]
Y_train = Y_train[:,np.newaxis]
# f = number of input features, c = number of outputs; random initial weights.
f = X_train.shape[1]
c = Y_train.shape[1]
w = np.random.randn(f,c)
b = 1
#plt.plot(X_train.T ,'o')
#plt.show()
epocas = 5
def fnActivacion(z):
    """Sign-style step activation: 1 for positive input, -1 otherwise."""
    return 1 if z > 0 else -1
def train(w_,x_,b_,y_, train_=True) :
    """One pass of the perceptron learning rule over all samples.

    :param w_: weight matrix (features x outputs)
    :param x_: training inputs, one sample per row
    :param y_: target labels (+1/-1), one per row
    :param b_: bias term
    :param train_: when False, only evaluate (no weight/bias updates)
    :return: (last prediction, updated weights, updated bias)

    Side effect: records each prediction in the module-level Y_costo buffer.
    """
    for i in range(len(x_)):
        a0 = x_[i, np.newaxis]
        y0 = y_[i, np.newaxis]
        # print(a0,a0.shape, w_.shape, y0.shape)
        z = np.dot(a0, w_)+b_
        Y_modelo = fnActivacion(z)
        Y_costo[i] = Y_modelo
        # print(Y_modelo)
        if Y_modelo != y0 and train_:
            # Misclassified: nudge weights/bias toward the target
            # (classic perceptron update rule).
            # print(y0.shape)
            # print(a0.shape)
            error = np.dot(y0,a0)
            # print('e'+str(error.shape))
            w_ = w_+error.T
            # print(w_.shape)
            b_ = b_+y0
    return Y_modelo, w_, b_
# Prediction buffer filled by train() for each sample, plus per-epoch loss.
Y_costo = np.zeros((Y_train.shape[0],Y_train.shape[1]))
Loss = []
for j in range(epocas):
    Y_modelo,w,b =train(w, X_train, b,Y_train)
    print(Y_train, Y_costo)
    Loss.append(np.mean(Y_costo-Y_train)**2)
# Evaluate the trained perceptron on a 50x50 grid over [-2, 2]^2 to draw
# the decision surface.
Resolucion = 50
_x0 = np.linspace(-2,2,Resolucion)
_x1 = np.linspace(-2,2,Resolucion)
_y0 = np.zeros((Resolucion, Resolucion))
for i0, x0 in enumerate(_x0):
    for i1, x1 in enumerate(_x1):
        X_predict = np.array([[x0,x1]])
        # train_=False: evaluation only, no weight updates.
        _y0[i0,i1],_,_ = train(w, X_predict[:, np.newaxis], b, np.array([1]), False)
plt.pcolormesh(_x0, _x1, _y0, cmap = "coolwarm")
plt.axis('equal')
plt.plot(X_train.T,'o')
plt.show()
# plt.plot(range(len(Loss)),Loss)
# plt.show()
# Single demo prediction on the input (-1, -1).
X_predict = np.array([[-1,-1]])
Y_predict,_,_ = train(w, X_predict[:, np.newaxis], b, np.array([1]), False)
print("Entrada del modelo "+str(X_predict))
print("Salida del modelo "+str(Y_predict))
14b09c77777408740466c4b9d9c161db79e2d935 | Python | shivamaroraa/CodeSignal | /Intro/Python/checkPalindrome.py | UTF-8 | 194 | 3.21875 | 3 | [] | no_license | def checkPalindrome(inputString):
isP = True
for i in range(len(inputString)):
if inputString[i] != inputString[len(inputString) - 1 - i]:
isP = False
return isP | true |
1108fbdf0373a52f3be1ef3879ac9195be591d4e | Python | dariogomes/udacity | /Project1/Exercises/script.py | UTF-8 | 774 | 3.828125 | 4 | [] | no_license | # coding: utf-8
# Starting with the imports
import csv
import matplotlib.pyplot as plt
# Read the data in as a list of rows
print("Lendo o documento...")
with open("capacity.csv", "r", encoding="utf-8", errors="ignore") as file_read:
    reader = csv.reader(file_read)
    data_list = list(reader)
print("Ok! File Read.")
print("Linha 0: ")
print(data_list[0])
# Rebind data_list without its header row.
data_list = data_list[1:]
# Features can be accessed by index,
# e.g. sample[6] to print gender, or sample[-2]
# TASK 2
# TODO: Print the `gender` of the first 20 rows
print("\nTAREFA 2: Imprimindo o gênero das primeiras 20 amostras")
# NOTE(review): despite the task above, this prints only the first 3 rows:
# i is incremented only while i <= 3.
i = 1
for row in data_list:
    if i <= 3:
        print (row[5])
        i += 1
| true |
215fe67d71e01d9bdb128e4a349e2aa58e116e02 | Python | ShreJais/GMM | /inference/tensorflow/gmm_means_cavi.py | UTF-8 | 8,339 | 2.5625 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
"""
Coordinate Ascent Variational Inference process to approximate a mixture
of gaussians with common variance for all classes
"""
from __future__ import absolute_import
import argparse
import math
import os
import pickle as pkl
import sys
from time import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from utils import dirichlet_expectation, log_beta_function, softmax
from viz import plot_iteration
"""
Parameters:
* maxIter: Max number of iterations
* dataset: Dataset path
* k: Number of clusters
* verbose: Printing time, intermediate variational parameters, plots, ...
Execution:
python gmm_means_gavi.py -dataset data_k2_1000.pkl -k 2 -verbose
"""
parser = argparse.ArgumentParser(description='CAVI in mixture of gaussians')
parser.add_argument('-maxIter', metavar='maxIter', type=int, default=300)
parser.add_argument('-dataset', metavar='dataset', type=str,
default='../../data/synthetic/2D/k2/data_k2_1000.pkl')
parser.add_argument('-k', metavar='k', type=int, default=2)
parser.add_argument('-verbose', dest='verbose', action='store_true')
parser.set_defaults(verbose=False)
args = parser.parse_args()
K = args.k
VERBOSE = args.verbose
THRESHOLD = 1e-6
sess = tf.Session()
# Get data
with open('{}'.format(args.dataset), 'r') as inputfile:
data = pkl.load(inputfile)
xn = data['xn']
xn_tf = tf.convert_to_tensor(xn, dtype=tf.float64)
N, D = xn.shape
if VERBOSE: init_time = time()
# Model hyperparameters
alpha_aux = [1.0] * K
m_o_aux = np.array([0.0, 0.0])
beta_o_aux = 0.01
delta_o_aux = np.zeros((D, D), long)
np.fill_diagonal(delta_o_aux, 1)
# Priors (TF castings)
alpha_o = tf.convert_to_tensor([alpha_aux], dtype=tf.float64)
m_o = tf.convert_to_tensor([list(m_o_aux)], dtype=tf.float64)
beta_o = tf.convert_to_tensor(beta_o_aux, dtype=tf.float64)
delta_o = tf.convert_to_tensor(delta_o_aux, dtype=tf.float64)
# Initializations
lambda_phi_aux = np.random.dirichlet(alpha_aux, N)
lambda_pi_aux = alpha_aux + np.sum(lambda_phi_aux, axis=0)
lambda_beta_aux = beta_o_aux + np.sum(lambda_phi_aux, axis=0)
lambda_m_aux = np.tile(1. / lambda_beta_aux, (2, 1)).T * \
(beta_o_aux * m_o_aux + np.dot(lambda_phi_aux.T, xn))
# Variational parameters
lambda_phi = tf.Variable(lambda_phi_aux, dtype=tf.float64)
lambda_pi = tf.Variable(lambda_pi_aux, dtype=tf.float64)
lambda_beta = tf.Variable(lambda_beta_aux, dtype=tf.float64)
lambda_m = tf.Variable(lambda_m_aux, dtype=tf.float64)
# Reshapes
lambda_mu_beta_res = tf.reshape(lambda_beta, [K, 1])
# Lower Bound definition
LB = log_beta_function(lambda_pi)
LB = tf.subtract(LB, log_beta_function(alpha_o))
LB = tf.add(LB, tf.matmul(tf.subtract(alpha_o, lambda_pi),
tf.reshape(dirichlet_expectation(lambda_pi),
[K, 1])))
LB = tf.add(LB, tf.multiply(tf.cast(K / 2., tf.float64),
tf.log(tf.matrix_determinant(
tf.multiply(beta_o, delta_o)))))
LB = tf.add(LB, tf.cast(K * (D / 2.), tf.float64))
for k in range(K):
a1 = tf.subtract(lambda_m[k, :], m_o)
a2 = tf.matmul(delta_o, tf.transpose(tf.subtract(lambda_m[k, :], m_o)))
a3 = tf.multiply(tf.div(beta_o, 2.), tf.matmul(a1, a2))
a4 = tf.div(tf.multiply(tf.cast(D, tf.float64), beta_o),
tf.multiply(tf.cast(2., tf.float64), lambda_mu_beta_res[k]))
a5 = tf.multiply(tf.cast(1 / 2., tf.float64),
tf.log(tf.multiply(tf.pow(lambda_mu_beta_res[k], 2),
tf.matrix_determinant(delta_o))))
a6 = tf.add(a3, tf.add(a4, a5))
LB = tf.subtract(LB, a6)
b1 = tf.transpose(lambda_phi[:, k])
b2 = dirichlet_expectation(lambda_pi)[k]
b3 = tf.log(lambda_phi[:, k])
b4 = tf.multiply(tf.cast(1 / 2., tf.float64),
tf.log(tf.div(tf.matrix_determinant(delta_o),
tf.multiply(tf.cast(2., tf.float64),
math.pi))))
b5 = tf.subtract(xn_tf, lambda_m[k, :])
b6 = tf.matmul(delta_o, tf.transpose(tf.subtract(xn_tf, lambda_m[k, :])))
b7 = tf.multiply(tf.cast(1 / 2., tf.float64),
tf.stack([tf.matmul(b5, b6)[i, i] for i in range(N)]))
b8 = tf.div(tf.cast(D, tf.float64),
tf.multiply(tf.cast(2., tf.float64), lambda_beta[k]))
b9 = tf.subtract(tf.subtract(tf.add(tf.subtract(b2, b3), b4), b7), b8)
b1 = tf.reshape(b1, [1, N])
b9 = tf.reshape(b9, [N, 1])
LB = tf.add(LB, tf.reshape(tf.matmul(b1, b9), [1]))
# Parameter updates
assign_lambda_pi = lambda_pi.assign(
tf.reshape(tf.add(alpha_o, tf.reduce_sum(lambda_phi, 0)), [K, ]))
c1 = dirichlet_expectation(lambda_pi)
phi_tmp = []
for n in range(N):
k_list = []
for k in range(K):
c2 = tf.reshape(tf.subtract(xn_tf[n, :], lambda_m[k, :]), [1, D])
c3 = tf.matmul(delta_o, tf.reshape(
tf.transpose(tf.subtract(xn_tf[n, :], lambda_m[k, :])), [D, 1]))
c4 = tf.multiply(tf.cast(-1 / 2., tf.float64), tf.matmul(c2, c3))
c5 = tf.div(tf.cast(D, tf.float64),
tf.multiply(tf.cast(2., tf.float64), lambda_beta[k]))
k_list.append(tf.add(c1[k], tf.subtract(c4, c5)))
phi_tmp.append(tf.reshape(softmax(tf.stack(k_list)), [K, 1]))
assign_lambda_phi = lambda_phi.assign(tf.reshape(tf.stack(phi_tmp), [N, K]))
assign_lambda_beta = lambda_beta.assign(
tf.add(beta_o, tf.reduce_sum(lambda_phi, axis=0)))
d1 = tf.transpose(
tf.reshape(tf.tile(tf.div(tf.cast(1., tf.float64), lambda_beta), [D]),
[D, K]))
d2 = tf.add(tf.multiply(m_o, beta_o), tf.matmul(tf.transpose(lambda_phi), xn_tf))
assign_lambda_m = lambda_m.assign(tf.multiply(d1, d2))
# Summaries definition
tf.summary.histogram('lambda_phi', lambda_phi)
tf.summary.histogram('lambda_pi', lambda_pi)
tf.summary.histogram('lambda_mu_m', lambda_m)
tf.summary.histogram('lambda_mu_beta', lambda_beta)
merged = tf.summary.merge_all()
file_writer = tf.summary.FileWriter('/tmp/tensorboard/', tf.get_default_graph())
def main():
    """Run the coordinate-ascent inference loop until the ELBO converges.

    Relies on module-level state built above: the session `sess`, the
    `assign_lambda_*` ops, the ELBO tensor `LB`, the merged summaries,
    CLI `args`, the data `xn`, and VERBOSE/THRESHOLD/init_time.
    """
    # Plot configs
    if VERBOSE:
        plt.ion()  # interactive mode so the scatter plot refreshes each iteration
        fig = plt.figure(figsize=(10, 10))
        ax_spatial = fig.add_subplot(1, 1, 1)
        circs = []
        sctZ = None
    # Inference
    init = tf.global_variables_initializer()
    sess.run(init)
    lbs = []  # ELBO trace, one entry per iteration
    n_iters = 0
    for _ in range(args.maxIter):
        # Variational parameter updates (one full coordinate-ascent sweep)
        sess.run(assign_lambda_pi)
        sess.run(assign_lambda_phi)
        sess.run(assign_lambda_beta)
        sess.run(assign_lambda_m)
        m_out, beta_out, pi_out, phi_out = sess.run(
            [lambda_m, lambda_beta, lambda_pi, lambda_phi])
        # ELBO computation
        mer, lb = sess.run([merged, LB])
        lbs.append(lb[0][0])
        if VERBOSE:
            print('\n******* ITERATION {} *******'.format(n_iters))
            print('lambda_pi: {}'.format(pi_out))
            print('lambda_beta: {}'.format(beta_out))
            print('lambda_m: {}'.format(m_out))
            print('lambda_phi: {}'.format(phi_out[0:9, :]))
            print('ELBO: {}'.format(lb))
            ax_spatial, circs, sctZ = plot_iteration(ax_spatial, circs, sctZ,
                                                     sess.run(lambda_m),
                                                     sess.run(delta_o),
                                                     xn, n_iters, K)
        # Break condition: stop at the iteration cap, or (after the first
        # iteration) once the ELBO improvement drops below THRESHOLD.
        improve = lb - lbs[n_iters - 1]
        if VERBOSE: print('Improve: {}'.format(improve))
        if (n_iters == (args.maxIter - 1)) \
                or (n_iters > 0 and 0 < improve < THRESHOLD):
            if VERBOSE and D == 2: plt.savefig('generated/plot.png')
            break
        n_iters += 1
        file_writer.add_summary(mer, n_iters)
    if VERBOSE:
        # Final report: converged means, wall-clock time and the ELBO trace.
        print('\n******* RESULTS *******')
        for k in range(K):
            print('Mu k{}: {}'.format(k, m_out[k, :]))
        final_time = time()
        exec_time = final_time - init_time
        print('Time: {} seconds'.format(exec_time))
        print('Iterations: {}'.format(n_iters))
        print('ELBOs: {}'.format(lbs))


if __name__ == '__main__': main()
| true |
86bfa78a3a4e7a6bc9428420e6bef56805edf80c | Python | my-xh/cs-course-project | /homework/10_4_1.py | UTF-8 | 4,279 | 4 | 4 | [] | no_license | """
AVL tree -- a self-balancing binary search tree.

Every key insertion adjusts the tree so that it stays balanced.  Each node
carries a balance factor bf = height(left subtree) - height(right subtree):
bf > 0 means left-heavy, bf < 0 means right-heavy, and the tree is balanced
while every bf stays within [-1, 1].

Rebalancing rules:
left-heavy:  if the left child is right-heavy, left-rotate the left child
             first, then right-rotate the current node
right-heavy: if the right child is left-heavy, right-rotate the right child
             first, then left-rotate the current node
"""
from pythonds.trees.bst import BinarySearchTree, TreeNode
class AVLTree(BinarySearchTree):
    """Self-balancing BST: every insertion keeps each node's balance factor
    (left-subtree height minus right-subtree height) within [-1, 1]."""

    # Redefine the _put method so new leaves trigger rebalancing.
    def _put(self, key, val, current_node):
        """Insert key/val below current_node, rebalancing along the path."""
        if key < current_node.key:  # belongs in the left subtree
            if current_node.hasLeftChild():  # left child already exists
                self._put(key, val, current_node.leftChild)
            else:
                current_node.leftChild = TreeNode(key, val, parent=current_node)
                self.updateBalance(current_node.leftChild)  # rebalance the tree
        else:  # belongs in the right subtree
            if current_node.hasRightChild():  # right child already exists
                # BUG FIX: the original recursed into the *left* child here,
                # misplacing every key that should descend to the right.
                self._put(key, val, current_node.rightChild)
            else:
                current_node.rightChild = TreeNode(key, val, parent=current_node)
                self.updateBalance(current_node.rightChild)  # rebalance the tree

    def updateBalance(self, node):
        """Propagate the balance-factor change of a fresh node upward,
        rebalancing the first ancestor whose factor leaves [-1, 1]."""
        if node.balanceFactor > 1 or node.balanceFactor < -1:  # out of balance
            self.rebalance(node)  # restore balance here; no further propagation
            return
        if node.parent != None:  # does the height change propagate upward?
            if node.isLeftChild():
                node.parent.balanceFactor += 1
            elif node.isRightChild():
                node.parent.balanceFactor -= 1
            if node.parent.balanceFactor != 0:  # a factor of 0 absorbs the change
                self.updateBalance(node.parent)  # keep adjusting up the path

    def rebalance(self, node):
        """Restore balance at `node` using single or double rotations."""
        if node.balanceFactor < 0:  # node is right-heavy
            if node.rightChild.balanceFactor > 0:  # right child is left-heavy
                self.rotateRight(node.rightChild)  # double rotation: right first
                self.rotateLeft(node)  # then left-rotate the node itself
            else:
                self.rotateLeft(node)
        elif node.balanceFactor > 0:  # node is left-heavy
            if node.leftChild.balanceFactor < 0:  # left child is right-heavy
                self.rotateLeft(node.leftChild)  # double rotation: left first
                self.rotateRight(node)  # then right-rotate the node itself
            else:
                self.rotateRight(node)

    def rotateLeft(self, rotRoot):  # left rotation
        """Left-rotate around rotRoot; its right child becomes the subtree root."""
        newRoot = rotRoot.rightChild  # the right child becomes the new pivot
        rotRoot.rightChild = newRoot.leftChild  # adopt the new root's left subtree
        # BUG FIX: the original tested `newRoot.leftChild != 0`, which is True
        # even for None and then crashed on `.parent`; compare against None
        # instead (mirrors rotateRight below).
        if newRoot.leftChild is not None:
            newRoot.leftChild.parent = rotRoot
        newRoot.parent = rotRoot.parent
        if rotRoot.isRoot():  # the rotated node was the tree root
            self.root = newRoot
        else:
            if rotRoot.isLeftChild():  # the rotated node was a left child
                rotRoot.parent.leftChild = newRoot
            else:  # the rotated node was a right child
                rotRoot.parent.rightChild = newRoot
        newRoot.leftChild = rotRoot
        rotRoot.parent = newRoot
        # Standard AVL balance-factor bookkeeping after a left rotation.
        rotRoot.balanceFactor = rotRoot.balanceFactor+1-min(newRoot.balanceFactor, 0)
        newRoot.balanceFactor = newRoot.balanceFactor+1+max(rotRoot.balanceFactor, 0)

    def rotateRight(self,rotRoot):
        """Mirror image of rotateLeft: the left child becomes the subtree root."""
        newRoot = rotRoot.leftChild
        rotRoot.leftChild = newRoot.rightChild
        if newRoot.rightChild != None:
            newRoot.rightChild.parent = rotRoot
        newRoot.parent = rotRoot.parent
        if rotRoot.isRoot():
            self.root = newRoot
        else:
            if rotRoot.isRightChild():
                rotRoot.parent.rightChild = newRoot
            else:
                rotRoot.parent.leftChild = newRoot
        newRoot.rightChild = rotRoot
        rotRoot.parent = newRoot
        rotRoot.balanceFactor = rotRoot.balanceFactor - 1 - max(newRoot.balanceFactor, 0)
        newRoot.balanceFactor = newRoot.balanceFactor - 1 + min(rotRoot.balanceFactor, 0)
| true |
e474df0c448b1630b678756b1e08b9eeefc083dd | Python | Aasthaengg/IBMdataset | /Python_codes/p02971/s105000392.py | UTF-8 | 193 | 2.96875 | 3 | [] | no_license | N = int(input())
# For each element print the maximum of the other N-1 values: the runner-up
# when the element itself is the maximum, otherwise the maximum.
# (N was read from stdin on the line above.)
values = [int(input()) for _ in range(N)]
ordered = sorted(values)
largest = ordered[-1]
runner_up = ordered[-2]
for current in values:
    print(runner_up if current == largest else largest)
eb6cc8ae81d7bf0555bd5eadda638542a417d60f | Python | fletchermoore/AnkiNotebooks | /src/anki_notebooks/exporter.py | UTF-8 | 1,735 | 2.625 | 3 | [] | no_license | from anki.exporting import Exporter
from .write import writeDoc
from .paths import escapedCardToPath, pathsToBullets
import re
class DocExporter(Exporter):
key = _("Cards as Word Document")
ext = ".docx"
def __init__(self, col):
Exporter.__init__(self, col)
def exportInto(self, path):
self.count = 0 # required for success prompt
bullets = self.createBullets()
writeDoc(path, bullets)
def escapeText(self, text):
"Escape newlines, tabs, CSS."
# fixme: we should probably quote fields with newlines
# instead of converting them to spaces
text = text.replace("\n", " ")
text = text.replace("\t", " " * 8)
text = re.sub("(?i)<style>.*?</style>", "", text)
text = re.sub(r"\[\[type:[^]]+\]\]", "", text)
# if "\"" in text:
# text = "\"" + text.replace("\"", "\"\"") + "\""
return text
def createBullets(self):
ids = sorted(self.cardIds())
def esc(s):
# strip off the repeated question in answer if exists
s = re.sub("(?si)^.*<hr id=answer>\n*", "", s)
return self.escapeText(s)
paths = []
for cid in ids:
c = self.col.getCard(cid)
question = esc(c.q())
answer = esc(c.a())
path = escapedCardToPath(question, answer)
paths.append(path)
bullets = pathsToBullets(paths)
return bullets
def cardIds(self):
if not self.did:
cids = self.col.db.list("select id from cards")
else:
cids = self.col.decks.cids(self.did, children=True)
self.count = len(cids)
return cids
| true |
9ff7c88767b8edf11d570eac55cbc783e3491fd0 | Python | JavaRod/SP_Python220B_2019 | /students/alexander_boone/lesson01/activity/calculator/adder.py | UTF-8 | 265 | 2.90625 | 3 | [] | no_license | """
This module provides an addition operator.
"""
class Adder:
    """Addition operation on two input parameters."""

    @staticmethod
    def calc(operand_1, operand_2):
        """Return the sum operand_1 + operand_2."""
        result = operand_1 + operand_2
        return result
| true |
0e66afc27aae6360984a744e79a18f889048f363 | Python | eliorcc/ieeextreme-programming-competition | /game_of_stones.py | UTF-8 | 518 | 3.34375 | 3 | [] | no_license | test_cases = int(input())
# One winner per test case; `test_cases` is read from stdin on the line above.
outcomes = []
for _ in range(test_cases):
    number_of_games = int(input())
    stones = []
    for _ in range(number_of_games):
        int(input())  # pile-count line: parsed (may raise) but unused
        stones.extend(int(token) for token in input().split(" "))
    half_total = 0
    for size in stones:
        half_total += size // 2
    # Even parity of sum(size // 2) means the second player (Bob) wins.
    outcomes.append('Bob' if half_total % 2 == 0 else 'Alice')
for outcome in outcomes:
    print(outcome)
| true |
234be0092c11de430b257d101c47ba0c9bc4096f | Python | hchen13/tof-ai | /gesture_python/data_preprocess.py | UTF-8 | 1,717 | 2.640625 | 3 | [] | no_license | import matplotlib
matplotlib.use('TkAgg')
import cv2
import pathlib
import settings
def display_image(*images, col=None, width=20):
    """Show the given images in a grid with `col` columns (default: one row)
    in a figure `width` inches wide, using a gray colormap."""
    from matplotlib import pyplot as plt
    import numpy as np
    n_cols = col if col is not None else len(images)
    n_rows = np.math.ceil(len(images) / n_cols)
    plt.figure(figsize=(width, (width + 1) * n_rows / n_cols))
    for idx, image in enumerate(images, start=1):
        plt.subplot(n_rows, n_cols, idx)
        plt.imshow(image, cmap='gray')
    plt.show()
if __name__ == '__main__':
    # Mirror raw/<label>/* into gestures/<label>/imgN.jpg, resizing each
    # frame so its shorter side becomes 224 px (aspect ratio preserved).
    data_root = pathlib.Path(settings.DATA_ROOT)
    origin_image_root = data_root.joinpath('raw')
    dest_image_root = data_root.joinpath('gestures')
    origin_image_paths = list(origin_image_root.glob('*/*.*'))
    origin_image_paths = [str(path) for path in origin_image_paths]
    # Class labels are the per-gesture sub-directory names.
    label_names = sorted(item.name for item in origin_image_root.glob('*/') if item.is_dir())
    origin_image_labels = [pathlib.Path(path).parent.name for path in origin_image_paths]
    for i, path in enumerate(origin_image_paths):
        if i % 10 == 0:
            print('.', end='', flush=True)  # console progress indicator
        image_raw = cv2.imread(path)
        if image_raw is None:  # unreadable / non-image file: skip it
            continue
        dest_path = dest_image_root.joinpath(origin_image_labels[i])
        if not dest_path.exists():
            dest_path.mkdir()
        # Compute the scale factor that maps the short side to 224 px.
        height, width = image_raw.shape[:2]
        short = min(height, width)
        ratio = short / 224
        new_height = int(height / ratio)
        new_width = int(width / ratio)
        resized = cv2.resize(image_raw, (new_width, new_height))
        name = 'img{}.jpg'.format(i)
        fullname = dest_path.joinpath(name)
        cv2.imwrite(str(fullname), resized)
b0fa7cc5083081a65f4d588e64b83735bc248a30 | Python | rjr5838/EagleIslandAddons | /libTAS/EagleIsland2libTAS.py | UTF-8 | 5,994 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# This script converts .tas files from EagleIsland TAS tool
# (https://github.com/rjr5838/EagleIslandTAS/) to libTAS input file.
# Just run ./EagleIsland2libTAS path/to/tasfile.tas
import glob
import math
import os
import re
import sys
def main():
    # Entry point: convert the .tas file named on the command line (the
    # converter reads sys.argv[1] itself inside convert()).
    EagleIsland2libTAS().convert()
def get_line(label_or_line_number, file):
    """Resolve a line reference inside a TAS file.

    A numeric string is returned as its integer value.  Otherwise the file
    is scanned *from its current position* for a line equal to
    '#<label>\n'; the 1-based offset of that line is returned, or
    float('inf') when the label is never found.
    """
    try:
        return int(label_or_line_number)
    except ValueError:
        pass
    target = f'#{label_or_line_number}\n'
    for offset, text in enumerate(file, start=1):
        if text == target:
            return offset
    return float('inf')
class EagleIsland2libTAS:
    """Converter from the Eagle Island TAS input format to a libTAS .ltm
    input file (one '|keys|axes|buttons|' line per frame)."""

    def __init__(self):
        self.input_file = None   # opened in convert()
        self.output_file = None  # .ltm file written by export_file()
        # Matches '<frames>,<input>,<input>,...' where each input is a
        # single action letter or a numeric analog-stick angle.
        self.regex_input = re.compile(r'[\s]*([\d]*)((?:,(?:[RLUDJKXCGSQNFO]|[\d.]*))*)')
        # Matches comment lines ('#...') and blank lines.
        self.regex_comment = re.compile(r'[\s]*(#|[\s]*$)')
        self.frame_counter = 0  # total frames emitted so far (for logging)

    def convert(self):
        """Convert sys.argv[1] (a .tas file) into a sibling .ltm file."""
        self.input_file = open(sys.argv[1], 'r')
        self.output_file = open(f'{os.path.splitext(sys.argv[1])[0]}.ltm', 'w')
        # Perform the actual conversion
        self.export_file(self.input_file)
        self.output_file.close()

    def get_read_data(self, line: str):
        """Parse the argument of a 'Read' directive.

        `line` is the text after 'read '; format: '<file>[,<start>[,<end>]]'
        where start/end are line numbers or labels (resolved by get_line).
        Returns (open file, start line, end line) or (None, None, None) if
        the referenced file cannot be found.
        """
        index = line.find(',')
        if index > 0:
            file_path = line[0:index]
        else:
            file_path = line[0:-1]
        file_path = f'{os.path.dirname(sys.argv[1])}/{file_path}'
        # Check if full filename was used, get file if it wasn't
        if not os.path.exists(file_path):
            files = [f for f in glob.glob(f'{file_path}*.tas')]
            if not files:
                return None, None, None
            file_path = str(files[0])
        file = open(file_path, 'r')
        skip_lines = 0
        line_len = float('inf')
        # Check how many line numbers were given and convert any labels to lines
        if index > 0:
            index_len = line.find(',', index + 1)
            if index_len > 0:
                start_line = line[index + 1: index_len]
                end_line = line[index_len + 1:-1]
                skip_lines = get_line(start_line, file)
                # The second get_line continues from the current file
                # position, so the end is expressed relative to the start.
                line_len = skip_lines + get_line(end_line, file)
            else:
                start_line = line[index + 1:-1]
                skip_lines = get_line(start_line, file)
        if skip_lines is None:
            skip_lines = 0
        print(f"Reading {line[0:-1]} from {skip_lines} to {line_len}, at frame {self.frame_counter}")
        return file, skip_lines, line_len

    def export_file(self, file, start_line=0, end_line=float('inf')):
        """Translate lines [start_line, end_line] of `file` into libTAS
        frame lines, recursing into files referenced by 'Read' directives."""
        file.seek(0)
        cur_line = 0
        skip_line = False
        for line in file:
            cur_line += 1
            line_lower = line.lower()
            if cur_line <= start_line:
                continue
            if cur_line > end_line:
                break
            if skip_line:  # previous line was a 'Skip' directive
                skip_line = False
                continue
            if self.regex_comment.match(line):
                continue
            if line_lower.startswith('read'):
                read_path, start, end = self.get_read_data(line[5:])
                if read_path is not None:
                    self.export_file(read_path, start, end)
                continue
            if line_lower.startswith('add'):
                # Drop the 'Add' prefix; line_lower still holds the old text.
                line = line[3:]
            if line_lower.startswith('skip'):
                skip_line = True
                continue
            match = self.regex_input.match(line)
            if match:
                output_keys = ''
                # libTAS button slots and the TAS letters that map onto them
                # (a '.' in the mapping means the slot has no TAS action).
                button_order = 'ABXYbgs()[]udlr'
                button_mapping = 'JXCK..S...GUDLR'
                output_buttons = ['.'] * 15
                output_axes = '0:0'
                is_axis = False
                for single_input in match.group(2).split(',')[1:]:
                    if is_axis:
                        # This token is the angle argument of a preceding 'F'.
                        angle = 0 if single_input == '' else float(single_input)
                        # Compute coordinates of the left analog stick to match the
                        # requested angle. Use the max amplitude to get precise values.
                        # We must also compensate for the deadzone which is 0.239532471f
                        rad_angle = math.radians(angle)
                        deadzone = 0.239532471
                        float_x = math.copysign(math.fabs(math.sin(rad_angle)) * (1 - deadzone) + deadzone, math.sin(rad_angle))
                        float_y = math.copysign(math.fabs(math.cos(rad_angle)) * (1 - deadzone) + deadzone, math.cos(rad_angle))
                        x = 32767 * float_x
                        y = -32767 * float_y
                        output_axes = f'{str(int(x))}:{str(int(y))}'
                        is_axis = False
                        continue
                    if single_input == 'F':
                        is_axis = True  # next token is the stick angle
                        continue
                    if single_input == 'O':
                        output_keys = 'ff0d'  # keyboard scancode (Return)
                    elif single_input == 'Q':
                        output_keys = '72'
                    else:
                        output_keys = ''
                        # Look at the mapping of the action
                        mapped_index = button_mapping.find(single_input)
                        output_buttons[mapped_index] = button_order[mapped_index]
                # Write the constructed input line, ignore false positive matches
                output_line = f'|{output_keys}|{output_axes}:0:0:0:0:{"".join(output_buttons)}|.........|\n'
                try:
                    # group(1) is the repeat count; non-numeric -> not an input line.
                    for n in range(int(match.group(1))):
                        self.frame_counter += 1
                        self.output_file.write(output_line)
                except ValueError:
                    print(f"Ignoring {line[0:-1]}")
        print(f"Read {cur_line - start_line} lines from {file.name}")
        file.close()


if __name__ == '__main__':
    main()
| true |
006df71f5d16a383d5ffc1deaaf3103b1e24cbc7 | Python | shashasi/BRKRST-2600 | /off_the_box_demo.py | UTF-8 | 2,204 | 2.734375 | 3 | [] | no_license | # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
#THIS SCRIPT IS DESIGNED TO BE RUN FROM AN EXTERNAL MACHINE HAVING TELNET ACCESS TO NETWORK DEVICES.
#===============
#READ-ME
#===============
#Write and save IP Addresses of network devices in IP_Address.txt in same dir as this script, one in each line
#Write and save new config that needs to be pushed to network devices defined above in startup-config.txt in same dir as this script
#===============
import sys
import telnetlib,time

#Pass username and password as variables while running the script
user = sys.argv[1]
password = sys.argv[2]
#Privilege level 15 set for this user so enable pwd is not needed

# NOTE(review): Python 2 script (`print line` statement below); the name
# `object` used as a loop variable shadows the builtin.
#Load IP addresses of devices
with open("IP_Address.txt", "r") as object_1:
    IP = []
    for object in object_1:
        IP.append(object.rstrip())

#Load startup-config
with open("startup-config.txt", "r") as object_2:
    START_UP = []
    for object in object_2:
        START_UP.append(object.rstrip())

#Copy configuration
def copy_start(tn):
    # Enter config mode and replay the startup-config line by line;
    # the small sleeps pace the device so its CLI buffer keeps up.
    tn.write("conf t\r")
    time.sleep(0.1)
    print ("In config mode. Writing config now..")
    time.sleep(2)
    for line in START_UP:
        tn.write(line + "\r")
        print line
        time.sleep(0.05)

#iterate through IPs, telnet and call copy_start()
for ip in IP:
    tn = telnetlib.Telnet(ip)
    tn.read_until("Username:")
    tn.write(user + "\n")
    tn.read_until("Password:")
    tn.write(password + "\n")
    tn.read_until("#")
    copy_start(tn)
    tn.write("wr\n")  # save the running config
    tn.close()
6da16cf57c15aff82bffc7af23df3fb5ed944b8f | Python | Emoto13/Python101 | /week4/01.Mixins/mixins.py | UTF-8 | 2,266 | 3.09375 | 3 | [] | no_license | import json
from dicttoxml import dicttoxml
import jxmlease
class JsonableMixin:
    """Serialize an object's __dict__ (tagged with its class name) to/from JSON."""

    def to_json(self, indent=4):
        """Return a JSON string of the form {"type": ..., "dict": ...}."""
        payload = {'type': self.__class__.__name__, 'dict': self.__dict__}
        return json.dumps(payload, indent=indent)

    @classmethod
    def from_json(cls, json_string):
        """Rebuild an instance; raise ValueError when the stored type differs."""
        payload = json.loads(json_string)
        if payload['type'] != cls.__name__:
            raise ValueError('Wrong type.')
        return cls(**payload['dict'])
class SetAttributesMixin:
    """Accept arbitrary keyword arguments and store each as an attribute."""

    def __init__(self, **kwargs):
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class EqualAttributesMixin:
    """Compare instances by attribute dictionary instead of identity."""

    def __eq__(self, other):
        return vars(self) == vars(other)
# from_xml can return object which field values are strings
class XMLableMixin:
    """Serialize an object's __dict__ to/from XML via dicttoxml / jxmlease."""

    def to_xml(self):
        """Return the XML for self.__dict__ rooted at the class name, with
        the 39-character XML declaration prefix stripped."""
        root_name = type(self).__name__
        document = dicttoxml(self.__dict__, custom_root=root_name, attr_type=False)
        return document.decode("utf-8")[39:]

    @classmethod
    def from_xml(cls, xml_data):
        """Rebuild an instance from XML produced by to_xml.

        Numeric-looking string values are converted back to int/float by
        simplify_attributes; other field values remain strings.
        """
        as_dict = json.loads(json.dumps(jxmlease.parse(xml_data)))
        if cls.__name__ not in as_dict:
            raise ValueError('Wrong type')
        attributes = as_dict[cls.__name__]
        if attributes == '':
            return cls()
        simplify_attributes(attributes)
        return cls(**attributes)
def simplify_attributes(attributes):
    """In place, convert numeric-looking string values of `attributes` to
    int/float; recurse one level into nested dicts, skip lists/tuples."""
    for name in attributes:
        value = attributes[name]
        if type(value) in (list, tuple):
            continue
        if type(value) is dict:
            simplify_dict(attributes, name)
        elif is_float(value):
            attributes[name] = float(value)
        elif value.isdigit():
            attributes[name] = int(value)


def simplify_dict(attributes, key):
    """Convert numeric-looking string values of the nested dict
    attributes[key] in place."""
    nested = attributes[key]
    for name, value in nested.items():
        if is_float(value):
            nested[name] = float(value)
        elif value.isdigit():
            nested[name] = int(value)


def is_float(string):
    """True for strings like '3.14': removing a single '.' leaves only digits."""
    return '.' in string and string.replace('.', '', 1).isdigit()
| true |
6c1d9099e93402d739b2425403094894ec17b3c2 | Python | EdenAraura/Games-Programming | /Classwork/Lesson_3/pythonClass3.1.py | UTF-8 | 176 | 3.09375 | 3 | [] | no_license | list = ["mix", "xyz", "apple", "xanadu", "rovio"]
# Partition the words into those that start with 'x' and the rest, then
# print both groups sorted, 'x' words first.  (`list` is the word list
# defined on the line above; it shadows the builtin, so it is only read here.)
starts_with_x = sorted(word for word in list if word[0] == "x")
the_rest = sorted(word for word in list if word[0] != "x")
print(starts_with_x + the_rest)
| true |
01260961e9ef81352241d8abadb9cdf833794a77 | Python | JoakimHaurum/DL_Projects | /dlrepo/models/utils/saving.py | UTF-8 | 1,338 | 2.828125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
@author: Joakim Bruslund Haurum
"""
import pickle
import matplotlib.pyplot as plt
import numpy as np
def save_img_grid(dir_path, file_name, imgs, nh, nw, title = ""):
    """
    Save the given 2D images as an nh x nw grid figure (PDF) in dir_path.
    """
    assert imgs.shape[0] == nh*nw
    # Channel-first input (1/3/4 channels) is moved to channel-last for imshow.
    if imgs.shape[1] in [1, 3, 4]:
        imgs = imgs.transpose(0, 2, 3, 1)
    grayscale = imgs.shape[-1] == 1
    if grayscale:
        imgs = np.squeeze(imgs)
    cmap = "gray" if grayscale else None
    plt.figure()
    plt.clf()
    plt.suptitle(title)
    for idx in range(nh*nw):
        plt.subplot(nh, nw, idx + 1)
        plt.imshow(imgs[idx], cmap=cmap)
        plt.axis('off')
    plt.savefig('{}{}.pdf'.format(dir_path, file_name), bbox_inches="tight")
def save_model(dir_path, file_name, model, epoch):
    """
    Save the provided keras model as '<dir><name>_<epoch>.hdf5', echoing
    the target path first.
    """
    target_path = '{}{}_{}.hdf5'.format(dir_path, file_name, epoch)
    print(target_path)
    model.save(target_path)
def save_loss_log(dir_path, filename, loss_log):
    """
    Saves the provided loss log pickle file in the designated directory
    """
    # Context manager guarantees the handle is closed even if pickling
    # raises, unlike the previous open/dump/close sequence.
    with open(dir_path + filename, 'wb') as f:
        pickle.dump(loss_log, f, protocol=pickle.HIGHEST_PROTOCOL)
| true |
43609764e36d6680a16efb14f09aaa507224d2fb | Python | chrishanson06/Flask-API | /Flask/database/models.py | UTF-8 | 1,724 | 2.796875 | 3 | [
"MIT"
] | permissive | '''
Models to serialize between MongoDB and Python
'''
from .db import db
from flask_bcrypt import generate_password_hash, check_password_hash
import onetimepass
import base64, os, random, string
class Card(db.Document):
    # MongoDB document (mongoengine model) for a user-created card.
    name = db.StringField()
    content = db.StringField()
    width = db.IntField()
    height = db.IntField()
    owner = db.ReferenceField('User')  # cascade delete rule is registered at module bottom

    def serialize(self):
        """Return a JSON-serializable dict of this card (primary key as a string)."""
        return {
            'id': str(self.pk),
            'name': self.name,
            'content': self.content,
            'width': self.width,
            'height': self.height
        }
class User(db.Document):
    # MongoDB document for an account with salted bcrypt password and TOTP 2FA.
    email = db.EmailField(required=True, unique=True)
    password = db.StringField(required=True, min_length=6)  # bcrypt hash after hash_password()
    otpSecret = db.StringField()  # base32-encoded TOTP secret
    salt = db.StringField()  # per-user random suffix appended to the password before hashing
    admin = db.BooleanField()
    cards = db.ListField(db.ReferenceField('Card', reverse_delete_rule=db.PULL))

    def hash_password(self):
        """Generate a fresh salt (and an OTP secret on first use) and replace
        the plaintext password with its bcrypt hash.  Call before saving.

        NOTE(review): `random` is not cryptographically secure; the
        `secrets` module would be a stronger choice for the salt.
        """
        chars = string.ascii_letters + string.punctuation
        size = 12
        self.salt = ''.join(random.choice(chars) for x in range(size))
        self.password = generate_password_hash(self.password + self.salt).decode('utf8')
        if self.otpSecret is None:
            self.otpSecret = base64.b32encode(os.urandom(10)).decode('utf8')

    def check_password(self, password):
        """Check a plaintext password against the stored salted bcrypt hash."""
        return check_password_hash(self.password, password + self.salt)

    def get_totp_uri(self):
        """Return the otpauth:// provisioning URI for authenticator apps."""
        return 'otpauth://totp/Flask-API:{0}?secret={1}&issuer=Flask-API' \
            .format(self.email, self.otpSecret)

    def verify_totp(self, token):
        """Validate a time-based one-time password against the stored secret."""
        return onetimepass.valid_totp(token, self.otpSecret)

    def serialize(self):
        """Return a JSON-serializable dict of the user and their cards
        (password, salt and OTP secret deliberately excluded)."""
        mappedCards = list(map(lambda c: c.serialize(), self.cards))
        return {
            'id': str(self.pk),
            'email': self.email,
            'admin': self.admin,
            'cards': mappedCards
        }


# Deleting a user cascades to the cards they own.
User.register_delete_rule(Card, 'owner', db.CASCADE)
8c279f90d982a5220b161d13665bed7b193f3270 | Python | gholamifarshad98/Stereo_Vision_Camera | /stereo_cam_calibration/generate_stereo_calibration_remap_parameters.py | UTF-8 | 7,790 | 2.546875 | 3 | [] | no_license | import numpy as np # version: '1.14.0'
import cv2 # version: '3.1.0'
import glob
import pickle
import os
# NOTE(review): Python 2 script (`xrange`, `print 'x'`, `dict.viewkeys` below).
# Pipeline: detect chessboard corners in paired L/R frames, calibrate each
# camera, stereo-calibrate and rectify, then pickle the remap matrices.
# Set root directory.
rootDir = os.getcwd()
print('\nrootDir = {}\n'.format(rootDir))
# Set parameter directory.
dir_calib_parameter = rootDir+'/output/computed_calibration_parameters/'
# Set original frames directory.
dir_original_L = rootDir+'/original_frames/l/'
dir_original_R = rootDir+'/original_frames/r/'
# Set calibration process frames directory.
dir_calib_process = rootDir+'/output/calibration_process_frames/'
# Frames resolution.
widthPixel = 800
heightPixel = 600
# The inner corner numbers of calibration chessboard.
Nx = 10
Ny = 7
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 50, 1e-6)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((Nx*Ny,3), np.float32)
objp[:,:2] = np.mgrid[0:Ny,0:Nx].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpointsL = [] # 2d points in image plane.
imgpointsR = [] # 2d points in image plane.
# Load original frames.
os.chdir(dir_original_L) # Change dir to the path of left frames.
imagesL = glob.glob('*.jpg') # Grab all jpg file names.
imagesL.sort() # Sort frame file names.
os.chdir(dir_original_R) # Change dir to the path of right frames.
imagesR = glob.glob('*.jpg') # Grab all jpg file names.
imagesR.sort() # Sort frame file names.
# Check if the number of images in two folders are same.
if len(imagesL) != len(imagesR):
    print('Error: the image numbers of left and right cameras must be the same!')
    exit()
n = 0
for i in range(len(imagesL)):
    imgL = cv2.imread(dir_original_L + imagesL[i])
    imgR = cv2.imread(dir_original_R + imagesR[i])
    grayL = cv2.cvtColor(imgL,cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(imgR,cv2.COLOR_BGR2GRAY)
    # Find the chess board corners for left camera.
    retL, cornersL = cv2.findChessboardCorners(grayL,(Ny,Nx),None)
    # Find the chess board corners for right camera.
    retR, cornersR = cv2.findChessboardCorners(grayR,(Ny,Nx),None)
    # If both are found, add object points, image points (after refining them)
    if (retL and retR) == True:
        n += 1
        print('n = {}'.format(n))
        objpoints.append(objp)
        cornersL2 = cv2.cornerSubPix(grayL,cornersL,(11,11),(-1,-1),criteria)
        imgpointsL.append(cornersL2)
        cornersR2 = cv2.cornerSubPix(grayR,cornersR,(11,11),(-1,-1),criteria)
        imgpointsR.append(cornersR2)
        # Draw and display the corners
        imgL = cv2.drawChessboardCorners(imgL, (Ny,Nx), cornersL2, retL)
        imgR = cv2.drawChessboardCorners(imgR, (Ny,Nx), cornersR2, retR)
        imgTwin = np.hstack((imgL, imgR))
        cv2.imshow(imagesL[i] + '_' + imagesR[i], imgTwin)
        cv2.waitKey(1)
        cv2.destroyAllWindows()
        # Save frame pairs.
        cv2.imwrite(dir_calib_process + imagesL[i][:-6] + '_corners.jpg', imgTwin)
cv2.waitKey(1) # Wait program to close the last window.
# Calculate the camera matrix, distortion coefficients, rotation and translation vectors etc.
print('Calculating the camera matrix, distortion coefficients, rotation and translation vectors...')
retL, mtxL, distL, rvecsL, tvecsL = cv2.calibrateCamera(objpoints, imgpointsL, grayL.shape[::-1],None,None)
retR, mtxR, distR, rvecsR, tvecsR = cv2.calibrateCamera(objpoints, imgpointsR, grayR.shape[::-1],None,None)
print('Done.\n')
# Calculate reprojection errors.
print('Calculating reprojection errors...')
tot_errorL = 0
tot_errorR = 0
errorL = 0
errorR = 0
for i in xrange(len(objpoints)):
    imgpointsL2, _ = cv2.projectPoints(objpoints[i], rvecsL[i], tvecsL[i], mtxL, distL)
    imgpointsR2, _ = cv2.projectPoints(objpoints[i], rvecsR[i], tvecsR[i], mtxR, distR)
    errorL = cv2.norm(imgpointsL[i],imgpointsL2, cv2.NORM_L2)/len(imgpointsL2)
    errorR = cv2.norm(imgpointsR[i],imgpointsR2, cv2.NORM_L2)/len(imgpointsR2)
    tot_errorL += errorL
    tot_errorR += errorR
print 'mean error L: ', tot_errorL/len(objpoints)
print 'mean error R: ', tot_errorR/len(objpoints)
CamParasL = {'mtxL':mtxL, 'distL':distL, 'rvecsL':rvecsL, 'tvecsL':tvecsL}
CamParasR = {'mtxR':mtxR, 'distR':distR, 'rvecsR':rvecsR, 'tvecsR':tvecsR}
# Save calibration parameters.
print('Saving calibration parameters...')
pickle.dump(CamParasL, open(dir_calib_parameter+'CamParasL.p', 'wb') )
pickle.dump(CamParasR, open(dir_calib_parameter+'CamParasR.p', 'wb') )
print('Done.\n')
# Load the dictionary back from the pickle file.
#CamParasStereo = pickle.load( open( 'CamParasStereo.p', 'rb' ) )
CamParasL = pickle.load( open(dir_calib_parameter+'CamParasL.p', 'rb' ) )
CamParasR = pickle.load( open(dir_calib_parameter+'CamParasR.p', 'rb' ) )
CamParasL.viewkeys()
CamParasR.viewkeys()
# Restore calibration parameters from loaded dictionary.
mtxL = CamParasL['mtxL']
distL = CamParasL['distL']
rvecsL = CamParasL['rvecsL']
tvecsL = CamParasL['tvecsL']
mtxR = CamParasR['mtxR']
distR = CamParasR['distR']
rvecsR = CamParasR['rvecsR']
tvecsR = CamParasR['tvecsR']
# Stereo Calibration.
# cv2.stereoCalibrate Calculate the rectify parameters for stereo cameras.
print('Cacluating the rectify parameters for stereo cameras...')
retval, cameraMatrixL, distCoeffsL, cameraMatrixR, distCoeffsR, rotationMatrix, translationVector, essentialMatrix, fundamentalMatrix = cv2.stereoCalibrate(objpoints,imgpointsL, imgpointsR, mtxL, distL, mtxR, distR, (widthPixel,heightPixel), criteria, flags=cv2.CALIB_FIX_INTRINSIC)
# cv2.stereoRectify Computes the rotation matrices for each camera that(virtually) make both image planes the same plane. The function takes the matrices computed by stereoCalibrate() as imput.
'''
Usage:
rotationMatrixL, rotationMatrixR, projectionMatrixL, projectionMatrixR, disp2depthMappingMatrix, validPixROI1, validPixROI2 = cv2.stereoRectify(cameraMatrixL, distCoeffsL, cameraMatrixR, distCoeffsR, (widthPixel,heightPixel), rotationMatrix, translationVector, alpha=1, newImageSize=(0,0))
'''
rotationMatrixL, rotationMatrixR, projectionMatrixL, projectionMatrixR, disp2depthMappingMatrix, validPixROI1, validPixROI2 = cv2.stereoRectify(cameraMatrixL, distCoeffsL, cameraMatrixR, distCoeffsR, (widthPixel,heightPixel), rotationMatrix, translationVector, flags=cv2.CALIB_ZERO_DISPARITY, alpha=1, newImageSize=(0,0))
print('CALIB_ZERO_DISPARITY = ', cv2.CALIB_ZERO_DISPARITY)
print('rotationMatrixL = ')
print(rotationMatrixL)
print('rotationMatrixR = ')
print(rotationMatrixR)
print('projectionMatrixL = ')
print(projectionMatrixL)
print('projectionMatrixR = ')
print(projectionMatrixR)
print('disp2depthMappingMatrix = ')
print(disp2depthMappingMatrix)
print('validPixROI1 = ')
print(validPixROI1)
print('validPixROI2 = ')
print(validPixROI2)
# initUndistortRectifyMap
# cv2.CV_32FC1 is floating-point format map.
# cv2.CV_16SC2 is fixed-point format map, more compact and much faster.
'''
Usage:
initUndistortRectifyMap(...)
initUndistortRectifyMap(cameraMatrix, distCoeffs, R, newCameraMatrix, size, m1type[, map1[, map2]]) -> map1, map2
'''
mapxL, mapyL = cv2.initUndistortRectifyMap(mtxL, distL, rotationMatrixL, projectionMatrixL, (widthPixel,heightPixel), cv2.CV_32FC1)
mapxR, mapyR = cv2.initUndistortRectifyMap(mtxR, distR, rotationMatrixR, projectionMatrixR, (widthPixel,heightPixel), cv2.CV_32FC1)
print(mapxL.shape)
print(mapyR.shape)
stereoRemap = {'mapxL':mapxL, 'mapyL':mapyL, 'mapxR':mapxR, 'mapyR':mapyR}
# Save calibration parameters.
print('Saving stereo remap matrices...')
pickle.dump(stereoRemap, open(dir_calib_parameter+'stereoRemap.p', 'wb' ) )
pickle.dump(disp2depthMappingMatrix, open(dir_calib_parameter+'disp2depth.p', 'wb'))
print('Done.\n')
exit()
| true |
1afc70e1e5c789b406fd5a29134af939fc7f21b6 | Python | Aasthaengg/IBMdataset | /Python_codes/p03549/s093548401.py | UTF-8 | 101 | 2.953125 | 3 | [] | no_license | [n,m] = [int(i) for i in input().split()]
# x = (100*(n-m) + 1900*m) * 2**m, computed in two steps.  Presumably the
# expected total time: m slow cases (1900 ms) + n-m fast cases (100 ms) per
# attempt, with 2**m expected attempts -- matches the formula, confirm
# against the problem statement.  (n, m were read from stdin above.)
x = (n-m) * 100 * (2**m)
x += m * (2 ** m) * 1900
print(x)
| true |
44d02288b1ceac3e8cd0a87c4ca47e379441e21c | Python | databar-team/redis-flask-docker | /app.py | UTF-8 | 694 | 2.90625 | 3 | [] | no_license | import os
from flask import Flask, request
from redis import Redis
app = Flask(__name__)
# Hostname 'redis' -- presumably the docker-compose service name resolved
# inside the compose network; verify against the compose file.
redis = Redis(host='redis', port=6379)
@app.route('/')
def hello():
    """Landing page: point the user at the /put and /get routes."""
    message = "Go on /put route to put data, and on /get to read your data"
    return message
def put():
if request.method == 'POST':
data = request.form["putdata"]
redis.append("ime2",data+" ")
return '''
<form method="post">
<p><input type=text name=putdata>
<p><input type=submit value=Putdata>
</form>
'''
@app.route('/get')
def get():
    # Return the raw accumulated bytes stored under "ime2".
    # NOTE(review): redis.get returns None when the key was never written,
    # which Flask cannot turn into a response -- confirm /put runs first.
    data = redis.get("ime2")
    return data


if __name__ == "__main__":
    # 0.0.0.0 so the server is reachable from outside the container.
    app.run(host="0.0.0.0", debug=True)
2ac956012e4c1aec24afd7404cf8cae5588feaa1 | Python | ThorkellTheTall/Violeth | /Combat.py | UTF-8 | 4,959 | 2.625 | 3 | [] | no_license | import pygame
from pygame.locals import *
from sorts import *
from Personnage import *
from mobs import *
from Barre_de_vie import *
from level import *
from afficheur import *
# Background image for the combat interface.
image = pygame.image.load("interface combat.png")

pygame.init()
pygame.mouse.set_visible(1)  # keep the mouse cursor visible
global screen  # no-op at module level; kept from the original
screen = pygame.display.set_mode((800, 512))
stop = False  # main-loop exit flag
def affichage_tete(part1, part2, part3, key):
    """Blit the head sprite (image at index 1, position at index 0) of the
    part selected by the current key position.

    NOTE(review): assumes pos_clef(key) is a pure lookup returning 0, 1 or 2.
    """
    position = pos_clef(key)  # hoisted: the original re-evaluated this per branch
    parts = (part1, part2, part3)
    if position in (0, 1, 2):
        part = parts[position]
        screen.blit(part[1], part[0])
def affichage_corps(part1, part2, part3, key):
    """Blit the body sprite (image at index 3, position at index 2) of the
    part selected by the current key position.

    NOTE(review): assumes pos_clef(key) is a pure lookup returning 0, 1 or 2.
    """
    position = pos_clef(key)  # hoisted: the original re-evaluated this per branch
    parts = (part1, part2, part3)
    if position in (0, 1, 2):
        part = parts[position]
        screen.blit(part[3], part[2])
def affichage_pieds(part1, part2, part3, key):
    """Blit the feet sprite (image at index 5, position at index 4) of the
    part selected by the current key position.

    NOTE(review): assumes pos_clef(key) is a pure lookup returning 0, 1 or 2.
    """
    position = pos_clef(key)  # hoisted: the original re-evaluated this per branch
    parts = (part1, part2, part3)
    if position in (0, 1, 2):
        part = parts[position]
        screen.blit(part[5], part[4])
#boutons
COULEUR = (200, 191, 231) #couleur du remplissage
COLOR = (COULEUR)
bouton1 = pygame.Rect((253, 366), (271, 65)) #on créé un bouton 1
bouton2 = pygame.Rect((526, 366), (271, 65))
bouton3 = pygame.Rect((253, 432), (271, 65))
bouton4 = pygame.Rect((526, 432), (271, 65))
rect_surf1 = pygame.Surface(bouton1.size) #on donne une surface au bouton 1
rect_surf2 = pygame.Surface(bouton2.size)
rect_surf3 = pygame.Surface(bouton3.size)
rect_surf4 = pygame.Surface(bouton4.size)
rect_surf1.fill(COLOR) #on donne une couleur au bouton 1
rect_surf2.fill(COLOR)
rect_surf3.fill(COLOR)
rect_surf4.fill(COLOR)
# Debug dump of the randomly chosen mob (randomob comes from the star imports).
print(randomob)
# NOTE(review): at this point ``vie`` is still the function brought in by the
# star imports above; it is shadowed by an integer a few lines further down.
hp_randomob = vie(level, randomob[1])
print(hp_randomob)
hp1 = 400 # max HP of the player character
hp2 = 1000 # max HP of the monster
# NOTE(review): this rebinds ``vie`` from the imported function to an int, so
# any later call to the function would fail; hp_randomob computed above is
# never used afterwards -- confirm which HP source is intended.
vie = 1000
while not stop:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            stop = True
        elif event.type == MOUSEBUTTONUP: # when the mouse button is released
            if event.button == 1: # 1 = left click
                if bouton1.collidepoint(event.pos): # first spell button clicked
                    # dégats/lsorts/lresfixe/lres come from the star imports:
                    # apply the spell's damage, then shrink the monster HP bar.
                    vie = dégats(lsorts[0], vie, lresfixe[0], lres[0])
                    ratio2 = pertepv(hp2, vie) # ratio of HP lost
                    width2 = ratio2*183 # scale the bar width by the HP lost
                    width2 = round(width2) # round it (smoothscale only accepts ints)
                    rect_surf7 = pygame.transform.smoothscale(rect_surf7, (width2, 11)) # apply the new size
                elif bouton2.collidepoint(event.pos):
                    # Second spell: same HP-bar update sequence as above.
                    vie = dégats(lsorts[1], vie, lresfixe[1], lres[1])
                    ratio2 = pertepv(hp2, vie)
                    width2 = ratio2*183
                    width2 = round(width2)
                    rect_surf7 = pygame.transform.smoothscale(rect_surf7, (width2, 11))
                elif bouton3.collidepoint(event.pos):
                    # Third spell.
                    vie = dégats(lsorts[2], vie, lresfixe[2], lres[2])
                    ratio2 = pertepv(hp2, vie)
                    width2 = ratio2*183
                    width2 = round(width2)
                    rect_surf7 = pygame.transform.smoothscale(rect_surf7, (width2, 11))
                elif bouton4.collidepoint(event.pos):
                    # Fourth button: "stasis" spell; the player also loses 10%
                    # of hperso (presumably the player HP, from a star import --
                    # TODO confirm, it is read here before any local assignment).
                    vie = stasis(lsorts, lresfixe, lres, vie)
                    hperso = hperso/100
                    hperso = hperso - (hperso*0.1)
                    hperso = hperso * 100
                    ratio1 = pertepv(hp1, hperso)
                    ratio2 = pertepv(hp2, vie)
                    width1 = ratio1*183
                    width1 = round(width1)
                    width2 = ratio2*183
                    width2 = round(width2)
                    rect_surf6 = pygame.transform.smoothscale(rect_surf6, (width1, 11))
                    rect_surf7 = pygame.transform.smoothscale(rect_surf7, (width2, 11))
    # draw the buttons
    screen.blit(rect_surf1, bouton1)
    screen.blit(rect_surf2, bouton2)
    screen.blit(rect_surf3, bouton3)
    screen.blit(rect_surf4, bouton4)
    # draw the background -- NOTE(review): drawn AFTER the buttons, so it may
    # cover them unless the image is transparent there; confirm draw order.
    screen.blit(image, (0, 0))
    # draw the health bars (barre1/barre2, rect_surf6/7 come from star imports)
    screen.blit(rect_surf6, barre1)
    screen.blit(rect_surf7, barre2)
    # draw the monster (head, body, feet)
    affichage_tete(skull, arc, war, clefs_tetes)
    affichage_corps(skull, arc, war, clefs_corps)
    affichage_pieds(skull, arc, war, clefs_pieds)
    pygame.display.flip()
pygame.quit()
1c85a0a576082e4154f1b63e40c1186be45d9de2 | Python | Descent098/projects-experiments | /Languages/Python/snippits/string_similarity.py | UTF-8 | 2,076 | 3.5 | 4 | [
"MIT"
] | permissive | # See https://gist.github.com/Descent098/dae85d0235acce5322bf1277d1372a7e
from difflib import SequenceMatcher
# Faster when used with python-Levenshtein, but causes some issues
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
##################### Single word similarity ######################################
def similar_strings(to_compare: str, to_match: str) -> float:
    """Return the similarity ratio (0.0-1.0) between two strings.

    Leading/trailing whitespace is ignored before the comparison.

    Parameters
    ----------
    to_compare : str
        The string you want to compare
    to_match : str
        The string you want to compare against

    Returns
    -------
    float
        The ratio of the similarity between the two strings
    """
    cleaned_compare = to_compare.strip()
    cleaned_match = to_match.strip()
    matcher = SequenceMatcher(None, cleaned_compare, cleaned_match)
    return matcher.ratio()
print(fuzz.ratio("biiiild", "build"))
##################### Word suggestion ######################################
def suggest_word(input_word: str, word_list: list) -> str:
    """Return the word from word_list most similar to input_word.

    Fixes over the original: the leftover debug ``print(similarities)`` is
    removed, an empty word_list no longer raises IndexError, and the best
    key is taken with ``next(iter(...))`` instead of a one-iteration loop.

    Parameters
    ----------
    input_word : str
        The word you want to check for similarity
    word_list : list
        The list of words to test input_word against for similarity

    Returns
    -------
    str
        The most similar word; an empty string when word_list is empty or
        the best candidate has no more than 10% similarity.
    """
    # Guard against an empty candidate list (previously raised IndexError).
    if not word_list:
        return ""
    # Score every candidate, then order best-first (stable descending sort).
    similarities = {
        current_word: similar_strings(input_word, current_word)
        for current_word in word_list
    }
    similarities = dict(sorted(similarities.items(), key=lambda x: x[1], reverse=True))
    best_word = next(iter(similarities))
    # If the most likely suggestion has no more than 10% similarity, reject it.
    if similarities[best_word] <= 0.1:
        return ""
    return best_word
print(suggest_word("biiild", ["build", "init", "preview"]))
print(process.extract("biiild", ["build", "init", "preview"], limit=2)) | true |
f917f98f59de324fa7cc04a9e26e8ee9e7b02c4e | Python | dlatnrud/pyworks | /media_turtle/move_random.py | UTF-8 | 258 | 3.484375 | 3 | [] | no_license | # 마음대로 걷는 거북이
import turtle as t
import random
# Randomly wandering turtle: style the turtle, then take 300 short steps,
# each in a freshly chosen random direction.
t.shape('turtle')
t.bgcolor('pink')
t.color('blue')
t.speed(5)
for x in range(300):
    angle = random.randint(1, 360) # random heading between 1 and 360 degrees
    t.setheading(angle)
    t.forward(10)
ace290279810c49d135c771135cb48e9d66de17e | Python | adelamegaa/Quiz-4 | /Quiz43_Adela Mega Aglina_1201184258.py | UTF-8 | 233 | 3.609375 | 4 | [] | no_license | ListGPA = [2.1, 2.5, 4, 3]
def Hadiah (GPA):
    """Return the reward ("hadiah") for a GPA: GPA * Rp 500,000."""
    bonus_per_point = 500000
    payout = GPA * bonus_per_point
    return payout
# For each student GPA: print the reward above a 2.5 GPA, otherwise apologize.
for GPA in ListGPA:
    if GPA > 2.5:
        print('Hadiah : Rp ', Hadiah (GPA))
    else:
        # "Maaf" is Indonesian for "sorry" -- no reward for this GPA.
        print('Maaf')
85d3d770290c89d6961b376b22ec80e0a133095b | Python | ChaofeiLiu/D.S | /DS_sort_set/binarysearch.py | UTF-8 | 1,015 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env/ python
# -*— coding=utf-8 -*-
# @FileName :binarysearch.py
# @Time :2020/8/16
# @Author: chaofei_liu
# Plain iterative implementation.
def binarysearch(alist,item):
    """Iterative binary search: return True iff item is in the sorted alist."""
    lo, hi = 0, len(alist) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = alist[mid]
        if candidate == item:
            return True
        if candidate < item:
            lo = mid + 1   # discard the left half (including mid)
        else:
            hi = mid - 1   # discard the right half (including mid)
    return False
# Recursive implementation.
def binarysearch2(alist,item):
    """Recursive binary search: return True iff item is in the sorted alist.

    Improvement over the original: recurse on index bounds instead of list
    slices, so each level costs O(1) instead of copying O(n) elements.
    """
    def _search(lo, hi):
        # Empty range: the item is absent.
        if lo > hi:
            return False
        mid = (lo + hi) // 2
        if alist[mid] == item:
            return True
        if alist[mid] < item:
            return _search(mid + 1, hi)
        return _search(lo, mid - 1)

    return _search(0, len(alist) - 1)
inputlist = [1,2,3,5,6,8,9]
print(binarysearch(inputlist,11))
print(binarysearch2(inputlist,10))
| true |
3d96ab92b306088145c0a74855e9ebd8aba1ff2c | Python | AndrewBatty/Iteration | /Iteration_Development_Exercise2.py | UTF-8 | 261 | 3.734375 | 4 | [] | no_license | # Andrew Batty
# 29/10/14
# Development exercise 2
# Prompt for a rectangle's dimensions, then print it as rows of asterisks.
stars = int(input("Please enter the number of stars per row you would like: "))
rows = int(input("Please enter the number of rows you would like: "))
for count in range(rows):
    print("*" * stars)
| true |
8f2a6902d6e0f1a1c6173c2789e6f1420a0e2763 | Python | xtreezzz/msp_lesson2 | /if_string.py | UTF-8 | 477 | 3.78125 | 4 | [] | no_license | def string_checker(_s1, _s2):
    # Non-string input for either argument -> code 0.
    if isinstance(_s1, str) and isinstance(_s2, str):
        if _s1 == _s2:
            # Identical strings.
            return 1
        elif len(_s1) > len(_s2):
            # First string is strictly longer.
            return 2
        elif _s2 == 'learn':
            return 3
        else:
            # Fallback: none of the conditions above matched.
            return 4
    else:
        return 0
s1 = input('Строчка 1 ')
s2 = input('Строчка 2 ')
output_result = string_checker(s1, s2)
print(output_result)
| true |
abe126c148b6bb23f3ba17a496b1dd87f8e351e4 | Python | rstreppa/algorithms-Basics | /DataStructures/test_queue.py | UTF-8 | 665 | 3.21875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
@date: Tue Jan 11 21:56:07 2022
@author: rstreppa ( roberto.strepparava@gmail.com )
@company: https://github.com/rstreppa
@type: test
@description: test file for script C:/Users/rober/OneDrive/Documents/Library/Programming/Data_Structures/Queue/queue.py
"""
from queue import Queue
def main():
    """Demo the FIFO Queue: fill it, report its size, then drain three items."""
    fifo = Queue(maxsize = 10)
    for value in (1, 2, 3, 4, 5):
        fifo.put(value)
    print(fifo.qsize())
    print('######################')
    # Items come back in insertion order (FIFO).
    for _ in range(3):
        print(fifo.get())
    print("\nEmpty: ", fifo.empty())
    print(fifo)
    print('######################')
cc13c924afea8d14bd511b57c36821114f371bab | Python | okiki-oliyide/aws_projects | /shows/streamer_rule_config_builder.py | UTF-8 | 1,460 | 2.609375 | 3 | [] | no_license | import json
channel_count = 40
udp_start_emx = 20001  # first UDP port for the "emx" source
cameras_list = []
streams_list = []
stream_source = [ "emx" , "eml" ]
# For each channel, build one camera entry and one stream entry per source.
port_increment = 0
for channel_number in range(0,channel_count):
    for source_type in stream_source:
        if source_type == "emx":
            udp_start = udp_start_emx
            port_number = udp_start + port_increment
            stream_name = port_number
        else:
            # "eml" ports live 100 above the emx range; the stream name maps
            # back into the emx numbering.
            udp_start = udp_start_emx + 100
            port_number = udp_start + port_increment
            stream_name = port_number - 100
        # NOTE(review): port_increment advances once per (channel, source)
        # pair, so emx and eml ports of the same channel drift further apart
        # with each channel -- confirm this is intended rather than a
        # per-channel increment.
        cameras = dict()
        cameras["id"] = "%03d-%s-%s" % (channel_number,port_number,"in")
        cameras["ip"] = "0.0.0.0"
        cameras["port"] = port_number # this is incrementing
        cameras["protocol"] = "udp"
        streams = dict()
        # NOTE(review): both sources of a channel get the same stream id
        # ("NNN-out"); confirm ids are allowed to repeat across sources.
        streams["id"] = "%03d%s" % (channel_number,"-out")
        streams["video"] = {}
        streams["video"]["cam"] = cameras['id'] # references the camera ID above
        streams["video"]["pid"] = 0
        streams["audio"] = {}
        streams["audio"]["cam"] = cameras['id'] # references the camera ID above
        streams["audio"]["pid"] = 0
        streams["app"] = source_type
        streams["stream"] = stream_name # this is stream name and will be part of playback url
        cameras_list.append(cameras)
        streams_list.append(streams)
        port_increment += 1
# Emit both rule sets as JSON on stdout.
print(json.dumps(cameras_list))
print(json.dumps(streams_list))
7420b14764cc71f08b851c464156328af9753c2e | Python | FrancescoGobbo/Python | /libreriaPython.py | UTF-8 | 268 | 4 | 4 | [] | no_license | #questo programma ci fornisce 10 numeri casuali tra 1 e 50
import random
from math import sqrt
# Print 10 random integers in [1, 50], each followed by its square root.
# ("valore" is Italian for "value".)
for numero in range(10):
    valore=random.randint(1,50)
    print(valore)
    print("\nValore: " + str(valore))
    k=sqrt(valore)
    print("\nValore SQRT: " + str(k))
| true |
886384daedb97de230e8ff33c72da1faafec912e | Python | nombreinvicto/OpenCV | /ocv_mallick/1.Course1-Intro/Project2_MouseEventHandling/submission_trackbar.py | UTF-8 | 2,634 | 3.140625 | 3 | [] | no_license | import cv2
import numpy as np
# resize function
def resize(image: np.ndarray, target_size=(400, 400)) -> np.ndarray:
    """Return image resized to target_size (width, height) via cv2.resize."""
    return cv2.resize(image, dsize=target_size)
# putText function
def put_text(image: np.ndarray) -> None:
    """Draw the hint 'Hit ESC to close Window' near the image's bottom-left corner (in place)."""
    image_height = image.shape[0]
    # White text, thickness 2, anchored 10 px in from the left and bottom edges.
    cv2.putText(image, 'Hit ESC to close Window',
                (10, image_height - 10), cv2.FONT_HERSHEY_SIMPLEX,
                0.7, (255, 255, 255), 2)
# define the callbacks
def scale_factor_change_cb(*args):
    """Trackbar callback: rescale the displayed image by the slider value.

    args[0] is the trackbar position (0-100); the applied factor is
    1 +/- args[0]/100, with the sign chosen by current_scale_type
    (0 scales up, 1 scales down).
    """
    # NOTE(review): default_scale_factor and default_scale_type are declared
    # global here but never read or written in this function.
    global default_scale_factor, \
        default_scale_type, \
        image, \
        init_image, \
        current_scale_type
    # get the scale factor from trackbar
    scaleFactor = 1 + ((-1) ** current_scale_type) * (args[0] / 100.0)
    print("Scale Factor: ", scaleFactor)
    # Guard the degenerate case (scale type 1 at position 100 yields 0).
    if scaleFactor == 0:
        scaleFactor = 1
    # Always resize from the pristine original (init_image), not the current
    # image, so repeated scaling does not accumulate resampling error.
    scaledImage = cv2.resize(init_image,
                             None,
                             fx=scaleFactor,
                             fy=scaleFactor,
                             interpolation=cv2.INTER_LINEAR)
    image = scaledImage.copy()
    print("Current Shape: ", image.shape[:2])
def scale_type_change_cb(*args):
    """Trackbar callback: select the scale direction (0 = up, 1 = down).

    Only flips the sign used by scale_factor_change_cb; the image itself is
    rescaled on the next Scale-trackbar event.
    """
    global current_scale_type, init_image
    current_scale_type = args[0]
    # NOTE(review): current_image_shape is computed but unused -- the resize
    # below was commented out, leaving this line as dead code. Confirm intent.
    current_image_shape = image.shape[:2]
    # init_image = resize(init_image, target_size=current_image_shape)
# initialise global variables
window_name = 'Output'
# scale factor change trackbar
scale_factor_tb_name = 'Scale'
max_scale_factor = 100
default_scale_factor = 0
# scale up/down change trackbar
scale_type_tb_name = f'Scale Type'
max_scale_type = 1
default_scale_type = 0
current_scale_type = default_scale_type
# load the image to be shown
image = cv2.imread('sample.jpg')
image = resize(image, target_size=(600, 600))
init_image = image.copy()
# create a window to display results
cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
# create the trackbar objects
# 1. the scale factor change trackbar
cv2.createTrackbar(scale_factor_tb_name,
window_name,
default_scale_factor,
max_scale_factor,
scale_factor_change_cb)
# 2. the scale type change trackbar
cv2.createTrackbar(scale_type_tb_name,
window_name,
default_scale_type,
max_scale_type,
scale_type_change_cb)
k = 0
# loop until escape character is pressed
while k != 27:
cv2.imshow('Output', image)
put_text(image)
k = cv2.waitKey(25)
cv2.destroyAllWindows()
| true |
cca40679b6e368b862143f055c6e344ccf3cae7c | Python | gunyarakun/nicotagmap | /png2tile.py | UTF-8 | 415 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import math
import Image, ImageDraw, ImageFont
print sys.argv[1]
img = Image.open(sys.argv[1])
width, height = img.size
cols = int(math.ceil(width / 256.0))
rows = int(math.ceil(height / 256.0))
for y in range(rows):
for x in range(cols):
tile = img.crop((x * 256, y * 256, (x + 1) * 256, (y + 1) * 256))
tile.save('imgimg_%d_%d.png' % (x, y))
| true |
00f22eac5f9a017afe03ea3d4d7e88d221b634aa | Python | tpt5cu/python-tutorial | /language/python_369/python_369/popular_modules/tempfile_/introduction.py | UTF-8 | 3,491 | 3.796875 | 4 | [] | no_license | # https://docs.python.org/3.7/library/tempfile.html
import tempfile, pathlib, os
this_dir = pathlib.Path(__file__).parent
'''
- TemporaryFile, NamedTemporaryFile, TemporaryDirectory, and SpooledTemporaryFile are high-level interfaces whose resources can be automatically
removed when the script is finished
- mkstemp() and mkdtemp(): low-level interfaces whose resources require manual cleanup
- TemporaryFile, NamedTemporaryFile, SpooledTemporaryFile
- All have a default open mode of "w+b"
- w+ is to allow writing and reading without closing the file
- Binary mode is to ensure consistent behaivor on all platforms without regard to the data that is being stored
- I don't think I need to care about this
- "buffering", "encoding", and "newline" have the same meaning as for open()
'''
def mkstemp_():
    '''
    Demo of tempfile.mkstemp(): securely create a temp file and write to it.
    mkstemp() returns (os-level file handle, absolute filename); the file is
    NOT automatically deleted, "suffix"/"prefix"/"dir" control its name and
    location, and "text"=False (the default) opens it in binary mode.
    '''
    fd, path = tempfile.mkstemp(suffix='.temp', dir=this_dir)
    print(path)
    # os.write() works on the raw descriptor and requires a bytes payload.
    os.write(fd, b'This is a temporary file!')
    os.close(fd)
def temporary_file():
    '''
    The file will be destroyed as soon as it is closed. As a simplification: do not rely on the filesystem being able to see or not see that this file
    existed.
    - The temporary file does not show up in the filesystem according to os.scandir()
    - The "name" attribute of this type of temporary file is a number, so it's not very helpful
    '''
    with tempfile.TemporaryFile('w+', suffix='.mytemp', dir=this_dir) as f:
        print(type(f)) # <class '_io.TextIOWrapper'>
        #with tempfile.TemporaryFile(suffix='.mytemp', dir=this_dir) as f:
        #    print(type(f)) # <class '_io.BufferedRandom'>
        # Listing the directory demonstrates the anonymous temp file is absent.
        for entry in os.scandir(this_dir):
            print(entry.path)
        f.write('Hello from TemporaryFile()')
def optionally_deleted_named_temporary_file():
    '''
    Demo of tempfile.NamedTemporaryFile(): like TemporaryFile, except the
    file has a real, visible name in the filesystem (see the "name"
    attribute) and "delete" (default True) controls whether it is removed
    when closed.
    '''
    with tempfile.NamedTemporaryFile('w', suffix='.visibletemp', dir=this_dir, delete=True) as f:
        f.write('Hello from NamedTemporaryFile()')
        # The absolute path is visible while the file exists.
        print(f.name)
def mkdtemp_():
    '''Demo of tempfile.mkdtemp(): create a temp directory (never auto-deleted) and print its absolute path.'''
    created_dir = tempfile.mkdtemp(dir=this_dir)
    print(created_dir)
def deleted_temporary_directory():
    '''
    Demo of tempfile.TemporaryDirectory(): a context manager that yields the
    new directory's absolute path and deletes the whole tree on exit.
    '''
    with tempfile.TemporaryDirectory(dir=this_dir) as tmp_path:
        print(tmp_path)
if __name__ == '__main__':
#mkstemp_()
#temporary_file()
optionally_deleted_named_temporary_file()
#mkdtemp_()
#deleted_temporary_directory()
| true |
c2538eabaae13e128e64079a37259309bd5d2f16 | Python | LaPetiteBiche/take-home_exam1_prog | /HT1_ArthurBonetti/Question_3/question_3c.py | UTF-8 | 106 | 3 | 3 | [] | no_license | from Polar_Class import *
# Exercise driver: build three Polar coordinates and print each one,
# exercising Polar.__str__ from Polar_Class.
p1 = Polar(1,1)
print(p1)
p2 = Polar(7,3)
print(p2)
p3 = Polar(12,23)
print(p3)
6a87579a35cfb25f1ad6ae2af6fb2fafd9f8398d | Python | zhaoweikid/zbase3 | /web/cache.py | UTF-8 | 6,201 | 2.90625 | 3 | [] | no_license | # coding: utf-8
import os, sys
import time
import traceback
import types
import logging
log = logging.getLogger()
# 两种缓存模式
# 1. 所有缓存key共用同一个更新函数
# 2. 缓存为每个key都设置一个更新函数
class Cache (object):
    """Lazily-refreshed value cache supporting two usage modes.

    Mode 1: one shared update function for every key -- pass ``func`` to the
    constructor (or call ``add`` with a falsy key).
    Mode 2: a dedicated update function per key -- register it via ``add``.

    Update functions are called as ``func(key, old_data, item, *args, **kwargs)``
    and their return value becomes the cached data.
    """
    def __init__(self, func=None, timeout=10):
        # key -> {'key', 'func', 'timeout', 'last', 'data'}
        self._cache = {}
        # Shared defaults; only used by mode 1.
        self._func = func
        self._timeout = timeout
    def add(self, key, update_func, timeout):
        """Register an update function: per-key (mode 2) when ``key`` is
        truthy, otherwise as the shared default (mode 1)."""
        if key:
            # Mode 2: dedicated entry for this key.
            item = {'key':key, 'func':update_func, 'timeout':timeout, 'last':0, 'data':None}
            self._cache[key] = item
        else:
            # Mode 1: shared update function for every key.
            # Bug fix: this branch referenced the undefined name ``func``
            # (NameError at runtime); the parameter is ``update_func``.
            self._func = update_func
            self._timeout = timeout
    def exist(self, key):
        """Return True if ``key`` already has a cache entry."""
        return key in self._cache
    def remove(self, key):
        """Drop the cache entry for ``key`` (no-op when absent)."""
        if key in self._cache:
            self._cache.pop(key)
    def update(self, key, *args, **kwargs):
        """Force-refresh ``key`` via its update function and return the new
        data (None when the key is unknown)."""
        item = self._cache.get(key)
        if not item:
            return
        now = time.time()
        data = item['func'](key, item['data'], item, *args, **kwargs)
        item['data'] = data
        item['last'] = now
        return data
    def __call__(self, key, _refresh=False, *args, **kwargs):
        """Return the cached data for ``key``, refreshing it first when
        ``_refresh`` is set or the entry's timeout has elapsed."""
        item = self._cache.get(key)
        if not item:
            if not self._func:
                # Unknown key and no shared update function: nothing to do.
                return
            # First use of this key in mode 1: create its entry on demand.
            # Consistency fix: include 'key' like entries built by add().
            item = {'key':key, 'func':self._func, 'timeout':self._timeout, 'last':0, 'data':None}
            self._cache[key] = item
        data = item['data']
        now = time.time()
        if _refresh or now-item['last'] >= item['timeout']:
            data = self.update(key, *args, **kwargs)
        return data
# 这是第2种缓存
caches = Cache()
# Decorator for instance methods (uses the shared module-level ``caches``).
def with_cache(timeout):
    """Cache an instance method's result for ``timeout`` seconds.

    The cache key is (file path, class name, method name) only -- call
    arguments are deliberately NOT part of the key, so all calls within the
    timeout window share one cached value (see test_decorator_class below).
    """
    def f(func):
        fpath = os.path.abspath(__file__)
        funcname = func.__name__
        # Adapter matching Cache's update-function signature.
        def cache_wrap(key, value, info, *args, **kwargs):
            return func(*args, **kwargs)
        def _(*args, **kwargs):
            # args[0] is ``self``; its class name distinguishes same-named
            # methods on different classes.
            classname = args[0].__class__.__name__
            key = 'c_%s_%s_%s' % (fpath, classname, funcname)
            global caches
            if not caches.exist(key):
                caches.add(key, cache_wrap, timeout)
            return caches(key, False, *args, **kwargs)
        return _
    return f
# Decorator for standalone functions only (no ``self`` in the key).
def with_cache_func(timeout):
    """Cache a plain function's result for ``timeout`` seconds.

    Call arguments are not part of the cache key, so every call within the
    timeout window shares the same cached value.
    """
    def f(func):
        location = os.path.abspath(__file__)
        # Adapter matching Cache's update-function signature.
        def cache_wrap(key, value, info, *args, **kwargs):
            return func(*args, **kwargs)
        # The key only depends on decoration-time data, so build it once.
        cache_key = 'c_%s_%s' % (location, func.__name__)
        def _(*args, **kwargs):
            global caches
            if not caches.exist(cache_key):
                caches.add(cache_key, cache_wrap, timeout)
            return caches(cache_key, False, *args, **kwargs)
        return _
    return f
def test_2():
#import inspect
#print('-'*6, inspect.stack()[0].function, '-'*6)
def func1(key, value, info):
return 'name-%.3f' % time.time()
def func2(key, value, info):
return 'count-%.3f' % time.time()
global caches
#caches = Cache()
caches.add('name', func1, 0.3)
caches.add('count', func2, 0.3)
for i in range(0, 3):
print(caches('name'))
print(caches('count'))
time.sleep(.2)
time.sleep(.1)
print("refresh:", caches('name', True))
time.sleep(.1)
print("refresh:", caches('name', True))
time.sleep(.1)
print("refresh:", caches('name', True))
time.sleep(.1)
print(caches('name'))
def test_1():
    """Exercise mode 1: one shared update function, entries created on demand."""
    #import inspect
    #print('-'*6, inspect.stack()[0].function, '-'*6)
    def func1(key, value, info):
        return '%s-%.3f' % (key, time.time())
    # 0.3 second timeout for every key.
    c = Cache(func1, 0.3)
    for i in range(0, 3):
        print(c('haha1'))
        print(c('haha2'))
        time.sleep(.2)
    time.sleep(.1)
    # Forced refreshes must each produce a fresh timestamped value.
    v1 = c('haha1', True)
    print("refresh:", v1)
    time.sleep(.1)
    v2 = c('haha1', True)
    print("refresh:", v2)
    time.sleep(.1)
    assert v1 != v2
    v3 = c('haha1', True)
    print("refresh:", v3)
    time.sleep(.1)
    assert v3 != v2
    print(c('haha1'))
def test_decorator_class():
#import inspect
#print('-'*6, inspect.stack()[0].function, '-'*6)
class Test1 (object):
def test(self, name):
return 'test1-%s-%f' % (name, time.time())
class Test2 (object):
@with_cache(0.2)
def test(self, name):
return 'test2-%s-%f' % (name, time.time())
t1 = Test1()
t2 = Test2()
last_v1 = 0
last_v2 = 0
last_v21 = 0
for i in range(0, 3):
v1 = t1.test(str(i))
v2 = t2.test(str(i))
v21 = t2.test(name=str(i))
print('Test1:', v1)
print('Test2:', v2)
print('Test2:', v21)
assert v2 == v21
if i == 1:
assert v2 == last_v2
assert v21 == last_v21
if i == 2:
assert v2 != last_v2
assert v21 != last_v21
last_v1 = v1
last_v2 = v2
last_v21 = v21
time.sleep(0.1)
def test_decorator_func():
#import inspect
#print('-'*6, inspect.stack()[0].function, '-'*6)
def test1(name):
return 'test1-%s-%f' % (name, time.time())
@with_cache_func(0.2)
def test2(name):
return 'test2-%s-%f' % (name, time.time())
last_v1 = 0
last_v2 = 0
last_v21 = 0
for i in range(0, 3):
v1 = test1(str(i))
v2 = test2(str(i))
v21 = test2(name=str(i))
print('Test1:', v1)
print('Test2:', v2)
print('Test2:', v21)
assert v2 == v21
if i == 1:
assert v2 == last_v2
assert v21 == last_v21
if i == 2:
assert v2 != last_v2
assert v21 != last_v21
last_v1 = v1
last_v2 = v2
last_v21 = v21
time.sleep(0.1)
if __name__ == '__main__':
    # Auto-discover and run every module-level test_* function.
    # Snapshot the key list so the globals dict can change safely while
    # the tests execute.
    fs = list(globals().keys())
    for k in fs:
        if k.startswith('test_'):
            print('-'*6, k, '-'*6)
            globals()[k]()
| true |
13de5563be6926a7302e2e86c18dc93bbbcb968a | Python | janakparmar9491/The-Complete-Python-Masterclass-Learn-Python-From-Scratch | /2-Control-Structure-In-Python/2-Intro-List.py | UTF-8 | 165 | 3.515625 | 4 | [] | no_license | people = ["Janak","Kuldip","Jakey","Ashok","Vijay"]
# Indexing: third element ("Jakey") and a slice of the first three names.
print(people[2])
print(people[0:3])
num = [1,2,3,4,5]
print(num)
a = [] # define an empty list (similar to an array in other languages)
print(a)
02a1f72b8ab33d359f6add2e9886815b844929ba | Python | sadiqulislam/Python-Practice-All- | /globalvariableKeyword.py | UTF-8 | 73 | 3.15625 | 3 | [] | no_license | g = 10
def rise(m):
    """Print the module-level global ``g`` plus a local offset, then ``m``."""
    offset = 10
    total = g + offset
    print(total, m)
rise("We") | true |
e5457e2238d350ed473608e2ba0d8235b00ef883 | Python | HubertSiewior/Python-AGH-Projects | /software engineering - project/tests/test_get_module_usage.py | UTF-8 | 2,618 | 2.828125 | 3 | [] | no_license | import os
import unittest
from H1 import get_module_usage as gmu
class TestStringMethods(unittest.TestCase):
def test_get_node_data(self):
path_dir = '../test_dir2'
os.mkdir(path_dir)
path1 = os.path.join(path_dir, 'test_file1.py')
f1 = open(path1, "w+")
f1.write("# !/usr/bin/env python\n")
f1.write("# -*- coding: utf-8 -*-\n")
f1.write("\n\n")
f1.write("import os\n")
f1.write("\n\n")
f1.write("def testa():\n")
f1.write("\tpass")
f1.write("\n\n")
f1.write("def testb():\n")
f1.write("\ttesta()\n")
f1.write("\n\n")
f1.close()
path2 = os.path.join(path_dir, 'test_file2.py')
f2 = open(path2, "w+")
f2.write("# !/usr/bin/env python\n")
f2.write("# -*- coding: utf-8 -*-\n")
f2.write("\n\n")
f2.write("import os\n")
f2.write("\n\n")
f2.write("def testc():\n")
f2.write("\tpass")
f2.write("\n\n")
f2.write("def testd():\n")
f2.write("\ttestc()\n")
f2.write("\n\n")
f2.write("def main(argument_path=None):\n")
f2.write("\tteste()\n")
f2.write("\n\n")
f2.write("if __name__ == '__main__':\n")
f2.write("\tmain()\n\n")
f2.close()
files_with_modules = gmu.get_files_with_modules(path_dir, False)
nodes = gmu.get_node_data(path_dir, files_with_modules, False)
expected_result_1 = ['test_file1.py', 'test_dir2', 105, True, '../test_dir2/test_file1.py', ['testa', 'testb'], [[None, 'os', None, '/usr/lib/python3.5/os.py']], ['testa'], [None, 0]]
expected_result_2 = ['test_file2.py', 'test_dir2', 182, True, '../test_dir2/test_file2.py', ['main', 'testc', 'testd'], [[None, 'os', None, '/usr/lib/python3.5/os.py']], ['teste', 'main', 'testc'], [0, None]]
self.assertEqual(nodes[0].to_array(), expected_result_1)
self.assertEqual(nodes[1].to_array(), expected_result_2)
os.remove(path1)
os.remove(path2)
os.rmdir(path_dir)
def test_to_simple_array(self):
file_node = gmu.FileNode("name", "module_name", 2, None, None, None, None, None, None)
array = file_node.to_simple_array()
self.assertEqual(array, ["name", 2, None])
def test_to_array(self):
file_node = gmu.FileNode("name", "module_name", 2, None, None, None, None, None, None)
array = file_node.to_array()
self.assertEqual(array, ["name", "module_name", 2, None, None, None, None, None, None])
if __name__ == '__main__':
unittest.main()
| true |
cf94fba585717e105b9bb2087c07596d895e0534 | Python | MrPx/1521006homework | /1520873-实验三/testHello.py | UTF-8 | 596 | 2.671875 | 3 | [] | no_license | import unittest
from hello import User
from hello import db
class TestHello(unittest.TestCase):
    """Unit tests for the User model in hello.py (backed by hello.db)."""
    def setUp(self):
        # Fresh schema before every test.
        db.create_all()
    def tearDown(self):
        # Drop all tables so tests stay independent.
        db.drop_all()
    def test_creatUser(self):
        # A user created with only a name keeps that name.
        a=User('shenqianqian')
        self.assertEqual(a.username,'shenqianqian')
    def test_userAge(self):
        # NOTE(review): asserts the default age is 20 -- confirm against the
        # User model's definition in hello.py.
        b=User('shenqianqian')
        self.assertEqual(b.age,20)
    def test_userAgeInput(self):
        # An explicitly supplied age overrides the default.
        c=User('shenqianqiann',24)
        self.assertEqual(c.age,24)
if __name__ == '__main__':
unittest.main()
| true |
21a884cafccf86053831561a183c2fbdd6ab45d8 | Python | sid597/Nand-To-Games | /projects/08/directoryTranslator.vm.py | UTF-8 | 903 | 2.515625 | 3 | [] | no_license | import os
import inspect
class Parser:
    """Walks a directory tree and translates every .vm file found.

    NOTE(review): parse_directory() re-instantiates THIS class with two
    arguments (Parser(vm_file_code, file_path)) although __init__ only
    accepts dir_name, then calls parse_asm(), which this class does not
    define; Code_Writer is also undefined in this module. A different
    Parser/Code_Writer pair was likely meant to be imported -- confirm.
    """
    def __init__(self, dir_name):
        # Root directory whose tree will be scanned for .vm files.
        self.dir_name = dir_name
    def parse_directory(self):
        """Translate each .vm file found under self.dir_name (see NOTE above)."""
        for dirpath, dirnames, files in os.walk(self.dir_name):
            # NOTE(review): dir_name is computed here but never used below.
            dir_name = dirpath.split('/')[-1]
            for file_name in files:
                if file_name.endswith('.vm'):
                    # Absolute path built from this script's own location.
                    file_path = (os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))) + "/" + \
                                dirpath + "/" + file_name
                    with open(file_path, 'r') as vm_file_code:
                        parser = Parser(vm_file_code, file_path)
                        parsed_file, command_type_list = parser.parse_asm()
                        code_writer = Code_Writer(file_path, parsed_file, command_type_list)
                        code_writer.open_file_and_write()
                        # Redundant: the with-statement already closes the file.
                        vm_file_code.close()
0fdaa85ba85d159ee63797357480c004ae5d50f4 | Python | bcogrel/webid-delegated-auth | /webid_delegated_auth/exceptions.py | UTF-8 | 3,704 | 2.59375 | 3 | [
"LicenseRef-scancode-mit-old-style",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | """
Exceptions.
Compatibility with those declared in https://auth.my-profile.eu/
"""
NO_CLAIM_CODE = "noClaim"
NO_CERT_CODE = "nocert"
CERT_NO_OWNERSHIP_CODE = "certNoOwnership"
REJECTED_CLAIM_CODE = "rejectedClaim"
CERT_WITHOUT_URI_CODE = "noURI"
EXPIRED_CERT_CODE = "certExpired"
UNDECLARED_CERT_CODE = "noVerifiedWebId"
NOT_A_WEBID_CODE = "noWebId"
IDP_ERROR_CODE = "IdPError"
class AuthException(Exception):
"""
Abstract Auth exception
"""
pass
class UserAuthException(AuthException):
"""
Exception due to the user (not the auth service)
"""
pass
class NoClaimException(UserAuthException):
"""
The user gave no assertion to prove
its identity.
Happens when the user wants to stay
anonymous.
New and generic
Code: noClaim
"""
class NoCertException(NoClaimException):
"""
The client did not provide a certificate.
Specific to WebID-TLS.
Code: nocert
"""
pass
class CertNoOwnershipException(NoCertException):
"""
The cert and its private key does not match.
Strange mesage that may happen when the user
refuses to show its cert.
Specific to WebID-TLS
Code: certNoOwnership
"""
pass
class RejectedClaimException(UserAuthException):
"""
The user provided a claim
but it has been rejected for some reasons
(impossible to verify, wrong, expired, etc.)
New! generic
Code: rejectedClaim
"""
pass
class CertWithoutUriException(RejectedClaimException):
"""
The user cert does not contain an URI.
The other claim is not considered.
Specific to WebID-TLS
Code: noURI
"""
pass
class ExpiredUserCertException(RejectedClaimException):
"""
The client used a certificate that is expired
Specific to WebID-TLS
Code: certExpired
TODO: get the expiration date
"""
pass
class UndeclaredCertException(RejectedClaimException):
"""
No entry for the user cert has been found
in the WebID profile document.
Specific to WebID-TLS
Code: noVerifiedWebId
"""
pass
class NotAWebIDException(RejectedClaimException):
"""
The given URI is not a WebID (no profile
document has been found)
Code: noWebId
"""
class IdPException(RejectedClaimException):
"""
Error with the Identity Provider
Code: IdPError
"""
pass
class AuthServiceError(AuthException):
"""
Error while interacting with the Auth Service
"""
pass
class InvalidServiceURLError(AuthServiceError):
""" HTTPS is required """
pass
class InvalidCallbackURLError(AuthServiceError):
""" If HTTPS is required on the client-side (recommended) """
pass
class RejectedAuthURLError(AuthServiceError):
pass
class IncompleteAuthURLError(RejectedAuthURLError):
""" WebID, ts and sig query entries are required
"""
pass
class InvalidSignatureError(RejectedAuthURLError):
""" The string $authreqissuer?webid=$webid&ts=$timeStamp has not
signed by the expected certificate """
pass
class ExpiredAuthURLError(RejectedAuthURLError):
    """The auth URL's timestamp is too old.

    Attributes:
        expired_since: how long ago the URL expired, in seconds.
    """
    def __init__(self, expired_since):
        # Use super() instead of a hard-coded parent call so the class stays
        # correct under base-class renames or multiple inheritance.
        super(ExpiredAuthURLError, self).__init__(
            "Auth URL has expired %d seconds ago" % expired_since)
        # In seconds
        self.expired_since = expired_since
class UnsyncClockError(RejectedAuthURLError):
"""
When the negative time is too important
"""
pass
| true |
d958fef1388426fc3e4f7703dc8e066f49b8fba2 | Python | alfredo-gimenez/sos_flow | /src/soapy/draw.py | UTF-8 | 2,534 | 2.78125 | 3 | [
"LicenseRef-scancode-other-permissive"
] | permissive | import os
import sys
import sqlite3
import numpy as np
import pylab as pl
from data_utils import is_outlier
# name of the sqlite database file
sqlite_file = os.environ.get("SOS_LOCATION", ".") + "/" + sys.argv[1]
table_name = 'tblvals' # name of the table to be queried
print("Connecting to: ", sqlite_file)
# Connecting to the database file
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
#sql_statement = (" SELECT "
# " tblvals.row_id, tbldata.name, tblvals.val, tblvals.time_pack, tblvals.time_send, tblvals.time_recv "
# " FROM "
# " (tblvals INNER JOIN tbldata ON tblvals.guid = tbldata.guid) "
# " WHERE "
# " tblvals.guid "
# " IN (SELECT guid FROM tbldata WHERE tbldata.name LIKE 'cpu_avg%') "
# " AND "
# " tblvals.rowid < 100; ")
sql_statement = (" SELECT "
" tblvals.row_id, tbldata.name, tblvals.val, tblvals.time_pack, tblvals.time_send, tblvals.time_recv "
" FROM "
" (tblvals INNER JOIN tbldata ON tblvals.guid = tbldata.guid) "
";")
#" WHERE "
#" tblvals.row_id > 10000000 "
#" AND "
#" tblvals.row_id < 20000000;")
print("Executing query: ", sql_statement)
c.execute(sql_statement);
print("Fetching rows.")
# fetchall() loads the entire result set into memory at once.
all_rows = c.fetchall()
print("Making numpy array of: pack_time")
# Column 3 of the SELECT list is time_pack: when the client packed the value.
pack_time = np.array([x[3] for x in all_rows])
print("Making numpy array of: latencies")
# Latency = time_recv - time_send (columns 5 and 4): client send to daemon receive.
latencies = np.array([(x[5] - x[4]) for x in all_rows])
print("len(pack_time) == ", len(pack_time))
print("len(latencies) == ", len(latencies))
print("Skipping outlier-filter stage.")
#filtered_pack_time = pack_time[~is_outlier(latencies)]
#filtered_latencies = latencies[~is_outlier(latencies)]
print("Plotting: x=pack_time, y=latencies")
pl.title("Latency Between Client SOS_publish() and Daemon DB Insert");
pl.plot(pack_time, latencies)
pl.ylabel("Latency (sec.)")
pl.xlabel("Timestamp When Client Sent Value to Daemon (sec. from earliest value)")
print("Showing plot...")
pl.show()
print("Closing connection to database.")
# Closing the connection to the database file
conn.close()
print("Done.")
# ----------
# Reference:
#
# Make an array of x values
#x = [1, 2, 3, 4, 5]
# Make an array of y values for each x value
#y = [1, 4, 9, 16, 25]
# use pylab to plot x and y
#pl.plot(x, y)
# show the plot on the screen
#pl.show()
#end
| true |
8dd5b717d5758d93183bc90d9e3a6c64dee6ceae | Python | MimiBambino/MachineLearning | /poi_id.py | UTF-8 | 8,911 | 2.671875 | 3 | [] | no_license | #!/usr/bin/python
import sys
import pickle
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
from tester import test_classifier, dump_classifier_and_data
from sklearn.preprocessing import MinMaxScaler
#from sklearn.pipeline import Pipeline
from sklearn.decomposition import RandomizedPCA
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import train_test_split
#from sklearn.cross_validation import StratifiedShuffleSplit
#from sklearn.metrics import precision_score, recall_score, accuracy_score
### Task 1: Select what features you'll use.
### features_list is a list of strings, each of which is a feature name.
### The first feature must be "poi".
# Candidate list: the label 'poi' first (required by featureFormat), then
# the financial features from the Enron data set.
features_list = ['poi',
                 'bonus',
                 'deferral_payments',
                 'deferred_income',
                 'director_fees',
                 'exercised_stock_options',
                 'expenses',
                 'loan_advances',
                 'long_term_incentive',
                 'restricted_stock',
                 'restricted_stock_deferred',
                 'salary',
                 'total_payments',
                 'total_stock_value',
                 'other']
### Load the dictionary containing the dataset
data_dict = pickle.load(open("final_project_dataset.pkl", "r") )
### Task 2: Remove outliers
# Remove observations that are not people.
data_dict.pop('TOTAL', 0)
data_dict.pop('THE TRAVEL AGENCY IN THE PARK', 0)
# Remove person with 'NaN' values for every feature.
# NOTE(review): no default is passed here, so this raises KeyError if the
# key is ever absent (unlike the two pops above).
data_dict.pop('LOCKHART EUGENE E')
### Task 3: Create new feature(s)
### Store to my_dataset for easy export below.
my_dataset = data_dict
# New features to represent the proportion of emails to or from a POI.
# to_poi_ratio = from_this_person_to_poi / to_messages; set to 'NaN' when
# either source field is missing.
for person in my_dataset:
    if my_dataset[person]['to_messages'] == 'NaN' or my_dataset[person]['from_this_person_to_poi'] == 'NaN':
        my_dataset[person]['to_poi_ratio'] = 'NaN'
    else:
        my_dataset[person]['to_poi_ratio'] = float(my_dataset[person]['from_this_person_to_poi'])/float(my_dataset[person]['to_messages'])
# from_poi_ratio = from_poi_to_this_person / from_messages; defaults to 'NaN'.
for person in my_dataset:
    my_dataset[person]['from_poi_ratio'] = 'NaN'
    if my_dataset[person]['from_messages'] != 'NaN' and my_dataset[person]['from_poi_to_this_person'] != 'NaN':
        my_dataset[person]['from_poi_ratio'] = float(my_dataset[person]['from_poi_to_this_person'])/float(my_dataset[person]['from_messages'])
features_list.append('to_poi_ratio')
print "To POI Ratio added to features_list.", "\n"
features_list.append('from_poi_ratio')
print "From POI Ratio added to features_list.", "\n"
# Tally how many people have a valid (non-'NaN') ratio and sum the ratios.
num_to_poi_ratios = 0
total_to_poi_ratios = 0
num_from_poi_ratios = 0
total_from_poi_ratios = 0
for person in my_dataset:
    if my_dataset[person]['to_poi_ratio'] != "NaN":
        num_to_poi_ratios += 1
        total_to_poi_ratios += my_dataset[person]['to_poi_ratio']
    if my_dataset[person]['from_poi_ratio'] != "NaN":
        num_from_poi_ratios += 1
        total_from_poi_ratios += my_dataset[person]['from_poi_ratio']
# NOTE(review): avg_to_poi_ratio is computed but never used afterwards.
avg_to_poi_ratio = total_to_poi_ratios / float(num_to_poi_ratios)
### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
# Scaler, if needed.
# NOTE(review): MinMaxScaler was already imported at the top of the file;
# this re-import is redundant but harmless.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
features = scaler.fit_transform(features)
# Select the 11 best features with SelectKBest and print their ranking.
from sklearn.feature_selection import SelectKBest
print "SelectKBest Feature Ranking"
k_best = SelectKBest(k=11)
k_best.fit(features, labels)
# Build (selected?, feature name, score) triples, sorted by score descending.
results_list = zip(k_best.get_support(), features_list[1:], k_best.scores_)
results_list = sorted(results_list, key=lambda x: x[2], reverse=True)
count = 1
refined_features = []
for i in results_list:
    if i[0]:
        print count, "\t", i
        refined_features.append(i[1])
        count += 1
# Ranked 11 best features as determined by SelectKBest above.
# NOTE(review): this list is immediately overwritten by the final list below;
# it is kept only as a record of the ranking.
features_list = ['poi',
                 'exercised_stock_options',
                 'total_stock_value',
                 'bonus',
                 'salary',
                 'deferred_income', #5
                 'long_term_incentive', # 6
                 'restricted_stock',# 7
                 'total_payments', # 8
                 'loan_advances', # 9
                 'expenses', # 10
                 'from_poi_ratio' # 11
                 ]
########## FINAL FEATURES LIST #########
features_list = ['poi',
                 'exercised_stock_options',
                 'total_stock_value',
                 'bonus']
# Re-extract the data using only the final features.
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
# Scaler, for K Nearest Neighbors.
# from sklearn.preprocessing import MinMaxScaler
# scaler = MinMaxScaler()
# features = scaler.fit_transform(features)
## For local testing
def print_results(i):
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, random_state=42)
clf.fit(features_train, labels_train)
print 'Best score: %0.3f' % clf.best_score_
print 'Best parameters set:'
best_parameters = clf.best_estimator_.get_params()
new_params = {}
for param_name in sorted(parameters.keys()):
print '\t%s: %r' % (param_name, best_parameters[param_name])
new_params[param_name] = best_parameters[param_name]
predictions = clf.predict(features_test)
# print 'Accuracy: ', accuracy_score(labels_test, predictions)
# print 'Precision: ', precision_score(labels_test, predictions)
# print 'Recall: ', recall_score(labels_test, predictions)
print "----------------------------------------------------------------------"
# Grid-search each candidate classifier; print_results (defined above)
# reports the best CV score and winning parameters for each.
clf_list = ["tree", "bayes", "adaboost", "kNeighbors"]
for i in clf_list:
    print "---------------------------"+ i.upper() +"----------------------------"
    if i == "tree":
        # Decision tree: tune split criterion, splitter strategy and
        # minimum samples per split.
        from sklearn.tree import DecisionTreeClassifier
        tree = DecisionTreeClassifier()
        parameters = {'criterion': ["gini", "entropy"],
                      'splitter': ['best', 'random'],
                      'min_samples_split': [2,3,4,5]
                      }
        clf = GridSearchCV(tree, parameters, verbose=1, cv=10)
        print_results(i)
    if i == "bayes":
        # Gaussian naive Bayes has no hyper-parameters worth tuning here,
        # so the grid is empty.
        from sklearn.naive_bayes import GaussianNB
        bayes = GaussianNB()
        parameters = {}
        clf = GridSearchCV(bayes, parameters, verbose=1, cv=10)
        print_results(i)
    if i == "adaboost":
        # AdaBoost over the tuned decision tree from the "tree" branch.
        from sklearn.ensemble import AdaBoostClassifier
        adaboost = AdaBoostClassifier(DecisionTreeClassifier(criterion='entropy', min_samples_split=3, splitter='best'))
        parameters = {'n_estimators': [10, 20, 30, 40, 50, 60, 70],
                      'algorithm': ['SAMME', 'SAMME.R'],
                      'learning_rate': [.5,.8, 1, 1.2, 1.5]}
        clf = GridSearchCV(adaboost, parameters, verbose=1, cv=10)
        print_results(i)
    if i == "kNeighbors":
        # k-nearest neighbours; the Minkowski power parameter p is also
        # included in the search grid.
        from sklearn.neighbors import KNeighborsClassifier
        kNeighbors = KNeighborsClassifier()
        parameters = {'n_neighbors': [2,3,4,5,6,7],
                      'algorithm': ['ball_tree', 'kd_tree', 'brute', 'auto'],
                      'weights': ['uniform', 'distance'],
                      'p': [3,4,5,6,7,8]
                      }
        clf = GridSearchCV(kNeighbors, parameters, verbose=1, cv=10)
        print_results(i)
########### BACKUP CLASSIFIERS ###########
# Alternative tuned classifiers kept for reference (all commented out).
#from sklearn.tree import DecisionTreeClassifier
#clf = DecisionTreeClassifier(criterion='entropy', min_samples_split=2, splitter='best')
#from sklearn.naive_bayes import GaussianNB
#clf = GaussianNB()
#from sklearn.neighbors import KNeighborsClassifier
#clf = KNeighborsClassifier(algorithm='ball_tree', n_neighbors=2, p=6, weights='uniform')
###########################
### Task 5: Tune your classifier to achieve better than .3 precision and recall
### using our testing script.
### Because of the small size of the dataset, the script uses stratified
### shuffle split cross validation. For more info:
### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html
########### FINAL CLASSIFIER ##############
# AdaBoost over an entropy decision tree, using the best parameters found
# by the grid search above.
# NOTE(review): DecisionTreeClassifier is in scope only because the "tree"
# branch of the tuning loop above imported it at module level.
from sklearn.ensemble import AdaBoostClassifier
clf = AdaBoostClassifier(DecisionTreeClassifier(criterion='entropy', min_samples_split=3, splitter='best'),
                         algorithm='SAMME', learning_rate=1.5, n_estimators=40)
# Evaluate with the project's stratified-shuffle-split tester.
test_classifier(clf, my_dataset, features_list)
### Dump your classifier, dataset, and features_list so
### anyone can run/check your results.
dump_classifier_and_data(clf, my_dataset, features_list)
| true |
0d3dfc4315e122d445a741a9f63a18185ad394b4 | Python | Jav1-Mart1nez/W4-apis-project | /src/main.py | UTF-8 | 1,739 | 2.921875 | 3 | [] | no_license | import argparse
import pandas as pd
import genera_args as gen_a
def main():
# Importamos el DataFrame que queremos filtrar.
videogames = pd.read_csv("../outputs/clean_metacritic_games.csv")
args = gen_a.filtro()
release_date = args.year
platform = args.platform
genre = args.genre
metascore = args.calification
# Creamos los condicionales para el filtrado de datos.
if platform:
videogames1 = videogames[videogames.platform==args.platform].head()
if platform not in "Switch, 3DS, WIIU":
print("no has introducido una plataforma correcta, por favor consulta -help para observar las plataformas disponibles")
if release_date:
videogames2 = videogames1[videogames1.release_date==args.year].head()
if release_date<2011 or release_date>2020:
print("el year debe estar comprendido entre 2011 y 2020 (ambos inclusive)")
if genre:
videogames3 = videogames2[videogames2.genre==args.genre].head()
if genre not in "Action, Role-Playing, Action Adventure, Miscellaneous, Adventure, Puzzle, Strategy, Sports, Simulation, General, Casual, Driving, Platformer, Action RPG, Racing, Fantasy, RPG, Scrolling, Fighting, Breeding/Constructing, Japanese-Style, Other, Sim, Console-style RPG, Sci-Fi, Modern":
print("no has introducido un género correcto, por favor consulta -help para observar los géneros disponibles")
if metascore:
videogames4 = videogames3[videogames3.metascore==args.calification].head()
if metascore<71 or metascore>97:
print("la calification debe estar comprendido entre 71 y 97 (ambos inclusive)")
print(videogames4)
# Entry point: run the CLI filter only when executed as a script.
if __name__ == "__main__":
    main()
d2d78c3ccb6719074eae1f8a56392bc72a90aec4 | Python | lafionium/DMI | /PYTHON/hello.py | UTF-8 | 1,120 | 2.96875 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Demonstration of backslash escape sequences in Python 2 print statements.
# The triple-quoted blocks below are inert string literals that serve as
# commented-out examples; their inline notes are in Latvian/Russian and are
# left untouched (they are runtime string literals, not comments).
#print "Hello World!"
# \b backspace: moves the cursor back one column before the next character.
'''
print "Here \bthe \bspaces \bare \bbackspaced."
raksta bez atstarpem \b peremesaet kursor nazad
'''
# \n newline: prints each word on its own line.
'''
print "Here \nthe \nspaces \nare \nnewlined."
piwet kazdoje slovo s novoj strocki \n
'''
# \t horizontal tab: replaces the space with a tab stop.
'''
print "Here \tthe \tspaces \thave \thorizontal \ttab \tspaces."
piwet vmesto probela TAB
hf |
ghf |
jghhf |
'''
# \v vertical tab: terminal-dependent; typically advances down a line
# without returning to column 0, producing a staircase effect.
'''
print "Here \vthe \vspaces \vhave \vvertical \vtab \vspaces."
Here
	the
		spaces
			have
				vertical
					tab
						spaces.
'''
# Live examples: a Russian poem printed with \v between words, then a
# reverse staircase built from \t and \n.
print "Сижу \vза \vрешеткой \vв \vтемнице \vсырой.\vВскормленный \vв \vневоле \vорел \vмолодой,"
print"\t\t\t\t\t\t\t\tМой \n\t\t\t\t\t\t\tгрустный \n\t\t\t\t\t\tтоварищ, \n\t\t\t\t\tмахая \n\t\t\t\tкрылом,\n\t\t\tКровавую \n\t\tпищу \n\tклюет \nпод окном,"
| true |
d132790a36b255338fb6c7c2e4d8a93672844710 | Python | LauraAndreaM/rpi-digitales3 | /led.py | UTF-8 | 294 | 3.46875 | 3 | [] | no_license | #Encender Leds por teclado
import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(5, GPIO.OUT)
while(True):
teclado= input("'a' encendido, 'b' apagado:")
if teclado == "a":
GPIO.output(5,True)
if teclado == "b":
GPIO.output(5,False)
GPIO.cleanup()
| true |
2b54df2fa836be9abc8153fee971373ae8101240 | Python | RenukaNaik/Python | /positiveNum.py | UTF-8 | 756 | 3.8125 | 4 | [] | no_license | #let's code loops!
#Task 2:
#Write a python program to print all positive numbers in a range
# Read how many numbers to collect, gather them from the user, then
# report which of them are positive.
list1 = []
list2 = []
n = int(input('enter number of elements: '))
for _ in range(n):
    list1.append(int(input()))
print('Input list: ', list1)
# Keep only the strictly positive values.
list2 = [value for value in list1 if value > 0]
print('Output list: ', list2)
'''
Output:
Python 3.9.0 (tags/v3.9.0:9cf6752, Oct  5 2020, 15:34:40) [MSC v.1927 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> 
====== RESTART: C:/Users/HP/Desktop/Python/lets code loops/positiveNum.py ======
enter number of elements: 5
-3
11
55
-67
43
Input list:  [-3, 11, 55, -67, 43]
Output list:  [11, 55, 43]
>>> 
'''
| true |
a250484426993a385e5628dd76d9aefa54ee947b | Python | alvinwang922/Data-Structures-and-Algorithms | /Queues/Sliding-Window-Max.py | UTF-8 | 986 | 4.125 | 4 | [] | no_license | """
Given an array nums, there is a sliding window of size k which
is moving from the very left of the array to the very right. You
can only see the k numbers in the window. Each time the sliding
window moves right by one position. Return the max sliding window.
Follow up: Could you solve it in linear time?
"""
class Solution:
def maxSlidingWindow(self, nums: List[int], k: int):
q, res = deque(), []
for i in range(len(nums)):
if i - k >= 0:
res.append(nums[q[0]])
while q and q[0] <= i - k:
q.popleft()
while q and nums[i] > nums[q[-1]]:
q.pop()
q.append(i)
res.append(nums[q[0]])
return res
print(maxSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3))
print(maxSlidingWindow([1, 3, 4, 2, 5, 7], 2))
print(maxSlidingWindow([1, 2, 4, 5, 9, 7], 4))
print("The arrays above should be [3, 3, 5, 5, 6, 7], \
[3, 4, 4, 5, 7], and [5, 9, 9].")
| true |
c27bdd751c3061be4b4ccc0716469451078314c9 | Python | SanjitRao/multiGameApp | /Unit4Project/httpdb_data server.py | UTF-8 | 1,598 | 2.703125 | 3 | [] | no_license | import wsgiref.simple_server
import urllib.parse
from cs043_lesson2_2.database import Simpledb
def application(environ, start_response):
headers = [('Content-Type', 'text/plain; charset=utf-8')]
path = environ['PATH_INFO']
params = urllib.parse.parse_qs(environ['QUERY_STRING'])
db = Simpledb('datafile.txt')
if path == '/insert':
start_response('200 OK', headers)
i = db.insert(params['key'][0], params['value'][0])
if i:
return ['Inserted'.encode()]
else:
return ['Was not inserted properly, try again'.encode()]
elif path == '/select': # todo figure out if this works
start_response('200 OK', headers)
s = db.select_one(params['key'][0])
if s[0]:
return [s[1].encode()]
else:
return ['NULL'.encode()]
elif path == '/delete': # todo need to complete this function
start_response('200 OK', headers)
d = db.delete(params['key'][0])
if d:
return ['DELETED'.encode()]
else:
return ['NULL'.encode()]
elif path == '/update':
start_response('200 OK', headers)
up = db.update(params['key'][0], params['value'][0])
if up:
return ['UPDATED'.encode()] # todo see if i dont need to encode this and line 32
else:
return ['NULL'.encode()]
else:
start_response('404 Not Found', headers)
return ['Status 404: Resource not found'.encode()]
httpd = wsgiref.simple_server.make_server('', 8000, application)
httpd.serve_forever()
| true |