blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a337cd7e52086c9133fd71dcc92cc8a02615f7e7 | Python | shenely/ouroboros | /ob-math/ob-math/vec.py | UTF-8 | 10,513 | 2.75 | 3 | [] | no_license | # built-in libraries
import math
import itertools
# external libraries
import numpy
import scipy.linalg
# internal libraries
from ouroboros import Type, Image, Node
# exports
# Public names exported by this module: the JSON-serializable vector type and
# the frame/coordinate conversion image factories defined below.
__all__ = ("vector",
           "abs2rel", "rel2abs",  # absolute <-> relative frame
           "nrt2rot", "rot2nrt",  # inertial <-> rotating frame
           "fun2obl", "obl2fun",  # fundamental <-> oblique plane
           "rec2sph", "sph2rec")  # rectangular to spherical coordinates
# numpy.ndarray <-> JSON: serialized with ndarray.tolist, rebuilt with
# numpy.array when the "!math/vector" tag is decoded.
vector = Type("!math/vector", numpy.ndarray,
              numpy.ndarray.tolist,
              numpy.array)
@Image(".vec@abs2rel",
       src=Node(evs=(True,), args=(),
                ins=(True,), reqs=("_bar", "_t_bar"),
                outs=(), pros=()),
       trg=Node(evs=(), args=(),
                ins=(), reqs=(),
                outs=(True,), pros=("_bar", "_t_bar")),
       ref=Node(evs=(), args=(),
                ins=(), reqs=("_bar", "_t_bar"),
                outs=(), pros=()))
def abs2rel(src, trg, ref):
    """Absolute to relative origin.

    Generator image: each cycle reads the origin state from ``ref`` and the
    absolute state (vector and rate) from ``src``, then publishes the state
    of ``src`` relative to that origin through ``trg``.
    """
    yield
    while True:
        origin, origin_rate = ref.reqs
        absolute, absolute_rate = src.reqs
        trg.pros = (absolute - origin,
                    absolute_rate - origin_rate)
        yield (trg.outs((True,)),)
@Image(".vec@rel2abs",
       src=Node(evs=(), args=(),
                ins=(), reqs=(),
                outs=(True,), pros=("_bar", "_t_bar")),
       trg=Node(evs=(True,), args=(),
                ins=(True,), reqs=("_bar", "_t_bar"),
                outs=(), pros=()),
       ref=Node(evs=(), args=(),
                ins=(), reqs=("_bar", "_t_bar"),
                outs=(), pros=()))
def rel2abs(src, trg, ref):
    """Relative to absolute origin.

    Generator image (inverse of ``abs2rel``): each cycle reads the origin
    state from ``ref`` and the relative state from ``trg``, then publishes
    the absolute state through ``src``.
    """
    yield
    while True:
        ref_bar, ref_t_bar = ref.reqs
        trg_bar, trg_t_bar = trg.reqs
        src_bar = trg_bar + ref_bar
        src_t_bar = trg_t_bar + ref_t_bar
        # BUG FIX: was ``src.pros = nsrc_bar, src_t_bar`` — ``nsrc_bar`` is
        # never defined and raised NameError on the first event.
        src.pros = src_bar, src_t_bar
        yield (src.outs((True,)),)
@Image(".vec@nrt2rot",
       nrt=Node(evs=(True,), args=(),
                ins=(True,), reqs=("_bar", "_t_bar"),
                outs=(), pros=()),
       rot=Node(evs=(), args=(),
                ins=(), reqs=(),
                outs=(True,), pros=("_bar", "_t_bar")),
       ax=Node(evs=(), args=(),
               ins=(), reqs=("_bar", "_t_bar"),
               outs=(), pros=()))
def nrt2rot(nrt, rot, ax):
    """Inertial to rotating frame.

    Rotates the vector (and its time derivative) held by ``nrt`` about the
    rotation vector ``ax`` (direction = axis, magnitude = angle in radians)
    using Rodrigues' rotation formula, publishing the result through ``rot``.
    """
    yield
    while True:
        ax_bar, ax_t_bar = ax.reqs
        nrt_bar, nrt_t_bar = nrt.reqs
        # sin and cos of the rotation angle (the norm of the rotation vector)
        # BUG FIX: was ``scipy.linalg.norm(_bar)`` — ``_bar`` is undefined.
        th = scipy.linalg.norm(ax_bar)
        ax_hat = ax_bar / th
        cos_th = math.cos(th)
        sin_th = math.sin(th)
        # angle rate and unit-axis rate
        # BUG FIX: was ``numpy.dot(_bar, _t_bar)`` — both names undefined.
        th_t = numpy.dot(ax_bar, ax_t_bar) / th
        ax_t_hat = (th * ax_t_bar - th_t * ax_bar) / th ** 2
        # dot and cross products
        dot_th = numpy.dot(ax_hat, nrt_bar)
        cross_th = numpy.cross(ax_hat, nrt_bar)
        # XXX there still might be a sign wrong here somewhere...
        rot_bar = (cos_th * nrt_bar
                   - sin_th * cross_th
                   + (1 - cos_th) * dot_th * ax_hat)
        # time derivative of the Rodrigues formula (product rule over every
        # time-varying factor: angle, axis, and the rotated vector itself)
        rot_t_bar = (cos_th * (nrt_t_bar - th_t * cross_th)
                     - sin_th * (numpy.cross(ax_t_hat, nrt_bar) +
                                 numpy.cross(ax_hat, nrt_t_bar) +
                                 th_t * (nrt_bar - dot_th * ax_hat))
                     + (1 - cos_th) * (ax_hat * (numpy.dot(ax_t_hat, nrt_bar) +
                                                 numpy.dot(ax_hat, nrt_t_bar)) +
                                       dot_th * ax_t_hat))
        rot.pros = rot_bar, rot_t_bar
        yield (rot.outs((True,)),)
@Image(".vec@rot2nrt",
       nrt=Node(evs=(), args=(),
                ins=(), reqs=(),
                outs=(True,), pros=("_bar", "_t_bar")),
       rot=Node(evs=(True,), args=(),
                ins=(True,), reqs=("_bar", "_t_bar"),
                outs=(), pros=()),
       ax=Node(evs=(), args=(),
               ins=(), reqs=("_bar", "_t_bar"), outs=(), pros=()))
def rot2nrt(nrt, rot, ax):
    """Rotating to inertial frame (inverse of ``nrt2rot``).

    Rotates the vector (and rate) held by ``rot`` back through the rotation
    vector ``ax`` and publishes the inertial-frame result through ``nrt``.

    BUG FIXES vs. the original: the body referenced the undefined names
    ``_bar``, ``_t_bar``, ``nrt_bar``, ``nrt_t_bar`` and ``th_h`` (NameError
    on first use), and wrote its results to ``rot`` even though the ``nrt``
    node is the one declared with outputs.  The formula below mirrors
    ``nrt2rot`` with the rotation sign reversed.
    """
    yield
    while True:
        ax_bar, ax_t_bar = ax.reqs
        rot_bar, rot_t_bar = rot.reqs
        # sin and cos of the rotation angle
        th = scipy.linalg.norm(ax_bar)
        ax_hat = ax_bar / th
        cos_th = math.cos(th)
        sin_th = math.sin(th)
        # angle rate and unit-axis rate
        th_t = numpy.dot(ax_bar, ax_t_bar) / th
        ax_t_hat = (th * ax_t_bar - th_t * ax_bar) / th ** 2
        # dot and cross products
        dot_th = numpy.dot(ax_hat, rot_bar)
        cross_th = numpy.cross(ax_hat, rot_bar)
        # XXX there still might be a sign wrong here somewhere...
        nrt_bar = (cos_th * rot_bar
                   + sin_th * cross_th
                   + (1 - cos_th) * dot_th * ax_hat)
        nrt_t_bar = (cos_th * (rot_t_bar + th_t * cross_th)
                     + sin_th * (numpy.cross(ax_t_hat, rot_bar) +
                                 numpy.cross(ax_hat, rot_t_bar) -
                                 th_t * (rot_bar - dot_th * ax_hat))
                     + (1 - cos_th) * (ax_hat * (numpy.dot(ax_t_hat, rot_bar) +
                                                 numpy.dot(ax_hat, rot_t_bar)) +
                                       dot_th * ax_t_hat))
        nrt.pros = nrt_bar, nrt_t_bar
        yield (nrt.outs((True,)),)
@Image(".vec@fun2obl",
       fun=Node(evs=("rec",), args=(),
                ins=("rec",), reqs=("_bar", "_t_bar"),
                outs=(), pros=()),
       obl=Node(evs=(), args=(),
                ins=(), reqs=(),
                outs=("rec",), pros=("_bar", "_t_bar")),
       node=Node(evs=(), args=(),
                 ins=(), reqs=("_bar", "_t_bar"),
                 outs=(), pros=()),
       pole=Node(evs=(), args=(),
                 ins=(), reqs=("_bar", "_t_bar"),
                 outs=(), pros=()))
def fun2obl(fun, obl, node, pole):
    """Fundamental to oblique plane.

    Each cycle builds the oblique frame from the ascending ``node`` direction
    (i-axis) and the plane ``pole`` (normal), then projects the vector and
    rate held by ``fun`` onto that frame, publishing components via ``obl``.
    """
    yield
    while True:
        n_bar, n_t_bar = node.reqs
        p_bar, p_t_bar = pole.reqs
        fun_bar, fun_t_bar = fun.reqs
        # vector triad: transverse direction completing node/pole into a frame
        t_bar = numpy.cross(p_bar, n_bar)
        t_t_bar = (numpy.cross(p_t_bar, n_bar) +
                   numpy.cross(p_bar, n_t_bar))
        # vector norms
        n = scipy.linalg.norm(n_bar)
        t = scipy.linalg.norm(t_bar)
        # unit vectors of the oblique frame
        j_hat = t_bar / t
        i_hat = n_bar / n
        k_hat = numpy.cross(i_hat, j_hat)
        # vector norm rates
        n_t = numpy.dot(n_bar, n_t_bar) / n
        t_t = numpy.dot(t_bar, t_t_bar) / t
        # unit vector rates (quotient rule, simplified)
        i_t_hat = (n_t_bar - n_t * i_hat) / n
        j_t_hat = (t_t_bar - t_t * j_hat) / t
        k_t_hat = (numpy.cross(i_t_hat, j_hat) +
                   numpy.cross(i_hat, j_t_hat))
        # components of the vector in the oblique frame
        obl_bar = numpy.array([numpy.dot(fun_bar, i_hat),
                               numpy.dot(fun_bar, j_hat),
                               numpy.dot(fun_bar, k_hat)])
        # component rates: product rule over vector and (rotating) frame
        obl_t_bar = numpy.array([numpy.dot(fun_t_bar, i_hat) +
                                 numpy.dot(fun_bar, i_t_hat),
                                 numpy.dot(fun_t_bar, j_hat) +
                                 numpy.dot(fun_bar, j_t_hat),
                                 numpy.dot(fun_t_bar, k_hat) +
                                 numpy.dot(fun_bar, k_t_hat)])
        obl.pros = obl_bar, obl_t_bar
        yield (obl.outs((True,)),)
@Image(".vec@obl2fun",
       fun=Node(evs=(), args=(),
                ins=(), reqs=(),
                outs=("rec",), pros=("_bar", "_t_bar")),
       obl=Node(evs=("rec",), args=(),
                ins=("rec",), reqs=("_bar", "_t_bar"),
                outs=(), pros=()),
       node=Node(evs=(), args=(),
                 ins=(), reqs=("_bar", "_t_bar"),
                 outs=(), pros=()),
       pole=Node(evs=(), args=(),
                 ins=(), reqs=("_bar", "_t_bar"),
                 outs=(), pros=()))
def obl2fun(fun, obl, node, pole):
    """Oblique to fundamental plane (inverse of ``fun2obl``).

    Rebuilds the oblique frame from ``node`` and ``pole`` and expands the
    component vector held by ``obl`` back into the fundamental frame,
    publishing the result through ``fun``.
    """
    yield
    while True:
        n_bar, n_t_bar = node.reqs
        p_bar, p_t_bar = pole.reqs
        obl_bar, obl_t_bar = obl.reqs
        # vector triad: transverse direction completing node/pole into a frame
        t_bar = numpy.cross(p_bar, n_bar)
        t_t_bar = (numpy.cross(p_t_bar, n_bar) +
                   numpy.cross(p_bar, n_t_bar))
        # vector norms
        n = scipy.linalg.norm(n_bar)
        t = scipy.linalg.norm(t_bar)
        # unit vectors of the oblique frame
        j_hat = t_bar / t
        i_hat = n_bar / n
        k_hat = numpy.cross(i_hat, j_hat)
        # vector norm rates
        n_t = numpy.dot(n_bar, n_t_bar) / n
        t_t = numpy.dot(t_bar, t_t_bar) / t
        # unit vector rates (quotient rule; algebraically the same form that
        # fun2obl uses, just not pre-simplified)
        i_t_hat = (n * n_t_bar - n_t * n_bar) / (n * n)
        j_t_hat = (t * t_t_bar - t_t * t_bar) / (t * t)
        k_t_hat = (numpy.cross(i_t_hat, j_hat) +
                   numpy.cross(i_hat, j_t_hat))
        # expand components back onto the fundamental frame
        fun_bar = (obl_bar[0] * i_hat +
                   obl_bar[1] * j_hat +
                   obl_bar[2] * k_hat)
        # rate: product rule over components and (rotating) frame vectors
        fun_t_bar = (obl_t_bar[0] * i_hat + obl_bar[0] * i_t_hat +
                     obl_t_bar[1] * j_hat + obl_bar[1] * j_t_hat +
                     obl_t_bar[2] * k_hat + obl_bar[2] * k_t_hat)
        fun.pros = fun_bar, fun_t_bar
        yield (fun.outs((True,)),)
@Image(".vec@rec2sph",
       vec=Node(evs=("rec",), args=(),
                # BUG FIX: was reqs=("_bar") which is a plain string, not a
                # 1-tuple, unlike every other Node declaration in this module.
                ins=(), reqs=("_bar",),
                outs=("sph",), pros=("r", "az", "el")))
def rec2sph(vec):
    """Rectangular to spherical coordinates.

    Converts the cartesian vector held by ``vec`` into radius ``r``,
    azimuth ``az`` (radians, atan2 convention) and elevation ``el``
    (radians above the x-y plane).
    """
    # NOTE(review): other images here declare matching evs/ins pairs but this
    # one has ins=() — confirm against the Image/Node contract.
    yield
    while True:
        _bar, = vec.reqs
        r = scipy.linalg.norm(_bar)
        az = math.atan2(_bar[1], _bar[0])
        el = math.asin(_bar[2] / r)
        vec.pros = r, az, el
        yield (vec.outs((True,)),)
@Image(".vec@sph2rec",
       vec=Node(evs=("sph",), args=(),
                ins=(), reqs=("r", "az", "el"),
                # BUG FIX: was pros=("_bar") which is a plain string, not a
                # 1-tuple, unlike every other Node declaration in this module.
                outs=("rec",), pros=("_bar",)))
def sph2rec(vec):
    """Spherical to rectangular coordinates.

    Converts radius/azimuth/elevation held by ``vec`` back into a cartesian
    vector (inverse of ``rec2sph``).
    """
    yield
    while True:
        r, az, el = vec.reqs
        cos_az = math.cos(az)
        sin_az = math.sin(az)
        cos_el = math.cos(el)
        sin_el = math.sin(el)
        _bar = numpy.array([r * cos_az * cos_el,
                            r * sin_az * cos_el,
                            r * sin_el])
        vec.pros = _bar,
        yield (vec.outs((True,)),)
| true |
01244cdce59919aaa9e3cd68beda3487cb6f4d28 | Python | yauhenhapan/python-labs | /Lab3/many_random_walk_ans.py | UTF-8 | 1,390 | 3.828125 | 4 | [] | no_license | import numpy as np
def random_walks(walks_count):
walks = []
number_of_steps = 1500
for i in range(walks_count):
draws = np.random.randint(0, 2, size=number_of_steps)
steps = np.where(draws > 0, 1, -1)
walk = steps.cumsum()
walks.append(walk)
return walks
def find_min_position_in_walks(walks):
    """Return the lowest position reached by any walk in ``walks``."""
    per_walk_minima = [walk.min() for walk in walks]
    return np.array(per_walk_minima).min()
def find_max_position_in_walks(walks):
    """Return the highest position reached by any walk in ``walks``."""
    per_walk_maxima = [walk.max() for walk in walks]
    return np.array(per_walk_maxima).max()
def find_minimum_transition_time(walks, threshold):
    """Earliest step index at which any walk reaches |position| >= threshold.

    A walk that never crosses contributes infinity (argmax of an all-False
    mask is 0, which is mapped to inf — index 0 itself can never be a real
    crossing because the first step has magnitude 1 < threshold).
    Returns inf when no walk crosses at all.
    """
    first_crossings = []
    for walk in walks:
        jump = (np.abs(walk) >= threshold).argmax()
        # BUG FIX: was ``np.Inf``, an alias removed in NumPy 2.0; ``np.inf``
        # is the supported spelling in both 1.x and 2.x.
        first_crossings.append(jump if jump > 0 else np.inf)
    return np.array(first_crossings).min()
def main():
    """Drive the simulation: 5000 walks; report the global extremes and the
    earliest time any walk strays more than 30 steps from the origin."""
    n_walks = 5000
    threshold = 30
    all_walks = random_walks(n_walks)
    earliest_crossing = find_minimum_transition_time(all_walks, threshold)
    highest = find_max_position_in_walks(all_walks)
    lowest = find_min_position_in_walks(all_walks)
    print("max walk = ", highest)
    print("min walk = ", lowest)
    print("min transition time to get over than {0} = {1}".format(
        threshold, earliest_crossing))


main()
| true |
35af26db2f15bb57c7944525054a5408a1effc99 | Python | kumar-shubham/wisecells-library | /library/forms.py | UTF-8 | 986 | 2.578125 | 3 | [] | no_license | from django import forms
from library.models import Books
from django.core.validators import validate_email
class AddBookForm(forms.Form):
    """Form for registering a book: id, name, copy count and author."""
    book_id = forms.IntegerField()
    book_name = forms.CharField(max_length=50)
    count = forms.IntegerField()
    author = forms.CharField(max_length=50)

    def clean_book_name(self):
        """Require the book name to be at least 3 characters long."""
        book_name = self.cleaned_data['book_name']
        if len(book_name) < 3:
            # BUG FIX: was ``forms.ValidateError`` — that class does not
            # exist in Django; the correct one is forms.ValidationError.
            raise forms.ValidationError("Atleast 3 characters required!")
        return book_name
class AddStudentForm(forms.Form):
    """Form for registering a student: short id, name and e-mail address."""
    s_id = forms.CharField(max_length=4)
    name = forms.CharField(max_length=50)
    email = forms.EmailField()

    def clean_name(self):
        """Require the student name to be at least 3 characters long."""
        name = self.cleaned_data['name']
        if len(name) < 3:
            # BUG FIX: was ``forms.ValidateError`` — that class does not
            # exist in Django; the correct one is forms.ValidationError.
            raise forms.ValidationError("Atleast 3 characters required")
        return name

    def clean_email(self):
        """Re-run Django's e-mail validator and normalize the error message."""
        email = self.cleaned_data['email']
        try:
            validate_email(email)
            return email
        # BUG FIX: the bare name ``ValidationError`` was never imported in
        # this module (NameError when the validator rejected the address);
        # forms.ValidationError is the same class validate_email raises.
        except forms.ValidationError:
            raise forms.ValidationError("Enter a valid email address")
| true |
884c3ba8b6e61142818518e76891c95ca7af9eea | Python | madclumsil33t/atat | /atat/utils/session_limiter.py | UTF-8 | 650 | 2.546875 | 3 | [
"MIT"
] | permissive | from atat.domain.users import Users
class SessionLimiter(object):
    """Enforces at most one active session per user.

    On login, the user's previous session key is deleted from Redis and the
    new session id is recorded on the user record. The whole feature is
    gated by the LIMIT_CONCURRENT_SESSIONS config flag.
    """

    def __init__(self, config, session, redis):
        self.limit_logins = config["LIMIT_CONCURRENT_SESSIONS"]
        self.session_prefix = config.get("SESSION_KEY_PREFIX", "session:")
        self.session = session
        self.redis = redis

    def on_login(self, user):
        # Feature disabled: nothing to evict, nothing to record.
        if not self.limit_logins:
            return
        current_sid = self.session.sid
        self._delete_session(user.last_session_id)
        Users.update_last_session_id(user, current_sid)

    def _delete_session(self, session_id):
        # Session keys in Redis are stored as "<prefix><session id>".
        key = "{0}{1}".format(self.session_prefix, session_id)
        self.redis.delete(key)
| true |
f1a1e3fe0d1efe29838093be6173c483fac1e0c0 | Python | exxxar/python | /оценки и лабы других/бурцев/lab1/lab1/7.py | WINDOWS-1251 | 318 | 3.203125 | 3 | [] | no_license | # -*- coding: cp1251 -*-
# Python 2 script: mask the middle digits of a 16-digit card number.
# NOTE(review): the original cp1251 Cyrillic prompt/error strings were lost to
# encoding mangling in this copy; the literals below are kept exactly as-is.
try:
    # Prompt for a 16-digit card number.
    karta = raw_input(' 16 ')
    if len(karta) != 16:
        # Reject anything that is not exactly 16 characters long.
        raise ValueError(' . 16 ')
    # Show the first and last four digits; star out the middle eight.
    print karta[0:4] + "*" * 8 + karta[12:16]
except ValueError, e:
    print e
| true |
d2a74914a2ac20e34eb5f4c72edbc4d23af71438 | Python | kamyu104/GoogleCodeJam-2016 | /Round 1A/the-last-word.py | UTF-8 | 684 | 3.5 | 4 | [
"MIT"
] | permissive | # Copyright (c) 2016 kamyu. All rights reserved.
#
# Google Code Jam 2016 Round 1A - Problem A. The Last Word
# https://code.google.com/codejam/contest/4304486/dashboard#s=p0
#
# Time: O(L), L is the length of S
# Space: O(L)
#
from collections import deque
def the_last_word():
    """Read one word S from stdin and return the lexicographically largest
    word obtainable by appending each successive letter of S to either the
    front or the back of the word built so far (Python 2)."""
    S = raw_input().strip()
    word = deque()
    for c in S:
        # Use greedy strategy to put the smaller char in the back,
        # and put the larger or equal char in the front.
        if word and c >= word[0]:
            word.appendleft(c)
        else:
            word.append(c)
    return "".join(word)

# Python 2 driver: the first stdin line is the number of test cases.
for case in xrange(input()):
    print 'Case #%d: %s' % (case+1, the_last_word())
31c3018f23e4e467a1f3f57c54c11df80b3d963f | Python | MMMdata/BigQuery_auto_reporting | /load_data_from_csv.py | UTF-8 | 4,072 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
# Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line application that loads data into BigQuery from a CSV file in
Google Cloud Storage.
This sample is used on this page:
https://cloud.google.com/bigquery/loading-data-into-bigquery#loaddatagcs
For more information, see the README.md under /bigquery.
"""
import json
import time
import uuid
import yaml
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
# Load BigQuery connection settings from a local YAML file.
# NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
# files — prefer yaml.safe_load; also the file handle is never closed.
bq_import_creds = yaml.load(open('bq_import_creds.yml'))
project_id = bq_import_creds['project_id']
dataset_id = bq_import_creds['dataset_id']
source_schema = bq_import_creds['schema']
# NOTE(review): ``string``, ``filename``, ``cloud_storage_dir`` and
# ``table_id`` are never defined or imported in this file, so the three
# lines below raise NameError as written — confirm where these values were
# meant to come from (``string.replace`` is also Python 2 idiom; use
# ``filename.replace(".csv", "")`` instead).
table_name = dataset_id+string.replace(filename, ".csv", "")
source_path = cloud_storage_dir+table_id+filename
data_path = source_path
# [START load_table]
def load_table(bigquery, project_id, dataset_id, table_name, source_schema,
               source_path, num_retries=5):
    """Start a BigQuery job that loads a CSV from Cloud Storage into a table.

    Args:
        bigquery: an initialized and authorized bigquery client
            google-api-client object
        source_schema: a valid bigquery schema,
            see https://cloud.google.com/bigquery/docs/reference/v2/tables
        source_path: the fully qualified Google Cloud Storage location of
            the data to load into your table

    Returns: a bigquery load job, see
        https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
    """
    # A fresh job_id per call keeps retries from duplicating the load.
    job_reference = {
        'projectId': project_id,
        'job_id': str(uuid.uuid4()),
    }
    load_config = {
        'sourceUris': [source_path],
        'schema': {
            'fields': source_schema,
        },
        'destinationTable': {
            'projectId': project_id,
            'datasetId': dataset_id,
            'tableId': table_name,
        },
    }
    job_data = {
        'jobReference': job_reference,
        'configuration': {
            'load': load_config,
        },
    }
    request = bigquery.jobs().insert(projectId=project_id, body=job_data)
    return request.execute(num_retries=num_retries)
# [END load_table]
# [START poll_job]
def poll_job(bigquery, job):
    """Block until the given BigQuery job reaches the DONE state.

    Polls the job once per second; raises RuntimeError if the finished job
    carries an errorResult.
    """
    print('Waiting for job to finish...')
    job_ref = job['jobReference']
    request = bigquery.jobs().get(
        projectId=job_ref['projectId'],
        jobId=job_ref['jobId'])
    while True:
        status = request.execute(num_retries=2)['status']
        if status['state'] == 'DONE':
            if 'errorResult' in status:
                raise RuntimeError(status['errorResult'])
            print('Job complete.')
            return
        time.sleep(1)
# [END poll_job]
# [START run]
def main(project_id, dataset_id, table_name, schema_file, data_path,
         poll_interval, num_retries):
    """Load a CSV from Cloud Storage into BigQuery and wait for completion.

    Builds an authorized BigQuery client from the application-default
    credentials, reads the JSON schema file, submits the load job and polls
    until it finishes.  (``poll_interval`` is accepted for CLI compatibility
    but polling is fixed at one second inside poll_job.)
    """
    # [START build_service]
    # Application-default credentials -> authorized BigQuery service object.
    credentials = GoogleCredentials.get_application_default()
    bigquery = discovery.build('bigquery', 'v2', credentials=credentials)
    # [END build_service]

    with open(schema_file, 'r') as f:
        schema = json.load(f)

    job = load_table(bigquery, project_id, dataset_id, table_name,
                     schema, data_path, num_retries)
    poll_job(bigquery, job)
# [END run]
| true |
ce2c4314a20addda5ae8fa672b699e822734b108 | Python | tolgakurtulus/Python-First-Little-Project | /degiskenler.py | UTF-8 | 222 | 3.03125 | 3 | [] | no_license | sayi = 10 # integer
tc = "10101010108"  # string: national id number kept as text
isim = "engin"  # first name
soyisim = "demiroğ"  # last name
telefonNumarasi = "5354444444"  # phone number, stored as text
floatSayi = 10.0  # float literal
# Numeric string -> float conversion, then arithmetic.
print(float(tc) + 10)
# Inspect the runtime types of the variables above.
print(type(floatSayi))
print(type(sayi))  # ``sayi`` is the integer defined at the top of the file
print(type(tc))
| true |
3d2712bd8595bf86fa15998015e6cbbe6e7e63c0 | Python | vbehrens/Lab1 | /largest_num.py | UTF-8 | 250 | 3.25 | 3 | [] | no_license | lugage = int(raw_input("How much doues you lugage weigh, prompt enter a number"))
print type(lugage)
def weigh (lugage):
if lugage > 50:
print "There is a $25 charge for your suitcase"
else:
print "no charge"
weigh (lugage)
| true |
126025d90b455acd01941034a7a0c28f2b4ae22c | Python | jhassler1031/sports-api | /sports_api.py | UTF-8 | 1,399 | 2.953125 | 3 | [] | no_license | import records
from flask import Flask, jsonify
app = Flask(__name__)
db = records.Database("postgres://localhost/sports_db")
#first part of homework
@app.route("/first")
def show_html():
    """Return a hard-coded HTML fragment at /first."""
    return "<div>Hello world!</div>"
#updated to output JSON instead of HTML
@app.route("/second")
def show_json():
    """Return a small mixed-type list as a JSON response at /second."""
    payload = [1, 2, 3, "this is a string"]
    return jsonify(payload)
#Display data from sports_stats table in sports_db
#=========================================================================
@app.route("/")
def show_teams():
    """Dump every row of the sports_stats table as a JSON document."""
    rows = db.query("SELECT * FROM sports_stats;")
    return rows.export('json')
#==========================================================================
#long way below
# NOTE(review): the triple-quoted block below is dead code — a module-level
# string literal kept as a reference implementation of show_teams that
# coerces digit strings to int row-by-row; consider deleting it.
"""
@app.route("/")
def show_teams():
    teams = db.query("SELECT * FROM sports_stats;")
    team_list = []
    for team in teams:
        team = team.as_dict()
        row_list = {}
        for key, value in team.items():
            if str(value).isdigit():
                value = int(value)
            row_list[key] = value
        team_list.append(row_list)
    return jsonify(team_list)

teams = db.query("SELECT * FROM sports_stats;")
team_list = []
for team in teams:
    team = team.as_dict()
    row_list = {}
    for key, value in team.items():
        if str(value).isdigit():
            value = int(value)
        row_list[key] = value
    team_list.append(row_list)
print(team_list)
"""
| true |
df2ab86fc34f65d8a9d72d9f3d849baa7efd0029 | Python | janvonrickenbach/Chaco_wxPhoenix_py3 | /chaco/scales/tests/test_time_scale.py | UTF-8 | 15,784 | 2.875 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | import datetime
import os
import contextlib
import numpy as np
from chaco.scales.time_scale import (tfrac, trange, TimeScale,
CalendarScaleSystem)
from chaco.scales.api import TimeFormatter
# Note on testing:
# Chaco assumes times are in UTC seconds since Posix Epoch but does ticking
# in whatever the local time is on the host machine. This is problematic for
# testing, because correct responses depend on current state of the test
# machine, and will vary by location and time of year. For hour/day/year
# testing, where TZ matter, we select 3 timezones to test in:
#
# * UTC, because it's easy to reason about what the answer should be
# * Australia/North (ie. Alice Springs, Darwin) which is UTC+9:30 and with no
# daylight savings time shifts, which should hopefully reveal bugs with
# non-integer hour offsets.
# * Pacific/Honolulu which is UTC-10:00 and with no daylight savings time
# shifts, which should hopefully reveal bugs with previous-day or -year
# baselines.
#
# Note that we don't actually change the timezone for the process, but this is
# good enough to test the logic for the tfrac and trange functions.
#----------------------------------------------------------------
# Utilities
#----------------------------------------------------------------

# Interesting timezone offsets, expressed in seconds east of UTC.
UTC = 0.0
ALICE_SPRINGS = 9.5 * 3600   # Australia/North: UTC+09:30, never DST
HONOLULU = -10.0 * 3600      # Pacific/Honolulu: UTC-10:00, never DST
@contextlib.contextmanager
def set_timezone(tz):
    """ Temporarily select the timezone to use.

    This works by temporarily replacing the EPOCH module variable (in both
    chaco.scales.safetime and chaco.scales.time_scale) with a value shifted
    by the given offset, and restoring the originals on exit.  The process
    timezone itself is never changed.

    Parameters
    ----------
    tz : number
        Offset from UTC in seconds (see the UTC / ALICE_SPRINGS / HONOLULU
        constants above).  NOTE(review): the original docstring also claimed
        "str" was accepted, but only a numeric timestamp works with
        utcfromtimestamp — confirm before passing strings.
    """
    import chaco.scales.safetime
    import chaco.scales.time_scale

    new_epoch = datetime.datetime.utcfromtimestamp(tz)

    old_epoch = chaco.scales.safetime.EPOCH
    # set module global EPOCHs based on new timezone
    chaco.scales.safetime.EPOCH = new_epoch
    chaco.scales.time_scale.EPOCH = new_epoch
    try:
        yield
    finally:
        # restore module global EPOCHs
        chaco.scales.safetime.EPOCH = old_epoch
        chaco.scales.time_scale.EPOCH = old_epoch
#----------------------------------------------------------------
# tfrac tests
#----------------------------------------------------------------
# tfrac(t, **interval) splits the UTC timestamp ``t`` into (base, frac):
# the start of the enclosing interval in the selected local timezone, and
# the remainder of ``t`` past that start.

def test_tfrac_years_01():
    with set_timezone(UTC):
        t = 3601
        (base, frac) = tfrac(t, years=1)
        assert base == 0
        assert frac == 3601

def test_tfrac_years_01_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        t = 3601
        (base, frac) = tfrac(t, years=1)
        assert base == 3600 * -9.5  # Alice Springs year start UTC timestamp
        assert frac == 3600 * 10.5 + 1  # 10:30:01 in the morning Jan 1

def test_tfrac_years_01_Honolulu():
    # Pacific/Honolulu (UTC-10:00, never DST)
    with set_timezone(HONOLULU):
        t = 3601
        (base, frac) = tfrac(t, years=1)
        # previous Honolulu year start UTC timestamp
        assert base == 3600 * (-365 * 24 + 10)
        # 15:00:01 in the afternoon, Dec 31
        assert frac == 3600 * (364 * 24 + 15) + 1

def test_tfrac_years_02():
    with set_timezone(UTC):
        t = 3601
        (base, frac) = tfrac(t, years=10)
        assert base == 0
        assert frac == 3601

def test_tfrac_years_02_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        t = 3601
        (base, frac) = tfrac(t, years=10)
        assert base == 3600 * -9.5  # Alice Springs decade start UTC timestamp
        assert frac == 3600 * 10.5 + 1  # 10:30:01 in the morning Jan 1

def test_tfrac_years_02_Honolulu():
    # Pacific/Honolulu (UTC-10:00, never DST)
    with set_timezone(HONOLULU):
        t = 3601
        (base, frac) = tfrac(t, years=10)
        # previous Honolulu decade start UTC timestamp (including leap years)
        assert base == 3600 * (-(365 * 10 + 3) * 24 + 10)
        # 15:00:01 in the afternoon, Dec 31, 9 years into decade
        assert frac == 3600 * ((365 * 9 + 3 + 364) * 24 + 15) + 1

def test_tfrac_days_01():
    with set_timezone(UTC):
        t = 3601
        (base, frac) = tfrac(t, days=1)
        assert base == 0
        assert frac == 3601

def test_tfrac_days_01_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        t = 3601
        (base, frac) = tfrac(t, days=1)
        assert base == 3600 * -9.5  # Alice Springs day start UTC timestamp
        assert frac == 3600 * 10.5 + 1  # 10:30:01 in the morning

def test_tfrac_days_01_Honolulu():
    # Pacific/Honolulu (UTC-10:00, never DST)
    with set_timezone(HONOLULU):
        t = 3601
        (base, frac) = tfrac(t, days=1)
        # previous Honolulu day start UTC timestamp
        assert base == 3600 * (-24 + 10)
        assert frac == 3600 * 15 + 1  # 15:00:01 in the afternoon

def test_tfrac_days_02():
    with set_timezone(UTC):
        t = 3 * 24.0 * 3600 + 1000.0
        (base, frac) = tfrac(t, days=1)
        assert base == 3600 * 24 * 3
        assert frac == 1000

def test_tfrac_days_02_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        t = 3 * 24.0 * 3600 + 1000.0
        (base, frac) = tfrac(t, days=1)
        assert base == 3600 * (24 * 3 - 9.5)
        assert frac == 3600 * 9.5 + 1000

def test_tfrac_days_02_Honolulu():
    # Pacific/Honolulu (UTC-10:00, never DST)
    with set_timezone(HONOLULU):
        t = 3 * 24.0 * 3600 + 1000.0
        (base, frac) = tfrac(t, days=1)
        assert base == 3600 * (24 * 2 + 10)
        assert frac == 3600 * (24 - 10) + 1000

def test_tfrac_hours_01():
    with set_timezone(UTC):
        t = 3601
        (base, frac) = tfrac(t, hours=1)
        assert base == 3600
        assert frac == 1

def test_tfrac_hours_01_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        t = 3601
        (base, frac) = tfrac(t, hours=1)
        # hour boundaries land on the half hour in a +09:30 zone
        assert base == 1800
        assert frac == 1801

def test_tfrac_hours_02():
    with set_timezone(UTC):
        t = 3601
        (base, frac) = tfrac(t, hours=2)
        assert base == 0
        assert frac == 3601

def test_tfrac_hours_02_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        t = 3601
        (base, frac) = tfrac(t, hours=2)
        assert base == 1800
        assert frac == 1801

def test_tfrac_hours_03():
    with set_timezone(UTC):
        t = 3600 * 5.5
        (base, frac) = tfrac(t, hours=2)
        assert base == 3600 * 4
        assert frac == 3600 * 1.5

def test_tfrac_hours_03_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        t = 3600 * 5.5
        (base, frac) = tfrac(t, hours=2)
        assert base == 3600 * 4.5
        assert frac == 3600 * 1

def test_tfrac_hours_04():
    with set_timezone(UTC):
        t = 3600 * 5.5
        (base, frac) = tfrac(t, hours=3)
        assert base == 3600 * 3.0
        assert frac == 3600 * 2.5

def test_tfrac_hours_04_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        t = 3600 * 5.5
        (base, frac) = tfrac(t, hours=3)
        assert base == 3600 * 5.5
        assert frac == 3600 * 0

def test_tfrac_hours_05():
    with set_timezone(UTC):
        t = 3600 * 15.5
        (base, frac) = tfrac(t, hours=6)
        assert base == 3600 * 12.0
        assert frac == 3600 * 3.5

def test_tfrac_hours_05_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        t = 3600 * 15.5
        (base, frac) = tfrac(t, hours=6)
        assert base == 3600 * 14.5
        assert frac == 3600 * 1.0

# Minute-and-finer intervals are timezone independent, so no tz context below.

def test_tfrac_minutes_01():
    t = 3601
    (base, frac) = tfrac(t, minutes=1)
    assert base == 3600
    assert frac == 1

def test_tfrac_minutes_02():
    t = 123.5
    (base, frac) = tfrac(t, minutes=1)
    assert base == 120
    assert frac == 3.5

def test_tfrac_seconds_01():
    t = 3601
    (base, frac) = tfrac(t, seconds=1)
    assert base == 3601
    assert frac == 0

def test_tfrac_seconds_02():
    t = 1.75
    (base, frac) = tfrac(t, seconds=1)
    assert base == 1
    assert frac == 0.75

def test_tfrac_milliseconds_01():
    t = 123.5
    (base, frac) = tfrac(t, milliseconds=1)
    assert base == 123.5
    assert frac == 0.0

def test_tfrac_milliseconds_02():
    t = 10.0625
    (base, frac) = tfrac(t, milliseconds=1)
    assert base == 10.062
    assert frac == 0.0005

def test_tfrac_milliseconds_03():
    t = 10.0625
    (base, frac) = tfrac(t, milliseconds=10)
    assert base == 10.06
    assert frac == 0.0025

def test_tfrac_milliseconds_04():
    t = 1.0078121
    # Note that the last digit is lost due to rounding to microsecond scale.
    (base, frac) = tfrac(t, milliseconds=1)
    assert base == 1.007
    assert frac == 0.000812

def test_tfrac_milliseconds_05():
    t = 1.0078056
    # Note that the last digit is lost due to rounding to microsecond scale.
    (base, frac) = tfrac(t, milliseconds=1)
    assert base == 1.007
    assert frac == 0.000806
#----------------------------------------------------------------
# trange tests
#----------------------------------------------------------------
# trange(start, end, **interval) returns the interval boundaries (in the
# selected local timezone) that fall inside [start, end].

def test_trange_hours_01():
    with set_timezone(UTC):
        r = trange(0, 1, hours=1)
        assert r == []

def test_trange_hours_01_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        r = trange(0, 1, hours=1)
        assert r == []

def test_trange_hours_01_Honolulu():
    # Pacific/Honolulu (UTC-10:00, never DST)
    with set_timezone(HONOLULU):
        r = trange(0, 1, hours=1)
        assert r == []

def test_trange_hours_02():
    with set_timezone(UTC):
        r = trange(-1, 1, hours=1)
        assert r == [0.0]

def test_trange_hours_02_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        r = trange(-1, 1, hours=1)
        assert r == []

def test_trange_hours_02_Honolulu():
    # Pacific/Honolulu (UTC-10:00, never DST)
    with set_timezone(HONOLULU):
        r = trange(-1, 1, hours=1)
        assert r == [0.0]

def test_trange_hours_03():
    with set_timezone(UTC):
        r = trange(0, 3600, hours=1)
        assert r == [0.0, 3600.0]

def test_trange_hours_03_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        r = trange(0, 3600, hours=1)
        # local hour boundaries fall on the half hour in a +09:30 zone
        assert r == [1800.0]

def test_trange_hours_03_Honolulu():
    # Pacific/Honolulu (UTC-10:00, never DST)
    with set_timezone(HONOLULU):
        r = trange(0, 3600, hours=1)
        assert r == [0.0, 3600.0]

def test_trange_hours_04():
    with set_timezone(UTC):
        r = trange(-3600, 3600, hours=1)
        assert r == [-3600.0, 0.0, 3600.0]

def test_trange_hours_Alice_Springs():
    # NOTE(review): misnamed — should be test_trange_hours_04_Alice_Springs
    # to follow the pattern of its siblings.
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        r = trange(-3600, 3600, hours=1)
        assert r == [-1800.0, 1800.0]

def test_trange_hours_04_Honolulu():
    # Pacific/Honolulu (UTC-10:00, never DST)
    with set_timezone(HONOLULU):
        r = trange(-3600, 3600, hours=1)
        assert r == [-3600.0, 0.0, 3600.0]

def test_trange_hours_05():
    with set_timezone(UTC):
        r = trange(-10, 3610, hours=1)
        assert r == [0.0, 3600.0]

def test_trange_hours_06():
    with set_timezone(UTC):
        r = trange(-10, 7210, hours=1)
        assert r == [0.0, 3600.0, 7200.0]

def test_trange_hours_07():
    with set_timezone(UTC):
        r = trange(-10, 7210, hours=2)
        assert r == [0.0, 7200.0]

def test_trange_hours_07_Alice_Springs():
    # Australia/North (UTC+09:30, never DST)
    with set_timezone(ALICE_SPRINGS):
        r = trange(-10, 7210, hours=2)
        assert r == [1800.0]

def test_trange_hours_07_Honolulu():
    # Pacific/Honolulu (UTC-10:00, never DST)
    with set_timezone(HONOLULU):
        r = trange(-10, 7210, hours=2)
        assert r == [0.0, 7200.0]

# Second-and-finer intervals are timezone independent.

def test_trange_seconds_01():
    r = trange(0, 1, seconds=1)
    assert r == [0.0, 1.0]

def test_trange_seconds_02():
    r = trange(0, 10, seconds=1)
    assert r == list(range(11))

def test_trange_seconds_03():
    r = trange(0, 1.5, seconds=1)
    assert r == [0.0, 1.0]

def test_trange_milliseconds_01():
    r = trange(0, 0.1, milliseconds=1)
    assert np.allclose(np.array(r), np.linspace(0.0, 0.1, 101))

def test_trange_milliseconds_02():
    r = trange(-0.002, 0.001, milliseconds=1)
    assert np.allclose(np.array(r), np.linspace(-0.002, 0.001, 4))
#----------------------------------------------------------------
# TimeScale tests
#----------------------------------------------------------------

# Could use more tests here... --WW

def test_time_scale_seconds_01():
    ts = TimeScale(seconds=1)
    ticks = ts.ticks(0, 10)
    assert (np.array(ticks) == np.linspace(0.0, 10.0, 11)).all()

def test_time_scale_seconds_02():
    ts = TimeScale(seconds=2)
    ticks = ts.ticks(0, 10)
    assert (np.array(ticks) == np.linspace(0.0, 10.0, 6)).all()

def test_time_scale_milliseconds_01():
    ts = TimeScale(milliseconds=1)
    ticks = ts.ticks(0, 0.1)
    assert len(ticks) == 11
    assert (np.array(ticks) == np.linspace(0.0, 0.1, 11)).all()

def test_time_scale_with_formatter():
    """ Regression test for TimeScale() with formatter keyword.

    Using the formatter keyword in the constructor of TimeScale
    could raise a KeyError.  This test passes if no exception is
    raised.
    """
    ts = TimeScale(seconds=1, formatter=TimeFormatter())
    ts = TimeScale(minutes=1, formatter=TimeFormatter())
#----------------------------------------------------------------
# CalendarScaleSystem tests
#----------------------------------------------------------------

def test_calendar_scale_system_01():
    css = CalendarScaleSystem()
    ticks = css.ticks(0, 10)
    assert len(ticks) == 11
    assert (np.array(ticks) == np.linspace(0, 10, 11)).all()

# TODO: Add more tests of the ticks() and labels() methods of
# the CalendarScaleSystem.
#
# Determine why the format switches from '##s' to ':##'
# as in the following, and create appropriate tests:
#
# In [145]: css.labels(71010,71021, numlabels=8, char_width=130)
# Out[145]:
# [(71010.0, '30s'),
#  (71011.0, '31s'),
#  (71012.0, '32s'),
#  (71013.0, '33s'),
#  (71014.0, '34s'),
#  (71015.0, '35s'),
#  (71016.0, '36s'),
#  (71017.0, '37s'),
#  (71018.0, '38s'),
#  (71019.0, '39s'),
#  (71020.0, '40s'),
#  (71021.0, '41s')]
#
# In [146]: css.labels(71010,71022, numlabels=8, char_width=130)
# Out[146]:
# [(71010.0, ':30'),
#  (71011.0, ':31'),
#  (71012.0, ':32'),
#  (71013.0, ':33'),
#  (71014.0, ':34'),
#  (71015.0, ':35'),
#  (71016.0, ':36'),
#  (71017.0, ':37'),
#  (71018.0, ':38'),
#  (71019.0, ':39'),
#  (71020.0, ':40'),
#  (71021.0, ':41'),
#  (71022.0, ':42')]
#
# In [147]:
| true |
1b2bb5e026a2d48d6c98d6282aa6f43468c9e418 | Python | Harvnlenny/demo | /demo/features/steps/players.py | UTF-8 | 583 | 3.03125 | 3 | [] | no_license | from behave import given, when, then
from hamcrest import assert_that, equal_to, contains_string
from pages.home.home_page import HomePage
@when('a name is typed into the search field')
def player(context):
    # Search for a fixed, known player from the home page.
    target = "Bernie Kosar"
    home = HomePage(context.driver)
    home.player_search(target)
@then('the User will be on the page of the Player')
def player(context):
    # The player-page heading should contain the name that was searched for.
    expected_name = "Bernie Kosar"
    heading = context.driver.find_element_by_xpath("//*[@id='meta']/div[2]/h1")
    assert_that(heading.text, contains_string(expected_name))
e2a89949b2eaee7affb98f1c34aa5b3f9a4713e9 | Python | happyhappyhappyhappy/pythoncode | /atcoder/mizuiro/bitzentansaku/s8pc4b_build_color/second/submit2.py | UTF-8 | 1,315 | 2.75 | 3 | [] | no_license | # ライブラリのインポート
import sys
import pprint as pp
from collections import Counter
from itertools import product
from logging import getLogger, StreamHandler, DEBUG
# 入力のマクロ
# Input helpers: each reads exactly one line from stdin and parses it.
def II():
    """Read a single integer."""
    return int(sys.stdin.readline())

def MI():
    """Read a line of space-separated integers as a map iterator."""
    return map(int, sys.stdin.readline().split())

def LI():
    """Read a line of space-separated integers as a list."""
    return list(MI())

def LLI(rows_number):
    """Read `rows_number` lines of integers as a list of lists."""
    return [LI() for _ in range(rows_number)]
# Set up debug-level logging to stderr (translated: "create debug output")
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False
# Short aliases bundling class+method into one callable (translated comment)
xdebug=logger.debug
ppp=pp.pprint
# Const: sentinel bounds used as +/- infinity by the search below
MAXSIZE = ( 1 << 59 ) -1
MINSIZE = -( 1 << 59) + 1
# Brute-force every keep/skip subset, scanning buildings top to bottom
# (translated: "let's go from top to bottom")
N,K = MI()
A = LI()
answer = MAXSIZE
# Enumerate every 0/1 selection pattern over the N buildings
for SL in product([0,1] , repeat=N):
    cnt=Counter(SL)
    # exactly K buildings must be selected
    if cnt[1] != K:
        continue
    # the first building must always be among the selected ones
    if SL[0] != 1:
        continue
    costmin=0
    maxbuild=A[0]
    for x in range(1,N):
        if SL[x] == 1:
            if A[x] <= maxbuild:
                # raise this building to one unit above the current tallest
                nowcost = maxbuild+1-A[x]
                costmin = costmin+nowcost
                maxbuild=maxbuild+1
            maxbuild=max(maxbuild,A[x])
    answer=min(costmin,answer)
print(answer)
| true |
e70627cd11fbf5124d1502b37d616c0e64295f10 | Python | facebookresearch/Kats | /kats/detectors/multivariate_detector.py | UTF-8 | 7,033 | 2.734375 | 3 | [
"MIT"
] | permissive | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module implements the multivariate Outlier Detection algorithm as a Detector Model.
"""
import json
from typing import Any, Optional
import numpy as np
import pandas as pd
from kats.consts import Params, TimeSeriesData
from kats.detectors.detector import DetectorModel
from kats.detectors.detector_consts import AnomalyResponse, ConfidenceBand
from kats.detectors.outlier import (
MultivariateAnomalyDetector,
MultivariateAnomalyDetectorType,
)
from kats.models.var import VARParams
class MultivariateAnomalyDetectorModel(DetectorModel):
    """Anomaly detection model based on outlier detector.

    A Detector Model detects outliers for multivariate time series

    Attributes:
        data: Input metrics TimeSeriesData
        params: Parameter class for multivariate VAR/ BVAR model
        training_days: num of days of data to use for initial training.
                    If less than a day, specify as fraction of a day
        model_type: The type of multivariate anomaly detector (currently 'BAYESIAN_VAR' and 'VAR' options available)
        serialized_model: Optional; json, representing data from a previously serialized model.
    """

    # Class-level defaults; params/training_days are overridden per instance in __init__.
    params: Params = VARParams(maxlags=2)
    training_days: float
    model_type: MultivariateAnomalyDetectorType = MultivariateAnomalyDetectorType.VAR
    # Fitted detector instance; set by fit(), None until then.
    model: Optional[MultivariateAnomalyDetector] = None

    def __init__(
        self,
        params: Params = params,
        training_days: float = 60.0,
        serialized_model: Optional[bytes] = None,
    ) -> None:
        if serialized_model:
            # Restore params/training_days from a previous serialize() call.
            # NOTE(review): model_type is not restored here and keeps the class
            # default — confirm whether that is intentional.
            model_dict = json.loads(serialized_model)
            self.params = model_dict["params"]
            self.training_days = model_dict["training_days"]
        else:
            if params:
                self.params = params
            self.training_days = training_days

    def serialize(self) -> bytes:
        """Serialize the model into a json.

        Serialize the model into a json so it can be loaded later.

        Returns:
            json containing information of the model.
        """
        # NOTE(review): self.params (a Params object) and self.model_type (an enum)
        # are not natively JSON-serializable, so json.dumps may raise TypeError
        # here — verify; the commented-out line below suggests a prior workaround.
        model_dict = {
            "params": self.params,
            "training_days": self.training_days,
            "model_type": self.model_type,
        }
        # model_dict = {"training_days": self.training_days, "model_type": self.model_type}
        return json.dumps(model_dict).encode("utf-8")

    def fit(
        self,
        data: TimeSeriesData,
        historical_data: Optional[TimeSeriesData] = None,
        **kwargs: Any,
    ) -> None:
        """Fit MultivariateAnomalyDetector.

        Fit MultivariateAnomalyDetector using both data and historical_data (if provided).

        Args:
            data: TimeSeriesData on which detection is run.
            historical_data: Optional; TimeSeriesData corresponding to history. History ends
                exactly where the data begins.

        Returns:
            None.
        """
        if historical_data is None:
            total_data = data
        else:
            # NOTE: extend() mutates the caller's historical_data in place.
            historical_data.extend(data)
            total_data = historical_data

        self.model = MultivariateAnomalyDetector(
            data=total_data,
            params=self.params,
            training_days=self.training_days,
            model_type=self.model_type,
        )
        # Runs the detection pass; scores become available via anomaly_score_df.
        self.model.detector()

    def predict(
        self,
        data: TimeSeriesData,
        historical_data: Optional[TimeSeriesData] = None,
        **kwargs: Any,
    ) -> AnomalyResponse:
        """Get anomaly scores.

        Get anomaly scores only for data.

        Args:
            data: TimeSeriesData on which detection is run
            historical_data: Optional; TimeSeriesData corresponding to history. History ends
                exactly where the data begins.

        Returns:
            AnomalyResponse object. The length of this obj.ect is same as data. The score property
            gives the score for anomaly.
        """
        # Lazily fit on first use so predict() can be called standalone.
        if self.model is None:
            self.fit(data=data, historical_data=historical_data)
        assert self.model is not None
        output_scores_df = self.model.anomaly_score_df
        assert output_scores_df is not None
        # Keep only the score rows that overlap the requested data window.
        output_scores_df = output_scores_df[output_scores_df.index >= data.time.min()]
        zeros = np.zeros(shape=(data.time.shape[0], output_scores_df.shape[1]))
        # NaN padding covers the initial training span for which no scores exist,
        # so the response lines up 1:1 with data.time.
        padding = np.empty(
            shape=[len(data) - output_scores_df.shape[0], output_scores_df.shape[1]]
        )
        # NOTE(review): np.NaN was removed in NumPy 2.0 — np.nan is the stable spelling.
        padding[:] = np.NaN
        padding = pd.DataFrame(padding, columns=output_scores_df.columns, copy=False)
        # all fields other than scores are left as TimeSeriesData with all zero values
        # NOTE(review): the slicing below assumes the last two columns of
        # anomaly_score_df are the overall anomaly magnitude and the p-value —
        # confirm against MultivariateAnomalyDetector's output schema.
        response = AnomalyResponse(
            scores=TimeSeriesData(
                time=data.time,
                value=pd.concat(
                    [padding.iloc[:, :-2], output_scores_df.iloc[:, :-2]],
                    ignore_index=True,
                    copy=False,
                ),
            ),
            confidence_band=ConfidenceBand(
                upper=TimeSeriesData(
                    time=data.time, value=pd.DataFrame(zeros, copy=False)
                ),
                lower=TimeSeriesData(
                    time=data.time, value=pd.DataFrame(zeros, copy=False)
                ),
            ),
            predicted_ts=TimeSeriesData(
                time=data.time, value=pd.DataFrame(zeros).iloc[:, :-2]
            ),
            anomaly_magnitude_ts=TimeSeriesData(
                time=data.time,
                value=pd.concat(
                    [padding.iloc[:, -2], output_scores_df.iloc[:, -2]],
                    ignore_index=True,
                    copy=False,
                ),
            ),
            stat_sig_ts=TimeSeriesData(
                time=data.time,
                value=pd.concat(
                    [padding.iloc[:, -1], output_scores_df.iloc[:, -1]],
                    ignore_index=True,
                    copy=False,
                ),
            ),
        )
        return response

    def fit_predict(
        self,
        data: TimeSeriesData,
        historical_data: Optional[TimeSeriesData] = None,
        **kwargs: Any,
    ) -> AnomalyResponse:
        """Fit a model and return the anomaly scores.

        Return AnomalyResponse, when data is passed to it.

        Args:
            data: TimeSeriesData on which detection is run.
            historical_data: Optional; TimeSeriesData corresponding to history. History ends
                exactly where the data begins.

        Returns:
            AnomalyResponse object. The length of this object is same as data. The score property
            gives the score for anomaly.
        """
        self.fit(data=data, historical_data=historical_data)
        return self.predict(data=data)
| true |
2b3dfc91f5349affaeccf18afd4ced315045de4e | Python | DadzieAma/MyFiles | /leap.py | UTF-8 | 431 | 4.40625 | 4 | [] | no_license | def get_age():
    # Despite the variable name, this is the birth YEAR, not an age.
    age = int(input("Enter the year you were born: "))
    # Gregorian leap-year rule: divisible by 4, except centuries,
    # except centuries divisible by 400.
    if(age%4) == 0:
        if(age%100) == 0:
            if(age%400) == 0:
                print("{0} is a leap year".format(age))
            else:
                print("{0} is not a leap year".format(age))
        else:
            print("{0} is a leap year".format(age))
    else:
        print("{0} is not a leap year".format(age))
# Run the prompt/report when the script is executed.
get_age()
66d77302a9013e262d43b9195ba2500b4f27ff76 | Python | AlthafHussainK/python_lab | /structs1.py | UTF-8 | 328 | 3.5 | 4 | [] | no_license | import csv
from student import Student
# Collect three students from stdin, then persist them as CSV rows.
students = []
for _ in range(3):  # loop index unused
    name = input("name: ")
    place = input("place: ")
    students.append(Student(name, place))

# Use a context manager so the file is closed even on error, and newline=""
# as required by the csv module to avoid blank lines on Windows.
with open("students.csv", "w", newline="") as file:
    writer = csv.writer(file)
    for student in students:
        writer.writerow((student.name, student.place))
| true |
29754ee163d31e5646ca85cb3cf6175b1734ab79 | Python | ksenyavasina/Cell-Phones-and-Accessories- | /word analysis.py | UTF-8 | 1,241 | 2.96875 | 3 | [] | no_license | import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
# Load the reviews dataset and drop neutral (3-star) reviews.
data = pd.read_csv('data.csv', delimiter=',')
df = data[data['review/score'] != 3]
# Features: raw review text; labels: 1-2 stars -> 0 (negative), 4-5 stars -> 1 (positive).
X = df['review/text']
y_dict = {1:0, 2:0, 4:1, 5:1}
y = df['review/score'].map(y_dict)
def word_analysis(X, y, model, clf_model):
    """Vectorize the text with `model`, fit `clf_model` on a train split, then
    report test accuracy and the 20 most positive / most negative words
    ranked by classifier coefficient."""
    features = model.fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(features, y, random_state=0)
    clf = clf_model.fit(X_train, y_train)

    accuracy = clf.score(X_test, y_test)
    print('Model Accuracy: {}'.format(accuracy))

    vocab = model.get_feature_names()
    weights = clf.coef_.tolist()[0]
    coeff_df = pd.DataFrame({'Word': vocab, 'Coefficient': weights})
    coeff_df = coeff_df.sort_values(['Coefficient', 'Word'], ascending=[0, 1])

    print('')
    print('-Top 20 positive-')
    print(coeff_df.head(20).to_string(index=False))
    print('')
    print('-Top 20 negative-')
    print(coeff_df.tail(20).to_string(index=False))
# Two candidate vectorizers; only the TF-IDF one is actually passed below.
c = CountVectorizer(stop_words='english')
tfidf = TfidfVectorizer(stop_words = 'english')
word_analysis(X, y, tfidf, LogisticRegression())
| true |
637b58f67780dc7d8d1fc392855280c36472b8f3 | Python | mattEhall/FrequencyDomain | /FrequencyDomain.py | UTF-8 | 61,182 | 2.78125 | 3 | [] | no_license | # 2020-05-03: This is a start at a frequency-domain floating support structure model for WEIS-WISDEM.
# Based heavily on the GA-based optimization framework from Hall 2013
# 2020-05-23: Starting as a script that will evaluate a design based on properties/data provided in separate files.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(1, '/code/MoorPy')
import MoorPy as mp
# reload the libraries each time in case we make any changes
import importlib
mp = importlib.reload(mp)
## This class represents linear (for now cylinderical) components in the substructure.
# It is meant to correspond to Member objects in the WEIS substructure ontology, but containing only
# the properties relevant for the Level 1 frequency-domain model, as well as additional data strucutres
# used during the model's operation.
class Member:
    """A linear (cylindrical, possibly tapered) component of the substructure.

    Corresponds to a Member in the WEIS substructure ontology, holding only the
    properties relevant to the Level 1 frequency-domain model, plus working
    arrays filled in during the model's operation.
    """

    def __init__(self, strin, nw):
        '''Initialize a Member from a space-delimited property string.

        Parameters
        ----------
        strin : str
            "id type dA dB xa ya za xb yb zb t l_fill rho_fill"
        nw : int
            number of analysis frequencies (sizes the complex amplitude arrays)
        '''
        # note: haven't decided yet how to lump masses and handle segments <<<<

        entries = strin.split()

        # use builtin int/float: the np.int/np.float aliases were removed in NumPy 1.24
        self.id   = int(entries[0])                         # member ID number
        self.type = int(entries[1])                         # member type flag

        self.dA = float(entries[2])                         # diameter of lower node [m]
        self.dB = float(entries[3])                         # diameter of upper node [m]
        self.rA = np.array(entries[4:7], dtype=np.double)   # x,y,z of lower node [m]
        self.rB = np.array(entries[7:10], dtype=np.double)  # x,y,z of upper node [m]
        self.t  = float(entries[10])                        # shell thickness [m]

        self.l_fill   = float(entries[11])                  # length of member (from end A) filled with ballast [m]
        self.rho_fill = float(entries[12])                  # density of ballast in member [kg/m^3]

        rAB = self.rB - self.rA
        self.l = np.linalg.norm(rAB)   # member length [m]
        self.q = rAB/self.l            # member axial unit vector

        self.p1 = np.zeros(3)          # member transverse unit vectors (filled in later)
        self.p2 = np.zeros(3)

        self.Cd_q  = 0.1               # axial drag coefficient
        self.Cd_p1 = 0.6               # transverse drag coefficients
        self.Cd_p2 = 0.6
        self.Ca_q  = 0.0               # axial added-mass coefficient
        self.Ca_p1 = 0.97              # transverse added-mass coefficients
        self.Ca_p2 = 0.97

        self.n  = 10                   # number of nodes per member
        self.dl = self.l/self.n        # lumped node length (approximate, for now) <<<

        self.w = 1                     # mass per unit length [kg/m]

        self.r = np.zeros([self.n,3])  # undisplaced node positions along member [m]
        self.d = np.zeros( self.n )    # local diameter along member [m]
        for i in range(self.n):
            self.r[i,:] = self.rA + (i/(self.n-1))*rAB                    # spread evenly for now
            self.d[i]   = self.dA + (i/(self.n-1))*(self.dB-self.dA)      # spread evenly since uniform taper

        # complex frequency-dependent amplitudes of quantities at each node along member (filled in later)
        self.dr = np.zeros([self.n,3,nw], dtype=complex)   # displacement
        self.v  = np.zeros([self.n,3,nw], dtype=complex)   # velocity
        self.a  = np.zeros([self.n,3,nw], dtype=complex)   # acceleration
        self.u  = np.zeros([self.n,3,nw], dtype=complex)   # wave velocity
        self.ud = np.zeros([self.n,3,nw], dtype=complex)   # wave acceleration

    def getDirections(self):
        '''Returns the axial (q) and transverse (p1, p2) direction vectors of the member.

        NOTE(review): p1 = [0,1,0] x q is only unit length when the member has no
        y-axis tilt (|p1| = sqrt(qx^2+qz^2)); confirm whether normalization is needed.
        '''
        q = self.q
        p1 = np.cross( np.array([0,1,0]), q)   # transverse unit direction vector in X-Z plane
        p2 = np.cross( q, p1 )                 # the other transverse unit vector
        return q, p1, p2

    def getInertia(self):
        '''Compute steel volume, center of mass, and moments of inertia (incl. ballast).

        Returns (v_steel, center, I_rad, I_ax, m_fill). Relies on the module-level
        rho_steel global — confirm it is defined before this is called.
        '''
        # Volume of steel based on the shell thickness [m^3]
        dAi = self.dA - 2*self.t
        dBi = self.dB - 2*self.t
        V_outer = (np.pi/4)*(1/3)*(self.dA**2+self.dB**2+self.dA*self.dB)*self.l
        V_inner = (np.pi/4)*(1/3)*(dAi**2+dBi**2+dAi*dBi)*self.l
        v_steel = V_outer - V_inner    # [m^3] volume of the steel shell

        # Ballast (future work - this doesn't account for any angle in the member.
        # If the member is horizontal, the ballast doesn't follow gravity)
        dB_fill = (dBi-dAi)*(self.l_fill/self.l) + dAi  # interpolated inner diameter at the ballast fill level
        v_fill = (np.pi/4)*(1/3)*(dAi**2+dB_fill**2+dAi*dB_fill)*self.l_fill  # [m^3]
        m_fill = self.rho_fill*v_fill  # [kg]

        # Center of mass (heights measured from end A along the member axis)
        hco = self.l*((self.dA**2 + 2*self.dA*self.dB + 3*self.dB**2)/(4*(self.dA**2 + self.dA*self.dB + self.dB**2)))
        hci = self.l*((dAi**2 + 2*dAi*dBi + 3*dBi**2)/(4*(dAi**2 + dAi*dBi + dBi**2)))
        hc_shell = ((hco*V_outer)-(hci*V_inner))/(V_outer-V_inner)  # [m] CoG of member shell relative to bottom node @ self.rA
        hc_fill = self.l_fill*((dAi**2 + 2*dAi*dB_fill + 3*dB_fill**2)/(4*(dAi**2 + dAi*dB_fill + dB_fill**2)))
        hc = ((hc_fill*self.rho_fill*v_fill)+(hc_shell*rho_steel*v_steel))/((self.rho_fill*v_fill)+(rho_steel*v_steel))
        center = self.rA + (self.q*hc)  # absolute CoG position

        # Moment of Inertia (equations from HydroDyn paper)
        # I@end for outer solid - I@end for inner solid = I_shell @end; then PA theorem to get I@CoG
        r1 = self.dA/2
        r2 = self.dB/2
        m = (r2-r1)/self.l
        r1i = (self.dA/2)-self.t
        r2i = (self.dB/2)-self.t
        mi = (r2i-r1i)/self.l
        if m==0:   # untapered (cylindrical) member
            Ir_end_outer = (1/12)*(rho_steel*self.l*np.pi*r1**2)*(3*r1**2 + 4*self.l**2)    # [kg-m^2] about end node
            Ir_end_inner = (1/12)*(rho_steel*self.l*np.pi*r1i**2)*(3*r1i**2 + 4*self.l**2)  # [kg-m^2] about end node
            Ir_end_steel = Ir_end_outer - Ir_end_inner  # I_outer - I_inner = I_shell, about end node
            # NOTE(review): the fill term below uses (2*r^2 + 4*L^2) where the steel
            # terms use (3*r^2 + 4*L^2) — confirm which coefficient is intended.
            Ir_end_fill = (1/12)*(self.rho_fill*self.l_fill*np.pi*r1i**2)*(2*r1i**2 + 4*self.l_fill**2)  # [kg-m^2] about end node
            Ir_end = Ir_end_steel + Ir_end_fill
            I_rad = Ir_end - ((rho_steel*v_steel)+m_fill)*hc**2   # parallel-axis shift to CoG

            I_ax_outer = (1/2)*rho_steel*np.pi*self.l*(r1**4)
            I_ax_inner = (1/2)*rho_steel*np.pi*self.l*(r1i**4)
            I_ax_steel = I_ax_outer - I_ax_inner
            I_ax_fill = (1/2)*self.rho_fill*np.pi*self.l_fill*(r1i**4)
            I_ax = I_ax_steel + I_ax_fill
        else:      # tapered member: build cones about the taper tip, subtract, shift
            Ir_tip_outer = abs((np.pi/20)*(rho_steel/m)*(1+(4/m**2))*(r2**5-r1**5))   # outer, about tip
            Ir_end_outer = abs(Ir_tip_outer - ((rho_steel/(3*m**2))*np.pi*(r2**3-r1**3)*((r1/m)+2*hc)*r1))  # outer, about node
            Ir_tip_inner = abs((np.pi/20)*(rho_steel/mi)*(1+(4/mi**2))*(r2i**5-r1i**5))  # inner, about tip
            Ir_end_inner = abs(Ir_tip_inner - ((rho_steel/(3*mi**2))*np.pi*(r2i**3-r1i**3)*((r1i/mi)+2*hc)*r1i))  # inner, about node
            Ir_end = Ir_end_outer - Ir_end_inner          # shell, about node
            I_rad_steel = Ir_end - (rho_steel*v_steel)*hc**2   # shell, about CoG by parallel-axis theorem

            I_ax_outer = (rho_steel*np.pi/(10*m))*(r2**5-r1**5)
            I_ax_inner = (rho_steel*np.pi/(10*mi))*(r2i**5-r1i**5)
            I_ax_steel = I_ax_outer - I_ax_inner

            if self.l_fill == 0:
                I_rad_fill = 0
                I_ax_fill = 0
            else:
                r2_fill = dB_fill/2
                mi_fill = (r2_fill-r1i)/self.l_fill
                # NOTE(review): rho_steel appears in Ir_tip_fill where rho_fill seems
                # intended (the end-node shift below uses rho_fill) — verify.
                Ir_tip_fill = abs((np.pi/20)*(rho_steel/mi_fill)*(1+(4/mi_fill**2))*(r2_fill**5-r1i**5))
                Ir_end_fill = abs(Ir_tip_fill - ((self.rho_fill/(3*mi_fill**2))*np.pi*(r2_fill**3-r1i**3)*((r1i/mi_fill)+2*hc)*r1i))  # inner, about node
                I_rad_fill = Ir_end_fill - m_fill*hc**2   # about CoG
                I_ax_fill = (self.rho_fill*np.pi/(10*mi_fill))*(r2_fill**5-r1i**5)

            I_rad = I_rad_steel + I_rad_fill   # about CoG
            I_ax = I_ax_steel + I_ax_fill

        return v_steel, center, I_rad, I_ax, m_fill

    def getHydrostatics(self):
        '''Calculates member hydrostatic properties, namely buoyancy and stiffness matrix.

        Returns (Fvec, Cmat, V_UW, r_center, AWP, IWP, xWP, yWP).
        NOTE(review): relies on module-level rho (water density) and g (gravity);
        confirm those globals are defined before this method is called.
        '''
        # partially submerged case
        if self.r[0,2]*self.r[-1,2] <= 0:   # if member crosses (or touches) the water plane

            # incline angles from the member's own axial unit vector
            # (previously these read a leaked module-level q; self.q is the intent)
            beta = np.arctan2(self.q[1], self.q[0])                               # member incline heading from x axis
            phi  = np.arctan2(np.sqrt(self.q[0]**2 + self.q[1]**2), self.q[2])    # member incline angle from vertical

            # precalculate trig functions
            cosPhi=np.cos(phi)
            sinPhi=np.sin(phi)
            tanPhi=np.tan(phi)
            cosBeta=np.cos(beta)
            sinBeta=np.sin(beta)
            tanBeta=sinBeta/cosBeta   # kept for the commented-out derivative terms below

            # -------------------- buoyancy and waterplane area properties ------------------------

            dWP = np.interp(0, self.r[:,2], self.d)        # diameter of member where its axis crosses the waterplane
            xWP = np.interp(0, self.r[:,2], self.r[:,0])   # x coordinate where member axis crosses the waterplane [m]
            yWP = np.interp(0, self.r[:,2], self.r[:,1])   # y coordinate where member axis crosses the waterplane [m]
            AWP = (np.pi/4)*dWP**2                         # waterplane area of member [m^2]
            IWP = (np.pi/64)*dWP**4                        # waterplane moment of inertia [m^4], approximating the waterplane area as a circle
            LWP = abs(self.r[0,2])/cosPhi                  # length of member still underwater (assumes z=0 @ SWL)

            # Total enclosed underwater volume
            V_UW = (np.pi/4)*(1/3)*(self.dA**2+dWP**2+self.dA*dWP)*LWP   # [m^3]

            L_center = TaperCV(0.5*self.dA, 0.5*dWP, LWP)  # distance from end A to center of buoyancy of member [m]
            r_center = self.rA + self.q*L_center           # absolute coordinates of center of volume [m]

            # >>>> question: should this function be able to use displaced/rotated values? <<<<

            # ------------- get hydrostatic derivatives ----------------
            # derivatives from global to local
            dPhi_dThx  = -sinBeta   # \frac{d\phi}{d\theta_x} = \sin\beta
            dPhi_dThy  =  cosBeta
            #dBeta_dThx = -cosBeta/tanBeta**2
            #dBeta_dThy = -sinBeta/tanBeta**2

            # note: below calculations are based on untapered case, but
            # temporarily approximated for taper by using dWP (diameter at water plane crossing) <<< this is rough

            # buoyancy force and moment about end A
            Fz = rho*g* V_UW
            M  = -rho*g*np.pi*( dWP**2/32*(2.0 + tanPhi**2) + 0.5*(self.rA[2]/cosPhi)**2)*sinPhi   # moment about axis of incline
            Mx = M*dPhi_dThx
            My = M*dPhi_dThy

            Fvec = np.zeros(6)              # buoyancy force (and moment) vector (about PRP)
            Fvec[2] = Fz                    # vertical buoyancy force [N]
            Fvec[3] = Mx + Fz*self.rA[1]    # moment about x axis [N-m]
            Fvec[4] = My - Fz*self.rA[0]    # moment about y axis [N-m]

            # derivatives aligned with incline heading
            dFz_dz   = -rho*g*np.pi*0.25*dWP**2 /cosPhi
            dFz_dPhi =  rho*g*np.pi*0.25*dWP**2*self.rA[2]*sinPhi/cosPhi**2
            dM_dz    =  1.0*dFz_dPhi
            dM_dPhi  = -rho*g*np.pi*0.25*dWP**2 * (dWP**2/32*(2*cosPhi + sinPhi**2/cosPhi + 2*sinPhi/cosPhi**2) + 0.5*self.rA[2]**2*(sinPhi**2+1)/cosPhi**3)

            # <<<<<<<<<<<< (end warning) <<<<<<<<<

            # derivatives in global coordinates about platform reference point
            #dFz_dz is already taken care of
            dFz_dThx = -dFz_dz*self.rA[1] + dFz_dPhi*dPhi_dThx
            dFz_dThy =  dFz_dz*self.rA[0] + dFz_dPhi*dPhi_dThy

            dMx_dz = -dFz_dz*self.rA[1] + dM_dz*dPhi_dThy   # = dFz_dThx
            dMy_dz =  dFz_dz*self.rA[0] + dM_dz*dPhi_dThx   # = dFz_dThy

            dMx_dThx = ( dFz_dz*self.rA[1] + dFz_dPhi*dPhi_dThx)*self.rA[1] + dM_dPhi*dPhi_dThx*dPhi_dThx
            dMx_dThy = (-dFz_dz*self.rA[0] + dFz_dPhi*dPhi_dThy)*self.rA[1] + dM_dPhi*dPhi_dThx*dPhi_dThy
            dMy_dThx =-( dFz_dz*self.rA[1] + dFz_dPhi*dPhi_dThy)*self.rA[0] + dM_dPhi*dPhi_dThy*dPhi_dThx
            dMy_dThy =-(-dFz_dz*self.rA[0] + dFz_dPhi*dPhi_dThy)*self.rA[0] + dM_dPhi*dPhi_dThy*dPhi_dThy

            # fill in stiffness matrix
            Cmat = np.zeros([6,6])   # hydrostatic stiffness matrix (about PRP)
            '''
            Cmat[2,2] = -dFz_dz
            Cmat[2,3] = -dFz_dThx
            Cmat[2,4] = -dFz_dThy
            Cmat[3,2] = -dMx_dz
            Cmat[4,2] = -dMy_dz    # ignoring symmetries for now, as a way to check equations
            Cmat[3,3] = -dMx_dThx
            Cmat[3,4] = -dMx_dThy
            Cmat[4,3] = -dMy_dThx
            Cmat[4,4] = -dMy_dThy
            '''
            # normal approach to hydrostatic stiffness, using this temporarily until above fancier approach is verified
            Cmat[2,2] = -dFz_dz
            Cmat[2,3] = rho*g*( -AWP*yWP )
            Cmat[2,4] = rho*g*(  AWP*xWP )
            Cmat[3,2] = rho*g*( -AWP*yWP )
            Cmat[3,3] = rho*g*(IWP + AWP*yWP**2 )
            Cmat[3,4] = rho*g*(  AWP*xWP*yWP)
            Cmat[4,2] = rho*g*(  AWP*xWP )
            Cmat[4,3] = rho*g*(  AWP*xWP*yWP)
            Cmat[4,4] = rho*g*(IWP + AWP*xWP**2 )

        # fully submerged case
        elif self.r[0,2] <= 0 and self.r[-1,2] <= 0:

            AWP = 0
            IWP = 0
            xWP = 0
            yWP = 0

            V_UW  = TaperV( 0.5*self.dA, 0.5*self.dB, self.l)   # displaced volume of member [m^3]
            alpha = TaperCV(0.5*self.dA, 0.5*self.dB, 1.0)      # relative location of center of volume from end A (0) to B (1)
            r_center = self.rA*(1.0-alpha) + self.rB*alpha      # absolute coordinates of center of volume [m]

            # buoyancy force (and moment) vector
            Fvec = translateForce3to6DOF( r_center, np.array([0, 0, rho*g*V_UW]) )

            # hydrostatic stiffness matrix (about end A)
            Cmat = np.zeros([6,6])
            Cmat[3,3] = rho*g*V_UW * r_center[2]
            Cmat[4,4] = rho*g*V_UW * r_center[2]

        else:   # member fully above the surface: no hydrostatic contribution
            AWP = 0
            IWP = 0
            xWP = 0
            yWP = 0
            V_UW = 0
            r_center = np.zeros(3)
            Fvec = np.zeros(6)
            Cmat = np.zeros([6,6])

        return Fvec, Cmat, V_UW, r_center, AWP, IWP, xWP, yWP
def TaperV(R1, R2, H):
    '''Return the volume of a cylindrical section of height H, possibly tapered
    from radius R1 at the base to R2 at the top.

    (The unreachable trailing "return taperV", which referenced an undefined
    name, has been removed: every branch below already returns.)
    '''
    if R1 == R2:       # uniform cylinder
        return np.pi*R1*R1*H
    elif R1 == 0:      # separate case to avoid divide-by-zero in the general formula
        return 1./3.*np.pi*R2*R2*H          # cone volume
    else:              # general frustum: full cone minus the cone beyond the tip
        coneH = H/(1.-R2/R1)                # conical height
        coneV = 1./3.*np.pi*R1*R1*coneH     # full cone volume
        coneVtip = 1./3.*np.pi*R2*R2*(coneH-H)  # volume from end of taper to cone tip
        return coneV - coneVtip             # taper (frustum) volume
def TaperCV(R1, R2, H):
    '''Return the distance from the R1 end to the centroid (center of volume /
    buoyancy) of a tapered section of height H.'''
    numerator = R1**2 + 2.0*R1*R2 + 3.0*R2**2
    denominator = R1**2 + R1*R2 + R2**2
    return 0.25*H*numerator/denominator
def getVelocity(r, Xi, ws):
    '''Get a node's complex displacement/velocity/acceleration spectra from the
    platform motion amplitudes Xi and its position r relative to the PRP.'''
    nw = len(ws)

    dr = np.zeros([3,nw], dtype=complex)   # displacement amplitudes
    v  = np.zeros([3,nw], dtype=complex)   # velocity amplitudes
    a  = np.zeros([3,nw], dtype=complex)   # acceleration amplitudes

    for i, omega in enumerate(ws):
        # rigid-body translation plus small-angle rotation contribution
        dr[:,i] = Xi[:3,i] + SmallRotate(r, Xi[3:,i])
        v[ :,i] = 1j*omega*dr[:,i]   # time derivative -> multiply by j*omega
        a[ :,i] = 1j*omega*v[ :,i]

    return dr, v, a   # node displacement, velocity, acceleration (each [3 x nw])
## Get wave velocity and acceleration complex amplitudes based on wave spectrum at a given location
def getWaveKin(zeta0, w, k, h, r):
    '''Get wave velocity and acceleration complex amplitudes at a point.

    Parameters
    ----------
    zeta0 : complex array
        wave elevation amplitudes per frequency
    w : array
        wave angular frequencies [rad/s]
    k : array
        wave numbers corresponding to w [1/m]
    h : float
        water depth [m]
    r : array
        x,y,z position of the point [m] (z negative below the free surface)

    Returns
    -------
    u, ud : (3, nw) complex arrays of velocity and acceleration amplitudes.
    '''
    nw = len(w)   # number of frequency components (was read from a module-level global)
    beta = 0      # no wave heading for now

    zeta = np.zeros(nw , dtype=complex )  # local wave elevation
    u  = np.zeros([3,nw], dtype=complex)  # local wave kinematics velocity
    ud = np.zeros([3,nw], dtype=complex)  # local wave kinematics acceleration

    for i in range(nw):
        # wave elevation: phase-shift each component to this x-y location
        zeta[i] = zeta0[i]* np.exp( -1j*(k[i]*(np.cos(beta)*r[0] + np.sin(beta)*r[1])))

        z = r[2]
        # only process wave kinematics if this node is submerged
        if z < 0:
            # SINH(k(z+h))/SINH(kh) and COSH(k(z+h))/SINH(kh), branch-selected for stability
            if ( k[i] == 0.0 ):          # shallow-water formulation is ill-conditioned at k=0
                SINHNumOvrSIHNDen = 1.0
                COSHNumOvrSIHNDen = 99999   # sentinel kept from the original formulation
            elif ( k[i]*h > 89.4 ):      # deep-water form avoids floating point overflow
                SINHNumOvrSIHNDen = np.exp( k[i]*z )
                COSHNumOvrSIHNDen = np.exp( k[i]*z )
            else:                        # 0 < k*h <= 89.4; shallow-water formulation
                SINHNumOvrSIHNDen = np.sinh( k[i]*( z + h ) )/np.sinh( k[i]*h )
                COSHNumOvrSIHNDen = np.cosh( k[i]*( z + h ) )/np.sinh( k[i]*h )

            # Fourier transform of wave velocities
            u[0,i] =    w[i]* zeta[i]*COSHNumOvrSIHNDen *np.cos(beta)
            u[1,i] =    w[i]* zeta[i]*COSHNumOvrSIHNDen *np.sin(beta)
            u[2,i] = 1j*w[i]* zeta[i]*SINHNumOvrSIHNDen

            # Fourier transform of wave accelerations
            ud[:,i] = 1j*w[i]*u[:,i]

    return u, ud
# calculate wave number based on wave frequency in rad/s and depth
def waveNumber(omega, h, e=0.001):
    '''Solve the linear dispersion relation for the wave number.

    Fixed-point iteration starting from the deep-water approximation,
    repeated until the relative change drops below tolerance e.

    omega : angular frequency of the waves [rad/s]
    h     : water depth [m]
    '''
    g = 9.81
    k_prev = omega*omega/g                            # deep-water first guess
    k_next = omega*omega/(np.tanh(k_prev*h)*g)        # general dispersion update
    while np.abs(k_next - k_prev)/k_prev > e:         # repeat until converged
        k_prev, k_next = k_next, omega*omega/(np.tanh(k_next*h)*g)
    return k_next
# translate point at location r based on three small angles in th
def SmallRotate(r, th):
    '''Translate the point at r by the small-angle rotation vector th.

    For small angles the rotation matrix is R ~ I + [th x], so the displacement
    of the point is th x r. The original code assigned rt[0] three times
    (leaving rt[1] and rt[2] zero) with inconsistent signs; this completes the
    cross product th x r (the original middle line already matched its y term).
    '''
    rt = np.zeros(3, dtype=complex)   # rotational displacement of the point
    rt[0] = th[1]*r[2] - th[2]*r[1]
    rt[1] = th[2]*r[0] - th[0]*r[2]
    rt[2] = th[0]*r[1] - th[1]*r[0]
    return rt
# given a size-3 vector, vec, return the matrix from the multiplication vec * vec.transpose
def VecVecTrans(vec):
    '''Return the 3x3 outer-product matrix vec * vec^T (float entries).'''
    vvt = np.zeros([3,3])
    for row, col in itertools.product(range(3), range(3)):
        vvt[row, col] = vec[row]*vec[col]
    return vvt
# produce alternator matrix
def getH(r):
    '''Produce the 3x3 alternator (skew-symmetric cross-product) matrix for r.'''
    H = np.array([[  0.0,  r[2], -r[1]],
                  [-r[2],   0.0,  r[0]],
                  [ r[1], -r[0],   0.0]])
    return H
def translateForce3to6DOF(r, Fin):
    '''Takes in a position vector and a force vector (applied at the positon), and calculates
    the resulting 6-DOF force and moment vector.

    :param array r: x,y,z coordinates at which force is acting [m]
    :param array Fin: x,y,z components of force [N]
    :return: the resulting force and moment vector
    :rtype: array
    '''
    # dtype follows the input so both real and complex forces are supported
    F6 = np.zeros(6, dtype=Fin.dtype)
    F6[:3] = Fin                 # force components carry over unchanged
    F6[3:] = np.cross(r, Fin)    # moment about the origin: r x F
    return F6
# translate mass matrix to make 6DOF mass-inertia matrix
def translateMatrix3to6DOF(r, Min):
    '''Expand a 3x3 mass matrix (taken to be about the CG) into a 6x6
    mass-inertia matrix about a reference point offset by r.'''
    # sub-matrix layout:  | m   J |
    #                     | J^T I |
    # J and I of the input are zero here since Min is a pure mass matrix
    H = getH(r)   # "anti-symmetric tensor components" from Sadeghi and Incecik

    M6 = np.zeros([6,6])

    M6[:3,:3] = Min                       # mass block [m'] = [m]
    coupling = np.matmul(Min, H)          # product-of-inertia block [J'] = [m][H]
    M6[:3,3:] = coupling
    M6[3:,:3] = coupling.T
    M6[3:,3:] = np.matmul(np.matmul(H, Min), H.T)   # inertia block [I'] = [H][m][H]^T

    return M6
def translateMatrix6to6DOF(r, Min):
    '''Transform a 6x6 mass-inertia matrix to be about a reference point
    offset by r.'''
    # sub-matrix layout:  | m   J |
    #                     | J^T I |
    H = getH(r)   # "anti-symmetric tensor components" from Sadeghi and Incecik

    mass_blk = Min[:3,:3]
    poi_blk  = Min[:3,3:]

    M6 = np.zeros([6,6])
    M6[:3,:3] = mass_blk                              # [m'] = [m]
    M6[:3,3:] = np.matmul(mass_blk, H) + poi_blk      # [J'] = [m][H] + [J]
    M6[3:,:3] = M6[:3,3:].T
    # [I'] = [H][m][H]^T + [J]^T[H] + [H]^T[J] + [I]
    M6[3:,3:] = (np.matmul(np.matmul(H, mass_blk), H.T)
                 + np.matmul(Min[3:,:3], H)
                 + np.matmul(H.T, poi_blk)
                 + Min[3:,3:])
    return M6
def JONSWAP(ws, Hs, Tp, Gamma=1.0):
    '''Returns the JONSWAP wave spectrum for the given frequencies and parameters.

    Parameters
    ----------
    ws : float | array
        wave frequencies to compute spectrum at (scalar or 1-D list/array) [rad/s]
    Hs : float
        significant wave height of spectrum [m]
    Tp : float
        peak spectral period [s]
    Gamma : float
        wave peak shape parameter []. The default value of 1.0 gives the Pierson-Moskowitz spectrum.

    Returns
    -------
    S : array
        wave power spectral density array corresponding to frequencies in ws [m^2/Hz]

    This formula for the JONSWAP spectrum is adapted from FAST v7 and based
    on what's documented in IEC 61400-3.

    (The unused pre-allocated S array in the original was dead code and has
    been removed; scalar inputs are handled via np.atleast_1d.)
    '''
    # handle both scalar and array inputs uniformly
    ws = np.atleast_1d(np.asarray(ws, dtype=float))

    f = 0.5/np.pi * ws                        # wave frequencies in Hz
    fpOvrf4 = (Tp*f)**(-4.0)                  # common term, (fp/f)^4 = (Tp*f)^(-4)
    C = 1.0 - ( 0.287*np.log(Gamma) )         # normalizing factor
    Sigma = 0.07*(f <= 1.0/Tp) + 0.09*(f > 1.0/Tp)   # scaling factor
    Alpha = np.exp( -0.5*((f*Tp - 1.0)/Sigma)**2 )

    return 0.5/np.pi *C* 0.3125*Hs*Hs*fpOvrf4/f *np.exp( -1.25*fpOvrf4 )* Gamma**Alpha
def printMat(mat):
    '''Print each row of a matrix, tab-separated, in +8.3e format.'''
    row_fmt = "\t".join(["{:+8.3e}"] * mat.shape[1])
    for row in mat:
        print(row_fmt.format(*row))
def printVec(vec):
    '''Print a vector on one line, tab-separated, in +8.3e format.'''
    print("\t".join("{:+8.3e}".format(x) for x in vec))
# ------------------------------- basic setup -----------------------------------------
nDOF = 6                     # number of rigid-body degrees of freedom
w = np.arange(.01, 3, 0.01)  # angular frequencies to analyze (rad/s)
nw = len(w)                  # number of frequencies
k= np.zeros(nw)              # wave number per frequency (filled in later)
# ----------------------- member-based platform description --------------------------
# (hard-coded for now - set to OC3 Hywind Spar geometry - eventually these will be provided as inputs instead)
# list of member objects
memberList = []
# -------------------- OC3 Hywind Spar ----------------------
'''
# ------------------ turbine Tower description ------------------
# diameters and thicknesses linearly interpolated from dA[0] to dB[-1] and t[0] to t[-1]
# number type dA dB xa ya za xb yb zb t l_fill rho_ballast
memberList.append(Member(" 1 1 6.500 6.237 0.0 0.0 10.00 0.0 0.0 17.76 0.0270 0.0 1025.0 ", nw))
memberList.append(Member(" 2 1 6.237 5.974 0.0 0.0 17.76 0.0 0.0 25.52 0.0262 0.0 1025.0 ", nw))
memberList.append(Member(" 3 1 5.974 5.711 0.0 0.0 25.52 0.0 0.0 33.28 0.0254 0.0 1025.0 ", nw))
memberList.append(Member(" 4 1 5.711 5.448 0.0 0.0 33.28 0.0 0.0 41.04 0.0246 0.0 1025.0 ", nw))
memberList.append(Member(" 5 1 5.448 5.185 0.0 0.0 41.04 0.0 0.0 48.80 0.0238 0.0 1025.0 ", nw))
memberList.append(Member(" 6 1 5.185 4.922 0.0 0.0 48.80 0.0 0.0 56.56 0.0230 0.0 1025.0 ", nw))
memberList.append(Member(" 7 1 4.922 4.659 0.0 0.0 56.56 0.0 0.0 64.32 0.0222 0.0 1025.0 ", nw))
memberList.append(Member(" 8 1 4.659 4.396 0.0 0.0 64.32 0.0 0.0 72.08 0.0214 0.0 1025.0 ", nw))
memberList.append(Member(" 9 1 4.396 4.133 0.0 0.0 72.08 0.0 0.0 79.84 0.0206 0.0 1025.0 ", nw))
memberList.append(Member("10 1 4.133 3.870 0.0 0.0 79.84 0.0 0.0 87.60 0.0198 0.0 1025.0 ", nw))
# ---------- spar platform substructure description --------------
memberList.append(Member("11 2 9.400 9.400 0.0 0.0 -120. 0.0 0.0 -12.0 0.0270 52. 1850.0 ", nw))
#memberList.append(Member("11 2 9.400 9.400 0.0 0.0 -120. 0.0 0.0 -12.0 0.066 41.4 2000.0 ", nw))
memberList.append(Member("12 2 9.400 6.500 0.0 0.0 -12.0 0.0 0.0 -4.00 0.0270 0.0 1025.0 ", nw))
memberList.append(Member("13 2 6.500 6.500 0.0 0.0 -4.00 0.0 0.0 10.00 0.0270 0.0 1025.0 ", nw))
#memberList.append(Member("1 2 6.500 6.500 0.0 0.0 -100.00 0.0 0.0 0.00 0.0270 0.0 1025.0 ", nw))
# -------------------------- turbine RNA description ------------------------
# below are rough properties for NREL 5 MW reference turbine
mRNA = 110000 + 240000 # RNA mass [kg]
IxRNA = 11776047*(1 + 1 + 1) + 115926 # RNA moment of inertia about local x axis (assumed to be identical to rotor axis for now, as approx) [kg-m^2]
IrRNA = 11776047*(1 +.5 +.5) + 2607890 # RNA moment of inertia about local y or z axes [kg-m^2]
xCG_RNA = 0 # x location of RNA center of mass [m] (Close enough to -0.27 m)
hHub = 90.0 # hub height above water line [m]
Fthrust = 800e3 # peak thrust force, [N]
'''
# ------------------- DTU 10 MW Spar --------------------------
# Active model: DTU 10 MW reference turbine on a spar substructure.
# ------------------ turbine Tower description ------------------
# number type dA dB xa ya za xb yb zb t l_fill rho_ballast
# new version from 11-114.7 (for OpenFAST reasons)
memberList.append(Member(" 1 1 8.00 7.75 0.0 0.0 11.00 0.0 0.0 21.37 0.038 0.0 1025.0 ", nw))
memberList.append(Member(" 2 1 7.75 7.50 0.0 0.0 21.37 0.0 0.0 31.74 0.036 0.0 1025.0 ", nw))
memberList.append(Member(" 3 1 7.50 7.25 0.0 0.0 31.74 0.0 0.0 42.11 0.034 0.0 1025.0 ", nw))
memberList.append(Member(" 4 1 7.25 7.00 0.0 0.0 42.11 0.0 0.0 52.48 0.032 0.0 1025.0 ", nw))
memberList.append(Member(" 5 1 7.00 6.75 0.0 0.0 52.48 0.0 0.0 62.85 0.030 0.0 1025.0 ", nw))
memberList.append(Member(" 6 1 6.75 6.50 0.0 0.0 62.85 0.0 0.0 73.22 0.028 0.0 1025.0 ", nw))
memberList.append(Member(" 7 1 6.50 6.25 0.0 0.0 73.22 0.0 0.0 83.59 0.026 0.0 1025.0 ", nw))
memberList.append(Member(" 8 1 6.25 6.00 0.0 0.0 83.59 0.0 0.0 93.96 0.024 0.0 1025.0 ", nw))
memberList.append(Member(" 9 1 6.00 5.75 0.0 0.0 93.96 0.0 0.0 104.33 0.022 0.0 1025.0 ", nw))
memberList.append(Member("10 1 5.75 5.50 0.0 0.0 104.33 0.0 0.0 114.70 0.020 0.0 1025.0 ", nw))
# =============================================================================
# # old version from 13-116.63
# memberList.append(Member(" 1 1 8.00 7.75 0.0 0.0 13.000 0.0 0.0 23.363 0.038 0.0 1025.0 ", nw))
# memberList.append(Member(" 2 1 7.75 7.50 0.0 0.0 23.363 0.0 0.0 33.726 0.036 0.0 1025.0 ", nw))
# memberList.append(Member(" 3 1 7.50 7.25 0.0 0.0 33.726 0.0 0.0 44.089 0.034 0.0 1025.0 ", nw))
# memberList.append(Member(" 4 1 7.25 7.00 0.0 0.0 44.089 0.0 0.0 54.452 0.032 0.0 1025.0 ", nw))
# memberList.append(Member(" 5 1 7.00 6.75 0.0 0.0 54.452 0.0 0.0 64.815 0.030 0.0 1025.0 ", nw))
#
# memberList.append(Member(" 6 1 6.75 6.50 0.0 0.0 64.815 0.0 0.0 75.178 0.028 0.0 1025.0 ", nw))
# memberList.append(Member(" 7 1 6.50 6.25 0.0 0.0 75.178 0.0 0.0 85.541 0.026 0.0 1025.0 ", nw))
# memberList.append(Member(" 8 1 6.25 6.00 0.0 0.0 85.541 0.0 0.0 95.904 0.024 0.0 1025.0 ", nw))
# memberList.append(Member(" 9 1 6.00 5.75 0.0 0.0 95.904 0.0 0.0 106.267 0.022 0.0 1025.0 ", nw))
# memberList.append(Member("10 1 5.75 5.50 0.0 0.0 106.267 0.0 0.0 116.630 0.020 0.0 1025.0 ", nw))
# =============================================================================
# ---------- spar platform substructure description --------------
# =============================================================================
# Ballast members from Senu's sizing
# memberList.append(Member("11 2 14.75 14.75 0.0 0.0 -90. 0.0 0.0 -85.2 0.046 4.8 3743.42 ", nw))
# memberList.append(Member("12 2 14.75 14.75 0.0 0.0 -85.2 0.0 0.0 -75.708 0.046 9.492 3792.35 ", nw))
# memberList.append(Member("13 2 14.75 14.75 0.0 0.0 -75.708 0.0 0.0 -72.734 0.046 2.974 1883.78 ", nw))
# =============================================================================
# Ballast members from Stein getting weight = displ
memberList.append(Member("11 2 14.75 14.75 0.0 0.0 -90.000 0.0 0.0 -85.200 0.046 4.800 7850. ", nw))
memberList.append(Member("12 2 14.75 14.75 0.0 0.0 -85.200 0.0 0.0 -75.708 0.046 9.492 2650. ", nw))
memberList.append(Member("13 2 14.75 14.75 0.0 0.0 -75.708 0.0 0.0 -72.734 0.046 2.974 1025. ", nw))
memberList.append(Member("14 2 14.75 14.75 0.0 0.0 -72.734 0.0 0.0 -20. 0.046 0.0 1025. ", nw))
memberList.append(Member("15 2 14.75 8.00 0.0 0.0 -20. 0.0 0.0 -5. 0.063 0.0 1025.0 ", nw))
memberList.append(Member("16 2 8.00 8.00 0.0 0.0 -5. 0.0 0.0 7. 0.068 0.0 1025.0 ", nw))
memberList.append(Member("17 2 8.00 7.00 0.0 0.0 7. 0.0 0.0 11. 0.055 0.0 1025.0 ", nw))
# -------------------------- turbine RNA description ------------------------
# rotor-nacelle assembly treated as lumped mass/inertia properties
mRotor = 227962 #[kg]
mNacelle = 446036 #[kg]
IxHub = 325671 #[kg-m^2]
IzNacelle = 7326346 #[kg-m^2]
IxBlades = 45671252 #[kg-m^2] MOI value from FAST file, don't know where MOI is about. Assuming about the hub
xCG_Hub = -7.07 #[m] from yaw axis
xCG_Nacelle = 2.687 #[m] from yaw axis
mRNA = mRotor + mNacelle #[kg]
IxRNA = IxBlades*(1 + 1 + 1) + IxHub # RNA moment of inertia about local x axis (assumed to be identical to rotor axis for now, as approx) [kg-m^2]
IrRNA = IxBlades*(1 + .5 + .5) + IzNacelle # RNA moment of inertia about local y or z axes [kg-m^2]
xCG_RNA = ((mRotor*xCG_Hub)+(mNacelle*xCG_Nacelle))/(mRotor+mNacelle) # x location of RNA center of mass [m] (mass-weighted average)
#hHub = 119.0 # hub height above water line [m]
hHub = 118.0 # hub height above water line [m]
# ------- Wind conditions
Fthrust = 800e3 # peak thrust force, [N]
Mthrust = hHub*Fthrust # overturning moment from turbine thrust force [N-m]
# ---------------- (future work) import hydrodynamic coefficient files ----------------
# ---------------------------- environmental conditions -------------------------------
depth = 200 # water depth [m] used for the wave-number solve below
# NOTE(review): depth is reassigned to 600 m in the mooring-design section
# later in the script; the 200 m value here is what waveNumber() sees.
rho = 1025 # water density [kg/m^3]
g = 9.81 # gravity [m/s^2]
pi = np.pi
# environmental condition(s)
Hs = [8 ]; # significant wave height [m], one entry per condition
Tp = [12]; # peak period [s]
windspeed = [8 ]; # [m/s] (not used further in this chunk)
S = np.zeros([len(Hs), nw]) # wave spectrum
S2 = np.zeros([len(Hs), nw]) # wave spectrum
# NOTE(review): S2 is allocated but never filled; it only appears in the
# commented-out plot below - candidate for removal.
zeta= np.zeros([len(Hs), nw]) # wave elevation
T = 2*np.pi/w # periods
for imeto in range(len(Hs)): # loop through each environmental condition
    # make wave spectrum (setting to 1 gives approximate RAOs)
    S[imeto,:] = JONSWAP(w, Hs[imeto], Tp[imeto])
    # wave elevation amplitudes (these are easiest to use) - no need to be complex given frequency domain use
    # NOTE(review): amplitude taken as sqrt(S) with no bin-width (dw) factor -
    # confirm intended before using for dimensional response statistics.
    zeta[imeto,:] = np.sqrt(S[imeto,:])
# get wave number
for i in range(nw):
    k[i] = waveNumber(w[i], depth)
'''
plt.plot(w/np.pi/2, S[2,:], "r")
plt.plot(w/np.pi/2, S2[2,:], "b")
plt.xlabel("Hz")
'''
# ignoring multiple DLCs for now <<<<<
#for imeto = 1:length(windspeeds)
imeto = 0 # index of the single environmental condition analyzed below
# ---------------------- set up key arrays -----------------------------
# All 6x6 matrices are in the platform's 6-DOF basis (surge, sway, heave,
# roll, pitch, yaw) about the platform reference point (PRP).
# structure-related arrays
M_struc = np.zeros([6,6]) # structure/static mass/inertia matrix [kg, kg-m, kg-m^2]
B_struc = np.zeros([6,6]) # structure damping matrix [N-s/m, N-s, N-s-m] (may not be used)
C_struc = np.zeros([6,6]) # structure effective stiffness matrix [N/m, N, N-m]
W_struc = np.zeros([6]) # static weight vector [N, N-m]
# hydrodynamics-related arrays
A_hydro = np.zeros([6,6,nw]) # hydrodynamic added mass matrix [kg, kg-m, kg-m^2]
C_hydro = np.zeros([6,6]) # hydrostatic stiffness matrix [N/m, N, N-m]
W_hydro = np.zeros(6) # buoyancy force/moment vector [N, N-m]
F_hydro = np.zeros([6,nw],dtype=complex) # excitation force/moment complex amplitudes vector [N, N-m]
# moorings-related arrays
A_moor = np.zeros([6,6,nw]) # mooring added mass matrix [kg, kg-m, kg-m^2] (may not be used)
B_moor = np.zeros([6,6,nw]) # mooring damping matrix [N-s/m, N-s, N-s-m] (may not be used)
C_moor = np.zeros([6,6]) # mooring stiffness matrix (linearized about platform offset) [N/m, N, N-m]
W_moor = np.zeros(6) # mean net mooring force/moment vector [N, N-m]
# final coefficient arrays
M_tot = np.zeros([6,6,nw]) # total mass and added mass matrix [kg, kg-m, kg-m^2]
B_tot = np.zeros([6,6,nw]) # total damping matrix [N-s/m, N-s, N-s-m]
C_tot = np.zeros([6,6,nw]) # total stiffness matrix [N/m, N, N-m]
F_tot = np.zeros([6,nw], dtype=complex) # total excitation force/moment complex amplitudes vector [N, N-m]
# --------------- add in linear hydrodynamic coefficients here if applicable --------------------
# TODO <<<
# --------------- Get general geometry properties including hydrostatics ------------------------
# initialize some variables for running totals
VTOT = 0. # Total underwater volume of all members combined
mTOT = 0. # Total mass of all members [kg]
AWP_TOT = 0. # Total waterplane area of all members [m^2]
IWPx_TOT = 0 # Total waterplane moment of inertia of all members about x axis [m^4]
IWPy_TOT = 0 # Total waterplane moment of inertia of all members about y axis [m^4]
Sum_V_rCB = np.zeros(3) # product of each member's buoyancy multiplied by center of buoyancy [m^4]
Sum_AWP_rWP = np.zeros(2) # product of each member's waterplane area multiplied by the area's center point [m^3]
Sum_M_center = np.zeros(3) # product of each member's mass multiplied by its center of mass [kg-m] (Only considers the shell mass right now)
# Accumulate mass/inertia and hydrostatic contributions of every member
# (plus the lumped RNA) into the global 6-DOF vectors/matrices about the PRP.
# loop through each member
for mem in memberList:
    q, p1, p2 = mem.getDirections() # get unit direction vectors
    # ---------------------- get member's mass and inertia properties ------------------------------
    rho_steel = 8500 #[kg/m^3] shell steel density (loop-invariant; could be hoisted)
    v_steel, center, I_rad, I_ax, m_fill = mem.getInertia() # calls the getInertia method to calculate values
    mass = v_steel*rho_steel + m_fill #[kg] shell plus ballast/fill mass
    Mmat = np.diag([mass, mass, mass, I_rad, I_rad, I_ax]) # MOI matrix = Mmat[3:,3:] is 0 on off diags bc symmetry in cylinders
    # @mhall: Mmat as written above is the mass and inertia matrix about the member CG...@shousner: you betcha
    # now convert everything to be about PRP (platform reference point) and add to global vectors/matrices
    W_struc += translateForce3to6DOF( center, np.array([0,0, -g*mass]) ) # weight vector
    M_struc += translateMatrix6to6DOF(center, Mmat) # mass/inertia matrix
    # @mhall: Using the diagonal Mmat, and calling the above function with the "center" coordinate, will give the mass/inertia about the PRP!
    # @shousner: center is the position vector of the CG of the member, from the global coordinates aka PRP
    Sum_M_center += center*mass # running mass-weighted CG numerator
    # -------------------- get each member's buoyancy/hydrostatic properties -----------------------
    Fvec, Cmat, V_UW, r_CB, AWP, IWP, xWP, yWP = mem.getHydrostatics() # call to Member method for hydrostatic calculations
    # now convert everything to be about PRP (platform reference point) and add to global vectors/matrices <<<<< needs updating (already about PRP)
    W_hydro += Fvec # translateForce3to6DOF( mem.rA, np.array([0,0, Fz]) ) # weight vector
    C_hydro += Cmat # translateMatrix6to6DOF(mem.rA, Cmat) # hydrostatic stiffness matrix
    VTOT += V_UW # add to total underwater volume of all members combined
    AWP_TOT += AWP # total waterplane area
    IWPx_TOT += IWP + AWP*yWP**2 # parallel-axis shift of waterplane inertia about x
    IWPy_TOT += IWP + AWP*xWP**2 # parallel-axis shift of waterplane inertia about y
    Sum_V_rCB += r_CB*V_UW # buoyancy-weighted CB numerator
    Sum_AWP_rWP += np.array([xWP, yWP])*AWP # area-weighted waterplane-centroid numerator
# ------------------------- include RNA properties -----------------------------
# for now, turbine RNA is specified by some simple lumped properties
Mmat = np.diag([mRNA, mRNA, mRNA, IxRNA, IrRNA, IrRNA]) # create mass/inertia matrix
center = np.array([xCG_RNA, 0, hHub]) # RNA center of mass location
# now convert everything to be about PRP (platform reference point) and add to global vectors/matrices
W_struc += translateForce3to6DOF( center, np.array([0,0, -g*mRNA]) ) # weight vector
M_struc += translateMatrix6to6DOF(center, Mmat) # mass/inertia matrix
Sum_M_center += center*mRNA
# ----------- process key hydrostatic-related totals for use in static equilibrium solution ------------------
mTOT = M_struc[0,0] # total system mass [kg] (surge-surge entry of assembled mass matrix)
rCG_TOT = Sum_M_center/mTOT # overall center of gravity [m]
# NOTE(review): this divides by VTOT before the VTOT==0 guard below; with no
# submerged members numpy yields inf/nan here (zMeta is protected by the
# guard, but rCB_TOT itself is then invalid).
rCB_TOT = Sum_V_rCB/VTOT # location of center of buoyancy on platform
if VTOT==0: # if you're only working with members above the platform, like modeling the wind turbine
    zMeta = 0
else:
    zMeta = rCB_TOT[2] + IWPx_TOT/VTOT # add center of buoyancy and BM=I/v to get z elevation of metecenter [m] (have to pick one direction for IWP)
# gravity-induced (negative) restoring terms in roll and pitch from the CG height
C_struc[3,3] = -mTOT*g*rCG_TOT[2]
C_struc[4,4] = -mTOT*g*rCG_TOT[2]
# --------------- set up quasi-static mooring system and solve for mean offsets -------------------
import MoorDesign as md
# =============================================================================
# # Inputs for OC3 Hywind
# depth = 320.
# type_string = 'main'
# anchorR = 853.87
# fairR = 5.2
# fair_depth = 70.
# LineLength = 902.2
# LineD = 0.09
# dryMass_L = 77.7066 #[kg/m]
# EA = 384243000 #[N]
# angle = np.array([0, 2*np.pi/3, -2*np.pi/3])
# =============================================================================
# Inputs for DTU 10 MW setup
depth = 600. #[m] NOTE(review): overrides the 200 m used for the wave-number solve above - confirm intended
type_string = 'main'
LineD = 0.15 #[m] line diameter
wetMass_L = 4.401 #[kg/m] submerged mass per length
dryMass_L = wetMass_L + (np.pi/4)*LineD**2*rho # add displaced-water mass to get dry mass per length
EA = 384243000 #100e6 #[N] axial stiffness
angle = np.array([0, 2*np.pi/3, -2*np.pi/3]) # azimuths of the three lines [rad]
anchorR = 656.139 #[m] anchor radius from platform centerline
fair_depth = 21. #[m] fairlead depth below waterline
fairR = 7.875 #[m] fairlead radius
LineLength = 868.5 #[m] unstretched line length
# build the 3-line quasi-static system and attach the floating body's
# mass/volume/hydrostatic properties computed above
MooringSystem = md.make3LineSystem(depth, type_string, LineD, dryMass_L, EA, angle, anchorR, fair_depth, fairR, LineLength)
MooringSystem.BodyList[0].m = mTOT
MooringSystem.BodyList[0].v = VTOT
MooringSystem.BodyList[0].rCG = rCG_TOT
MooringSystem.BodyList[0].AWP = AWP_TOT
MooringSystem.BodyList[0].rM = np.array([0,0,zMeta])
MooringSystem.BodyList[0].f6Ext = np.array([Fthrust,0,0, 0,Mthrust,0]) # mean wind thrust force and overturning moment
MooringSystem.initialize()
# --------------- Bridle Configuration --------------------------
# Alternative mooring layout: chain on the seabed end, synthetic mid-span,
# and a bridle (crow-foot) at the fairlead end of each of the three lines.
# Inputs for DTU 10 MW setup
depth = 600. #[m]
type_string = ['chain','synth'] # segment material types
LineD = [0.17971, 0.15] #[m] chain / synthetic diameters
#wetMass_L = 4.401 #[kg/m]
#dryMass_L = wetMass_L + (np.pi/4)*LineD**2*rho
dryMass_L = [200, 40.18] # dry mass per length for chain / synthetic [kg/m]
EA = [0, 121415000] #100e6 #[N] chain EA filled in just below
EA[0] = 0.854e11*(LineD[0]**2) # empirical chain axial stiffness from diameter
angle = np.array([0, 2*np.pi/3, -2*np.pi/3])
anchorR = 656.139 #[m]
fair_depth = 21. #[m]
fairR = 7.875 #[m]
LineLength = 868.5 #[m] total per-line length, split into the segments below
chainLength = 80 #[m]
bridleLength = 30 #[m]
synthLength = LineLength-chainLength-bridleLength
# geometric interpolation for the segment junction points (radius/depth)
synthR = anchorR-0.9*chainLength
synthZ = 0.95*depth
bridleR = ((synthR-fairR)/(synthLength+bridleLength))*bridleLength + fairR
bridleZ = ((synthZ-fair_depth)/(synthLength+bridleLength))*bridleLength + fair_depth
Bridle = md.makeBridleSystem(depth, type_string, LineD, dryMass_L, EA, angle, anchorR, fair_depth, fairR, LineLength, chainLength, bridleLength, synthLength, synthR, synthZ, bridleR, bridleZ)
Bridle.BodyList[0].m = mTOT
Bridle.BodyList[0].v = VTOT
Bridle.BodyList[0].rCG = rCG_TOT
Bridle.BodyList[0].AWP = AWP_TOT
Bridle.BodyList[0].rM = np.array([0,0,zMeta])
Bridle.BodyList[0].f6Ext = np.array([Fthrust,0,0, 0,Mthrust,0])
Bridle.initialize()
# If using the bridle mooring system rather than the original, do a rename so we can refer to it as MooringSystem going forward (otherwise comment the line out)
MooringSystem = Bridle
# ----------------------------- Calculate mooring system characteristics ---------------------
# First get mooring system characteristics about undisplaced platform position (useful for baseline and verification)
C_moor0 = MooringSystem.BodyList[0].getStiffness2(np.zeros(6), dx=0.01) # get mooring stiffness (uses new method that accounts for free Points in mooring system)
W_moor0 = MooringSystem.BodyList[0].getForces(lines_only=True) # get net forces and moments from mooring lines on Body
# Now find static equilibrium offsets of platform and get mooring properties about that point
MooringSystem.solveEquilibrium() # get the system to its equilibrium
MooringSystem.plot()
r6eq = MooringSystem.BodyList[0].r6 # equilibrium 6-DOF pose [m, rad]
print("Equilibirum platform positions/rotations:")
printVec(r6eq)
print("Surge: {:.2f}".format(r6eq[0]))
print("Pitch: {:.2f}".format(r6eq[4]*180/np.pi))
C_moor = MooringSystem.BodyList[0].getStiffness2(r6eq, dx=0.01) # get mooring stiffness (uses new method that accounts for free Points in mooring system)
W_moor = MooringSystem.BodyList[0].getForces(lines_only=True) # get net forces and moments from mooring lines on Body
# manually add yaw spring stiffness as compensation until bridle (crow foot) configuration is added
#C_moor[5,5] += 98340000.0
#print(stopt)
# ------------------------------- sum all static matrices -----------------------------------------
# this is to check totals from static calculations before hydrodynamic terms are added
M_tot_stat = M_struc
C_tot_stat = C_struc + C_hydro + C_moor
W_tot_stat = W_struc + W_hydro + W_moor
print("hydrostatic stiffness matrix")
printMat(C_hydro)
print("structural stiffness matrix")
printMat(C_struc)
print("mooring stiffness matrix")
printMat(C_moor)
print("total static mass matrix")
printMat(M_tot_stat)
print("total static stiffness matrix")
printMat(C_tot_stat)
print("total static forces and moments")
printVec(W_tot_stat)
# --------------------- get constant hydrodynamic values along each member -----------------------------
# Builds the frequency-independent Morison added-mass matrix and the
# frequency-dependent inertial (Froude-Krylov + diffraction approx.)
# excitation vector by integrating strip contributions over submerged nodes.
A_hydro_morison = np.zeros([6,6]) # hydrodynamic added mass matrix, from only Morison equation [kg, kg-m, kg-m^2]
F_hydro_iner = np.zeros([6,nw],dtype=complex) # inertia excitation force/moment complex amplitudes vector [N, N-m]
# loop through each member
for mem in memberList:
    # loop through each node of the member
    for il in range(mem.n):
        # only process hydrodynamics if this node is submerged
        if mem.r[il,2] < 0:
            q, p1, p2 = mem.getDirections() # get unit direction vectors
            # set dl to half if at a member end (the end result is similar to trapezoid rule integration)
            # BUGFIX: il runs 0..mem.n-1, so the previous test `il==mem.n`
            # never matched and the LAST node got a full dl instead of half.
            if il==0 or il==mem.n-1:
                dl = 0.5*mem.dl
            else:
                dl = mem.dl
            # get wave kinematics spectra given a certain wave spectrum and location
            mem.u[il,:,:], mem.ud[il,:,:] = getWaveKin(zeta[imeto,:], w, k, depth, mem.r[il,:])
            # local added mass matrix (axial and two transverse directions weighted by their Ca)
            Amat = rho*0.25*np.pi*mem.d[il]**2*dl *( mem.Ca_q*VecVecTrans(q) + mem.Ca_p1*VecVecTrans(p1) + mem.Ca_p2*VecVecTrans(p2) )
            # add to global added mass matrix for Morison members
            A_hydro_morison += translateMatrix3to6DOF(mem.r[il,:], Amat)
            # local inertial excitation matrix (Cm = 1 + Ca per direction)
            Imat = rho*0.25*np.pi*mem.d[il]**2*dl *( (1+mem.Ca_q)*VecVecTrans(q) + (1+mem.Ca_p1)*VecVecTrans(p1) + (1+mem.Ca_p2)*VecVecTrans(p2) )
            for i in range(nw): # for each wave frequency...
                # local inertial excitation force complex amplitude in x,y,z
                F_exc_inert = np.matmul(Imat, mem.ud[il,:,i])
                # add to global excitation vector (frequency dependent)
                F_hydro_iner[:,i] += translateForce3to6DOF( mem.r[il,:], F_exc_inert)
# --------------------------------- get system properties in undisplaced position ----------------------------
# these are useful for verification, etc.
# sum matrices to check totals from static calculations before hydrodynamic terms are added
C_tot0 = C_struc + C_hydro + C_moor0 # total system stiffness matrix about undisplaced position
W_tot0 = W_struc + W_hydro + W_moor0 # system mean forces and moments at undisplaced position
M = M_struc + A_hydro_morison # total mass plus added mass matrix
# do we want to relinearize structural properties about displaced position/orientation? (Probably not)
'''
print("hydrostatic stiffness matrix")
printMat(C_hydro)
print("structural stiffness matrix")
printMat(C_struc)
print("mooring stiffness matrix about undisplaced position")
printMat(C_moor0)
print("total static stiffness matrix about undisplaced position")
printMat(C_tot0)
print("total static mass matrix")
printMat(M_struc)
print("total added mass matrix")
printMat(A_hydro_morison)
print("total mass plus added mass matrix")
printMat(M)
print("total static forces and moments about undisplaced position")
printVec(W_tot0)
'''
# calculate natural frequencies (using eigen analysis to get proper values for pitch and roll - otherwise would need to base about CG if using diagonal entries only)
eigenvals, eigenvectors = np.linalg.eig(np.matmul(np.linalg.inv(M), C_tot0)) # <<< need to sort this out so it gives desired modes, some are currently a bit messy
# alternative attempt to calculate natural frequencies based on diagonal entries (and taking pitch and roll about CG)
zMoorx = C_tot0[0,4]/C_tot0[0,0] # effective z elevation of mooring system reaction forces in x and y directions
zMoory = C_tot0[1,3]/C_tot0[1,1]
zCG = rCG_TOT[2] # center of mass in z
zCMx = M[0,4]/M[0,0] # effective z elevation of center of mass and added mass in x and y directions
zCMy = M[1,3]/M[1,1]
print("natural frequencies without added mass")
fn = np.zeros(6) # natural frequencies [Hz] per DOF (index order: surge..yaw)
fn[0] = np.sqrt( C_tot0[0,0] / M_struc[0,0] )/ 2.0/np.pi
fn[1] = np.sqrt( C_tot0[1,1] / M_struc[1,1] )/ 2.0/np.pi
fn[2] = np.sqrt( C_tot0[2,2] / M_struc[2,2] )/ 2.0/np.pi
fn[5] = np.sqrt( C_tot0[5,5] / M_struc[5,5] )/ 2.0/np.pi
zg = rCG_TOT[2]
fn[3] = np.sqrt( (C_tot0[3,3] + C_tot0[1,1]*((zCG-zMoory)**2 - zMoory**2) ) / (M_struc[3,3] - M_struc[1,1]*zg**2 ))/ 2.0/np.pi # this contains adjustments to reflect rotation about the CG rather than PRP
fn[4] = np.sqrt( (C_tot0[4,4] + C_tot0[0,0]*((zCG-zMoorx)**2 - zMoorx**2) ) / (M_struc[4,4] - M_struc[0,0]*zg**2 ))/ 2.0/np.pi # this contains adjustments to reflect rotation about the CG rather than PRP
printVec(fn)
print("natural frequencies with added mass")
fn = np.zeros(6)
fn[0] = np.sqrt( C_tot0[0,0] / M[0,0] )/ 2.0/np.pi
fn[1] = np.sqrt( C_tot0[1,1] / M[1,1] )/ 2.0/np.pi
fn[2] = np.sqrt( C_tot0[2,2] / M[2,2] )/ 2.0/np.pi
fn[5] = np.sqrt( C_tot0[5,5] / M[5,5] )/ 2.0/np.pi
fn[3] = np.sqrt( (C_tot0[3,3] + C_tot0[1,1]*((zCMy-zMoory)**2 - zMoory**2) ) / (M[3,3] - M[1,1]*zCMy**2 ))/ 2.0/np.pi # this contains adjustments to reflect rotation about the CG rather than PRP
fn[4] = np.sqrt( (C_tot0[4,4] + C_tot0[0,0]*((zCMx-zMoorx)**2 - zMoorx**2) ) / (M[4,4] - M[0,0]*zCMx**2 ))/ 2.0/np.pi # this contains adjustments to reflect rotation about the CG rather than PRP
# note that the above lines use off-diagonal term rather than parallel axis theorem since rotation will not be exactly at CG due to effect of added mass
printVec(fn)
# ------------------- solve for platform dynamics, iterating until convergence --------------------
# Fixed-point iteration: the viscous drag is linearized about the current
# response estimate Xi, then the frequency-domain equation of motion is
# re-solved, for nIter passes.
Z = np.zeros([6,6,nw], dtype=complex) # total system impedance matrix
# system response
Xi = np.zeros([6,nw], dtype=complex) + 0.01 # displacement and rotation complex amplitudes [m, rad] (small nonzero seed)
nIter = 2 # maximum number of iterations to allow
# start fixed point iteration loop for dynamics
for iiter in range(nIter):
    # ------ calculate linearized coefficients within iteration -------
    B_hydro_drag = np.zeros([6,6]) # hydrodynamic damping matrix (just linearized viscous drag for now) [N-s/m, N-s, N-s-m]
    F_hydro_drag = np.zeros([6,nw],dtype=complex) # excitation force/moment complex amplitudes vector [N, N-m]
    # loop through each member
    for mem in memberList:
        q, p1, p2 = mem.getDirections() # get unit direction vectors
        # loop through each node of the member
        for il in range(mem.n):
            # node displacement, velocity, and acceleration (each [3 x nw])
            drnode, vnode, anode = getVelocity(mem.r[il,:], Xi, w) # get node complex velocity spectrum based on platform motion's and relative position from PRP
            # only process hydrodynamics if this node is submerged
            if mem.r[il,2] < 0:
                # water relative velocity over node (complex amplitude spectrum) [3 x nw]
                vrel = mem.u[il,:] - vnode
                # break out velocity components in each direction relative to member orientation [nw]
                # NOTE(review): these are element-wise products, not dot-product
                # projections; the vRMS_* norms below fold all three Cartesian
                # components together - confirm this is the intended linearization.
                vrel_q = vrel* q[:,None]
                vrel_p1 = vrel*p1[:,None]
                vrel_p2 = vrel*p2[:,None]
                # get RMS of relative velocity component magnitudes (real-valued)
                vRMS_q = np.linalg.norm( np.abs(vrel_q ) ) # equivalent to np.sqrt( np.sum( np.abs(vrel_q )**2) /nw)
                vRMS_p1 = np.linalg.norm( np.abs(vrel_p1) )
                vRMS_p2 = np.linalg.norm( np.abs(vrel_p2) )
                # linearized damping coefficients in each direction relative to member orientation [not explicitly frequency dependent...] (this goes into damping matrix)
                Bprime_q = np.sqrt(8/np.pi) * vRMS_q * 0.5*rho * np.pi*mem.d[il]*mem.dl * mem.Cd_q
                Bprime_p1 = np.sqrt(8/np.pi) * vRMS_p1 * 0.5*rho * mem.d[il]*mem.dl * mem.Cd_p1
                Bprime_p2 = np.sqrt(8/np.pi) * vRMS_p2 * 0.5*rho * mem.d[il]*mem.dl * mem.Cd_p2
                # convert to global orientation
                Bmat = Bprime_q*VecVecTrans(q) + Bprime_p1*VecVecTrans(p1) + Bprime_p2*VecVecTrans(p2)
                # add to global damping matrix for Morison members
                Btemp = translateMatrix3to6DOF(mem.r[il,:], Bmat)
                #breakpoint()
                B_hydro_drag += Btemp
                # excitation force based on linearized damping coefficients [3 x nw]
                F_exc_drag = np.zeros([3, nw], dtype=complex) # <<< should set elsewhere <<<
                for i in range(nw):
                    # get local 3d drag excitation force complex amplitude for each frequency
                    F_exc_drag[:,i] = np.matmul(Bmat, mem.u[il,:,i])
                    # add to global excitation vector (frequency dependent)
                    F_hydro_drag[:,i] += translateForce3to6DOF( mem.r[il,:], F_exc_drag[:,i])
    # ----------------------------- solve matrix equation of motion ------------------------------
    for ii in range(nw): # loop through each frequency
        # sum contributions for each term
        M_tot[:,:,ii] = M_struc + A_hydro_morison # mass
        B_tot[:,:,ii] = B_struc + B_hydro_drag # damping
        C_tot[:,:,ii] = C_struc + C_hydro + C_moor # stiffness
        F_tot[:, ii] = F_hydro_drag[:,ii] + F_hydro_iner[:,ii] # excitation force (complex amplitude)
        # form impedance matrix
        Z[:,:,ii] = -w[ii]**2 * M_tot[:,:,ii] + 1j*w[ii]*B_tot[:,:,ii] + C_tot[:,:,ii];
        # solve response (complex amplitude)
        Xi[:,ii] = np.matmul(np.linalg.inv(Z[:,:,ii]), F_tot[:,ii] )
# legacy MATLAB convergence-check code, kept (inert) for reference
'''
#Xi{imeto} = rao{imeto}.*repmat(sqrt(S(:,imeto)),1,6); # complex response!
#aNacRAO{imeto} = -(w').^2 .* (rao{imeto}(:,1) + hNac*rao{imeto}(:,5)); # Nacelle Accel RAO
#aNac2(imeto) = sum( abs(aNacRAO{imeto}).^2.*S(:,imeto) ) *(w(2)-w(1)); # RMS Nacelle Accel
# ----------------- convergence check --------------------
conv = abs(aNac2(imeto)/aNac2last - 1);
#disp(['at ' num2str(iiter) ' iterations - convergence is ' num2str(conv)])
if conv < 0.0001
    break
else
    aNac2last = aNac2(imeto);
end
'''
# ------------------------------ preliminary plotting of response ---------------------------------
fig, ax = plt.subplots(3,1, sharex=True)
ax[0].plot(w, np.abs(Xi[0,:]) , 'b', label="surge")
ax[0].plot(w, np.abs(Xi[1,:]) , 'g', label="sway")
ax[0].plot(w, np.abs(Xi[2,:]) , 'r', label="heave")
ax[1].plot(w, np.abs(Xi[3,:])*180/np.pi, 'b', label="roll")
ax[1].plot(w, np.abs(Xi[4,:])*180/np.pi, 'g', label="pitch")
ax[1].plot(w, np.abs(Xi[5,:])*180/np.pi, 'r', label="yaw")
ax[2].plot(w, zeta[0,:], 'k', label="wave amplitude (m)")
ax[0].legend()
ax[1].legend()
ax[2].legend()
#ax[0].set_ylim([0, 1e6])
#ax[1].set_ylim([0, 1e9])
ax[0].set_ylabel("response magnitude (m)")
ax[1].set_ylabel("response magnitude (deg)")
ax[2].set_ylabel("wave amplitude (m)")
ax[2].set_xlabel("frequency (Hz)")
plt.show()
# ---------- mooring line fairlead tension RAOs and constraint implementation ----------
'''
for il=1:Platf.Nlines
#aNacRAO{imeto} = -(w').^2 .* (X{imeto}(:,1) + hNac*X{imeto}(:,5)); # Nacelle Accel RAO
#aNac2(imeto) = sum( abs(aNacRAO{imeto}).^2.*S(:,imeto) ) *(w(2)-w(1)); # RMS Nacelle Accel
TfairRAO{imeto}(il,:) = C_lf(il,:,imeto)*rao{imeto}(:,:)'; # get fairlead tension RAO for each line (multiply by dofs)
#RMSTfair{imeto}(il) = sqrt( sum( (abs(TfairRAO{imeto}(il,:))).^2) / length(w) );
#figure
#plot(w,abs(TfairRAO{imeto}(il,:)))
#d=TfairRAO{imeto}(il,:)
RMSTfair{imeto}(il) = sqrt( sum( (abs(TfairRAO{imeto}(il,:)).^2).*S(:,imeto)') *(w(2)-w(1)) );
#RMSTfair
#sumpart = sum( (abs(TfairRAO{imeto}(il,:)).^2).*S(:,imeto)')
#dw=(w(2)-w(1))
end
[Tfair, il] = min( T_lf(:,imeto) );
if Tfair - 3*RMSTfair{imeto}(il) < 0 && Xm < 1 # taut lines only
disp([' REJECTING (mooring line goes slack)'])
fitness = -1;
return; # constraint for slack line!!!
end
if grads
disp(['mooring slackness: ' num2str(Tfair - 3*RMSTfair{imeto}(il))])
end
# ----------- dynamic pitch constraint ----------------------
#disp('checking dynamic pitch');
RMSpitch(imeto) = sqrt( sum( ((abs(rao{imeto}(:,5))).^2).*S(:,imeto) ) *(w(2)-w(1)) ); # fixed April 9th :(
RMSpitchdeg = RMSpitch(imeto)*60/pi;
if (Platf.spitch + RMSpitch(imeto))*180/pi > 10
disp([' REJECTING (static + RMS dynamic pitch > 10)'])
fitness = -1;
return;
end
if grads
disp(['dynamic pitch: ' num2str((Platf.spitch + RMSpitch(imeto))*180/pi)])
end
#figure(1)
#plot(w,S(:,imeto))
#hold on
#figure()
#plot(2*pi./w,abs(Xi{imeto}(:,5)))
#ylabel('pitch response'); xlabel('T (s)')
RMSsurge(imeto) = sqrt( sum( ((abs(rao{imeto}(:,1))).^2).*S(:,imeto) ) *(w(2)-w(1)) );
RMSheave(imeto) = sqrt( sum( ((abs(rao{imeto}(:,3))).^2).*S(:,imeto) ) *(w(2)-w(1)) );
'''
| true |
25cbf73fe84ed295402f5aacba217d3ad676d4a7 | Python | themellion/feecalculator | /feecalculator.py | UTF-8 | 1,732 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 15 07:00:17 2019
@author: themellion
"""
import numpy as np
import pandas as pd
#initialise the fee structure
term12 = [50, 90, 90, 115, 100, 120, 140, 160, 180, 200, 220, 240, 260, 280, 300, 320, 340, 360, 380, 400]
term24 = [70, 100, 120, 160, 200, 240, 280, 320, 360, 400, 440, 480, 520, 560, 600, 640, 680, 720, 760, 800]
#@app.route("/")
def feecalculator(term, amount=2750):
"""Calculate fee amount based on given rules """
#test the term provided
if term not in (12, 24):
return('Term should be either 12 or 24 months')
#test the amount provided
if amount < 1000 or amount > 20000:
return('Amount should be between £1,000 and £20,000')
#create series of amounts
amt = list(range(1000, 20001, 1000))
#create a series with the fee and the respective amount
if term == 12:
s = pd.Series(term12, index = amt)
else:
s = pd.Series(term24, index = amt)
#if amount provided not in the df then create new series
if amount not in s.index:
s.loc[amount] = np.nan
#sort the updated series and apply interpolation if missing values
s = s.sort_index()
s = s.interpolate(method='index')
fee = s[s.index == amount]
#make sure that fee is rounded to be an exact multiple of 5
if int(fee) % 5 == 0:
return(int(fee))
else:
return(int(fee) - int(fee) % 5)
# Ad-hoc smoke invocation (result is discarded); the commented calls below
# cover the remaining tabulated and interpolated scenarios.
feecalculator(24, 1000)
#feecalculator(24, 2000)
#feecalculator(24, 3000)
#feecalculator(24, 20000)
#feecalculator(24, 19198)
#
#feecalculator(12, 1000)
#feecalculator(12, 3000)
#feecalculator(12, 20000)
#feecalculator(12, 19198)
| true |
210aeac78e3e6bd11a10e58d6bc1a8d86a2c30fe | Python | ReshmaRajanChinchu/python-programming | /python29.py | UTF-8 | 135 | 3.328125 | 3 | [] | no_license | a=int(input("enter the time"))
# Convert the minute count read into `a` above to "hours:minutes" text.
# BUGFIX: the original branched on a <= 60 and printed the raw value, so an
# input of exactly 60 produced "0:60"; divmod handles every case uniformly.
hours, minutes = divmod(a, 60)
print(str(hours) + ":" + str(minutes))
| true |
efffd1a5f8a968cedb6ca5cfbde48170962a0f70 | Python | neizod/problems | /acm/uva/623-500factorial.py | UTF-8 | 264 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
from math import factorial
def main():
    """Read integers from stdin until EOF, echoing "n!" and then n! each time."""
    while True:
        try:
            n = int(input())
        except EOFError:
            return
        print('{}!'.format(n))
        print(factorial(n))


if __name__ == '__main__':
    main()
| true |
1235262f0f8c742479aeb437efd965c1f3bd326c | Python | hcsullivan12/pixsim | /python/pixsim/velocity.py | UTF-8 | 1,487 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python
'''
Electron drift functionality.
'''
import math
import numpy
from collections import namedtuple
def mobility_function(Emag, temperature = 89):
    '''
    Electron drift mobility for the given electric-field magnitude and
    temperature (empirical fit normalized to 89 K).  Units need to be
    kV, cm; the result is in cm^2/V/s.
    '''
    # empirical fit coefficients
    a0 = 551.6      # cm2/sec
    a1 = 7953.7     # cm2/sec/kV
    a2 = 4440.43    # cm2/sec/kV^3/2
    a3 = 4.29       # cm2/sec/kV^5/2
    a4 = 43.63      # cm2/sec/kV^2
    a5 = 0.2053     # cm2/sec/kV^3
    t_rel = temperature / 89
    # powers of the field magnitude built by repeated multiplication
    e_sq = Emag * Emag
    e_cu = Emag * e_sq
    e_fifth = e_sq * e_cu
    numerator = a0 + a1 * Emag + a2 * math.sqrt(e_cu) + a3 * math.sqrt(e_fifth)
    denominator = (1 + (a1 / a0) * Emag + a4 * e_sq + a5 * e_cu) * math.sqrt(t_rel * t_rel * t_rel)
    return numerator / denominator
# numpy-vectorized wrapper so mobility() accepts array-valued field magnitudes
# (note: numpy.vectorize is a convenience element-wise loop, not SIMD).
mobility = numpy.vectorize(mobility_function)
def drift(potential, linspaces, temperature=89, **kwds):
    '''
    Return the drift-velocity vector field derived from a scalar potential.

    potential   : N-d array of potential samples [V]
    linspaces   : one uniformly spaced coordinate array [cm] per axis of potential
    temperature : temperature [K] forwarded to the mobility model
    Returns a one-element list holding a pixsim Array named "vfield" whose
    data is the velocity field in cm/us.
    '''
    # BUGFIX: was a Python-2-only `print` statement; the call form below is
    # valid under both Python 2 and 3.
    print('Drifting...')
    # grid spacing along each axis (linspaces assumed uniformly spaced)
    dxyz = [(ls[1]-ls[0]) for ls in linspaces]
    # NOTE(review): the field is taken as +gradient(potential); the physical
    # field is -grad(V) - confirm the sign convention used upstream.
    E = numpy.asarray(numpy.gradient(potential, *dxyz))
    # potential is in V and linspaces should be in cm, convert to kV/cm
    E /= 1000.
    Emag = numpy.sqrt(E[0]**2 + E[1]**2 + E[2]**2)
    # mu comes back as cm2/Vs
    mu = mobility(Emag, temperature)
    # convert velocity to cm/us
    vel = mu*E/1000.
    from pixsim.models import Array
    return [ Array(typename='vector', name='vfield', data=vel) ]
| true |
441eedb34d9ec28a5fe03a4e1198d22f0f04b360 | Python | nestoop/base-python | /base/ai/LineRegression-Lasso.py | UTF-8 | 4,153 | 2.953125 | 3 | [] | no_license | #!/usr/bin/python
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Compare ordinary least squares with Ridge (L2) and Lasso (L1) regression
# on the Boston housing data while sweeping the regularization strength.
# NOTE(review): load_boston was removed in scikit-learn 1.2; this script
# needs an older sklearn.
X_boson, y_boson = load_boston(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X_boson, y_boson, random_state=0)
lr = LinearRegression().fit(X_train, y_train)
print("LinearRegression Train set score: {:.2f}".format(lr.score(X_train, y_train)))
print("LinearRegression Test set score: {:.2f}".format(lr.score(X_test, y_test)))
ridge = Ridge().fit(X_train, y_train)
print("LinearRegression Ridge Train set score: {:.2f}".format(ridge.score(X_train, y_train)))
print("LinearRegression Ridge Test set score: {:.2f}".format(ridge.score(X_test, y_test)))
ridge_01 = Ridge(alpha=0.1).fit(X_train, y_train)
print("LinearRegression Ridge alpha=0.1 ,Train set score: {:.2f}".format(ridge_01.score(X_train, y_train)))
print("LinearRegression Ridge alpha=0.1 ,Test set score: {:.2f}".format(ridge_01.score(X_test, y_test)))
ridge_1 = Ridge(alpha=1).fit(X_train, y_train)
print("LinearRegression Ridge ,alpha=1 Train set score: {:.2f}".format(ridge_1.score(X_train, y_train)))
print("LinearRegression Ridge ,alpha=1, Test set score: {:.2f}".format(ridge_1.score(X_test, y_test)))
ridge_10 = Ridge(alpha=10).fit(X_train, y_train)
print("LinearRegression Ridge alpha=10 Train set score: {:.2f}".format(ridge_10.score(X_train, y_train)))
print("LinearRegression Ridge alpha=10,Test set score: {:.2f}".format(ridge_10.score(X_test, y_test)))
# Lasso additionally zeroes out coefficients, so also report how many
# features survive at each alpha.
lasso = Lasso().fit(X_train, y_train)
print("LinearRegression Lasso Train set score: {:.2f}".format(lasso.score(X_train, y_train)))
print("LinearRegression Lasso Test set score: {:.2f}".format(lasso.score(X_test, y_test)))
print("LinearRegression Lasso Number of features used: {:.2f}".format(np.sum(lasso.coef_ != 0)))
lasso_01 = Lasso(alpha=0.1, max_iter=100000).fit(X_train, y_train)
print("LinearRegression Lasso alpha=0.1 Train set score: {:.2f}".format(lasso_01.score(X_train, y_train)))
print("LinearRegression Lasso alpha=0.1 Test set score: {:.2f}".format(lasso_01.score(X_test, y_test)))
print("LinearRegression Lasso alpha=0.1 Number of features used: {:.2f}".format(np.sum(lasso_01.coef_ != 0)))
lasso_001 = Lasso(alpha=0.01, max_iter=100000).fit(X_train, y_train)
print("LinearRegression Lasso alpha=0.01 Train set score: {:.2f}".format(lasso_001.score(X_train, y_train)))
print("LinearRegression Lasso alpha=0.01 Test set score: {:.2f}".format(lasso_001.score(X_test, y_test)))
print("LinearRegression Lasso alpha=0.01 Number of features used: {:.2f}".format(np.sum(lasso_001.coef_ != 0)))
lasso_0001 = Lasso(alpha=0.001, max_iter=100000).fit(X_train, y_train)
print("LinearRegression Lasso alpha=0.001 Train set score: {:.2f}".format(lasso_0001.score(X_train, y_train)))
print("LinearRegression Lasso alpha=0.001 Test set score: {:.2f}".format(lasso_0001.score(X_test, y_test)))
print("LinearRegression Lasso alpha=0.001 Number of features used: {:.2f}".format(np.sum(lasso_0001.coef_ != 0)))
lasso_00001 = Lasso(alpha=0.0001, max_iter=100000).fit(X_train, y_train)
print("LinearRegression Lasso alpha=0.0001 Train set score: {:.2f}".format(lasso_00001.score(X_train, y_train)))
print("LinearRegression Lasso alpha=0.0001 Test set score: {:.2f}".format(lasso_00001.score(X_test, y_test)))
print("LinearRegression Lasso alpha=0.0001 Number of features used: {:.2f}".format(np.sum(lasso_00001.coef_ != 0)))
# Coefficient-magnitude comparison plot, saved as lassoExample.png.
lassoExample = plt.plot(lasso.coef_, 's', label="lasso alpha=1")
lassoExample = plt.plot(lasso_001.coef_, '^', label="lasso alpha=0.01")
lassoExample = plt.plot(lasso_00001.coef_, 'v', label="lasso alpha=0.0001")
lassoExample = plt.plot(ridge_01.coef_, 'o', label="Ridge alpha=0.01")
lassoExample = plt.legend(ncol=2, loc=(0, 1.05))
lassoExample = plt.ylim(-25, 25)
lassoExample = plt.xlabel("Coefficient index")
# BUG FIX: the original called plt.xlabel twice, overwriting the x-axis
# text and leaving the y-axis unlabeled; the second label is the y label.
lassoExample = plt.ylabel("Coefficient magnitude")
lassoExample.figure.savefig("lassoExample", bbox_inches='tight')
# plt.show()
| true |
3adfe4e6b8b5a375c06989652aff3abdd9b7d376 | Python | jlovering/AdventOfCode2020 | /Day16/day16-1.py | UTF-8 | 2,209 | 3.015625 | 3 | [] | no_license | #!/bin/python
import sys
import copy
import re
# Puzzle input path comes from the command line; the handle stays open for
# the whole run and is closed implicitly at interpreter exit.
infile = open(sys.argv[1], "r")
class Validator:
    """A named ticket-field rule made of inclusive [min, max] ranges."""

    def __init__(self, name):
        self.name = name
        self.ranges = []

    def addRule(self, minV, maxV):
        """Register one inclusive range for this field."""
        self.ranges.append((minV, maxV))

    def checkAllRanges(self, value):
        """Return True when ``value`` falls inside at least one range."""
        return any(lo <= value <= hi for lo, hi in self.ranges)

    def __str__(self):
        # Assumes exactly two ranges, matching the puzzle's input format.
        (a_lo, a_hi), (b_lo, b_hi) = self.ranges[0], self.ranges[1]
        return "%d - %d or %d - %d" % (a_lo, a_hi, b_lo, b_hi)
class Ticket:
    """Thin wrapper around one ticket's sequence of field values."""

    def __init__(self, fields):
        self.fields = fields

    def getFields(self):
        """Return the stored field values."""
        return self.fields

    def __str__(self):
        return "%s" % (self.fields,)
valitators = []
myTicket = None
otherTickets = []
def parseTicket(line):
    """Parse a comma-separated line of integers into a Ticket.

    The values are materialized into a list: the original returned a lazy
    ``map`` object, which can only be iterated once.
    """
    return Ticket([int(f) for f in line.split(',')])
def inputParse(file):
    """Parse the puzzle input: field rules, our ticket, nearby tickets.

    Populates the module-level ``valitators``, ``myTicket`` and
    ``otherTickets``.  Reads from the ``file`` argument; the original
    ignored it and read the global ``infile`` directly.
    Raises Exception on a malformed rule line or a missing section header.
    """
    global valitators, myTicket, otherTickets
    # Section 1: field rules, one per line, until a blank line.
    line = file.readline().rstrip()
    while line != "":
        lineM = re.match(r"(.+): (\d+)-(\d+) or (\d+)-(\d+)", line)
        if lineM is None:
            raise Exception("Bad match:\"%s\"" % line)
        v = Validator(lineM.group(1))
        v.addRule(int(lineM.group(2)), int(lineM.group(3)))
        v.addRule(int(lineM.group(4)), int(lineM.group(5)))
        valitators.append(v)
        line = file.readline().rstrip()
    # Section 2: our own ticket.
    line = file.readline().rstrip()
    if line != "your ticket:":
        raise Exception("Malformed")
    line = file.readline().rstrip()
    # parseTicket already returns a Ticket; the original wrapped it in a
    # second Ticket() by mistake.
    myTicket = parseTicket(line)
    # Section 3: nearby tickets until EOF.
    line = file.readline().rstrip()
    line = file.readline().rstrip()
    if line != "nearby tickets:":
        raise Exception("Malformed")
    line = file.readline().rstrip()
    while line and line != "":
        otherTickets.append(parseTicket(line))
        line = file.readline().rstrip()
inputParse(infile)

# Part 1: collect every field value (across all nearby tickets) that
# satisfies no rule at all, then print the list and its sum.
invalidTicketValues = []
for ticket in otherTickets:
    for field in ticket.getFields():
        if not any(v.checkAllRanges(field) for v in valitators):
            invalidTicketValues.append(field)
print(invalidTicketValues, sum(invalidTicketValues))
5c1aa93123f3575da75f3c919e927d4df85b7347 | Python | ricardomokhtari/ML-Models | /Clustering/hierarchical_clustering.py | UTF-8 | 1,511 | 3.78125 | 4 | [] | no_license | """
Implemetation of Hierarchical clustering algorithm for 2 independent variables:
- Annual income
- Spending score
Optimal cluster number is identified through examining dendrogram
"""
# import libraries
import pandas as pd
import matplotlib.pyplot as plt

# Load the mall-customer data; columns 3 and 4 are annual income and
# spending score.
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values

# Dendrogram (Ward linkage) used to eyeball the optimal cluster count.
import scipy.cluster.hierarchy as sch
plt.figure(1)
dendrogram = sch.dendrogram(sch.linkage(X, method='ward'))
plt.title('Dendrogram')
plt.xlabel('Customers')
plt.ylabel('Euclidean distances')
plt.show()

# Fit agglomerative clustering with the 5 clusters read off the dendrogram.
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage='ward')
y_hc = hc.fit_predict(X)

# Plot each cluster in its own colour.
plt.figure(2)
for idx, colour in enumerate(('red', 'blue', 'green', 'pink', 'black')):
    plt.scatter(X[y_hc == idx, 0], X[y_hc == idx, 1], s=100, c=colour,
                label='Cluster %d' % (idx + 1))
plt.title('Clusters')
plt.xlabel('Annual income')
plt.ylabel('Spending score')
plt.legend()
plt.show()
bb00f7bb43276868f69ebce79b8bf63682781beb | Python | 0xchamin/Machine-Learning-Python | /Fundamentals/python_sandbox_finished/functions.py | UTF-8 | 769 | 5.125 | 5 | [] | no_license | # A function is a block of code which only runs when it is called. In Python, we do not use parentheses and curly brackets, we use indentation with tabs or spaces
# Create function
def sayHello(name = 'Sam'):
    """Print a greeting for ``name`` (defaults to 'Sam')."""
    print('Hello ' + name)
# Return value
def getSum(num1, num2):
    """Return the sum of the two arguments."""
    return num1 + num2
numSum = getSum(2, 3)


def addOneToNum(num):
    """Return num + 1 (rebinding the parameter does not affect the caller)."""
    num += 1
    return num


num = 5
new_num = addOneToNum(num)
print(new_num)

# A lambda is a small anonymous function: any number of arguments but only
# one expression (similar to JS arrow functions).  These rebind the names
# defined above.
getSum = lambda num1, num2 : num1 + num2
print(getSum(9, 2))

addOneToNum = lambda num : num + 1
print(addOneToNum(5))
df29db0768db2eba89643c9aeb454032f38871ac | Python | TonsOfBricks/projects | /arrows.py | UTF-8 | 3,678 | 3.859375 | 4 | [] | no_license | """
Author: Nikita Sinkha
File: arroes.py
Date:3/2/2018
"""
import turtle as t
import random as r
import math as m
def border():
    """
    func: border()
    Draws the 500x500 square border in blue, then restores black ink.
    """
    t.pencolor("blue")
    t.forward(500)
    # Three more sides: turn left 90 degrees, then draw.
    for _ in range(3):
        t.left(90)
        t.forward(500)
    t.pencolor("black")
def init():
    """
    func: init()
    Initializes the canvas for the turtle to draw: fastest speed, world
    coordinates centred on the origin, and the square border drawn.
    """
    t.speed(0)
    t.home()
    # Map the window so (0,0) is the centre and the canvas spans 500x500.
    t.setworldcoordinates(-250, -250, 250, 250)
    t.up()
    t.goto(-250, -250)
    t.down()
    border()
    t.up()
    t.home()
    t.down()
def triangles(s):
    """
    func: triangles()
    Draws a triangle outline: the base is split around the start point
    (s before and s after), with two sides of length 2*s.
    param1: s -> half the base length
    """
    t.forward(s)
    # Two long sides, each preceded by a 120-degree left turn.
    for _ in range(2):
        t.left(120)
        t.forward(2 * s)
    t.left(120)
    t.forward(s)
def size():
    """Random triangle base half-length, 1..30 inclusive."""
    return r.randint(1, 30)


def angle():
    """Random turn angle in degrees, 0..360 inclusive."""
    return r.randint(0, 360)


def length():
    """Random hop distance between triangles, 1..100 inclusive."""
    return r.randint(1, 100)


def colors():
    """Random color channel intensity in [0, 1)."""
    return r.random()


def max_num():
    """Upper bound on the number of triangles a user may request."""
    return 500
def drawFigureRec(n, area=0):
    """
    func: drawFigureRec():
    Recursively draws n randomly sized and colored filled triangles,
    hopping a random distance and turning a random angle between them.
    param1: n -> number of triangles still to draw
    param2: area = 0 -> running total of the triangle areas
    Returns the accumulated area once n reaches 0.
    """
    t.down()
    s = size()
    if n > 0:
        t.begin_fill()
        t.color(colors(), colors(), colors())
        triangles(s)
        # NOTE(review): sqrt(3)/4 * s^2 is the area of an equilateral
        # triangle of side s, but triangles(s) draws a base of 2*s and two
        # sides of 2*s -- confirm the intended area formula.
        temp = ((m.sqrt(3)/4)*s**2)
        t.end_fill()
        t.up()
        t.forward(length())
        t.left(angle())
        # If the turtle wandered outside the inner 400x400 box, jump home
        # and take one more random hop before recursing.
        if (t.xcor() > 200 or t.ycor() > 200 or t.xcor()
            < -200 or t.ycor() < -200):
            t.home()
            t.forward(length())
            t.left(angle())
            return drawFigureRec(n-1, area+temp)
        else:
            return drawFigureRec(n-1, area+temp)
    else:
        return area
def main():
    """
    func: main()
    Entry point: prompts for a triangle count (0-500), draws them, and
    reports the total filled area; re-prompts when the input exceeds 500.
    """
    area = 0
    lim = max_num()
    num = int(input("Enter the number of triangles to be drawn (0-500):"))
    if num <= lim:
        init()
        print("Total area covered by the turtle:",
              drawFigureRec(num, area), "square units.")
        print("Number of triangles:", num)
        print("Thank you!")
        t.done()
    else:
        # NOTE(review): recursing re-prompts on too-large input, but
        # negative input slips through the `num <= lim` check.
        print("Enter a number less than 500")
        return main()
if __name__ == '__main__' :
    main()
| true |
339ccd4d5e13b88ad7de380d93130e9ebef34876 | Python | Dragon-Dane/project-azua | /azua/models/bnn/nn/nets.py | UTF-8 | 2,986 | 2.71875 | 3 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | from typing import List, Union
import torch.nn as nn
import torchvision
def make_network(architecture: str, *args, **kwargs):
    """Factory for the supported network families.

    'fcn' and 'cnn' build the local classes; any 'resnet*' name builds the
    matching torchvision model re-headed for kwargs['out_features'], with
    an optional replacement stem conv (kernel_size/stride/padding/
    in_channels) and optional maxpool removal.
    """
    if architecture == "fcn":
        return FCN(**kwargs)
    if architecture == "cnn":
        return CNN(**kwargs)
    if not architecture.startswith("resnet"):
        raise ValueError("Unrecognized network architecture:", architecture)

    net = getattr(torchvision.models, architecture)(num_classes=kwargs["out_features"])
    if "kernel_size" in kwargs:
        # Swap the stem conv for one with a custom receptive field.
        k = kwargs["kernel_size"]
        net.conv1 = nn.Conv2d(
            kwargs.get("in_channels", 3),
            net.conv1.out_channels,
            k,
            kwargs.get("stride", 1),
            kwargs.get("padding", k // 2),
            bias=net.conv1.bias is not None,
        )
        # NOTE(review): maxpool removal is only honored when kernel_size
        # is also given -- this mirrors the original nesting; confirm it
        # is intentional.
        if kwargs.get("remove_maxpool", False):
            net.maxpool = nn.Identity()
    return net
class FCN(nn.Sequential):
    """Fully connected network: Linear (+ optional BatchNorm1d) blocks,
    with a nonlinearity after every block except the last."""

    def __init__(
        self, sizes: List[int], nonlinearity: Union[str, type] = "ReLU", bn: bool = False, **layer_kwargs,
    ):
        super().__init__()
        # Resolve a string like "ReLU" to the torch.nn class.
        nonl_cls = getattr(nn, nonlinearity) if isinstance(nonlinearity, str) else nonlinearity
        # When batch norm follows, the linear bias is redundant.
        layer_kwargs.setdefault("bias", not bn)
        n_layers = len(sizes) - 1
        for idx, (d_in, d_out) in enumerate(zip(sizes, sizes[1:])):
            self.add_module(f"Linear{idx}", nn.Linear(d_in, d_out, **layer_kwargs))
            if bn:
                self.add_module(f"BN{idx}", nn.BatchNorm1d(d_out))
            if idx < n_layers - 1:
                # (module-name typo "Nonlinarity" kept for state-dict
                # compatibility with the original)
                self.add_module(f"Nonlinarity{idx}", nonl_cls())
class CNN(nn.Sequential):
    """Basic CNN: Conv (+ optional BatchNorm2d) + nonlinearity blocks with
    optional max-pooling every ``maxpool_freq`` blocks, followed by a
    Flatten and an FCN head."""

    def __init__(
        self,
        channels: List[int],
        lin_sizes: List[int],
        nonlinearity: Union[str, type] = "ReLU",
        maxpool_freq: int = 1,
        conv_bn: bool = False,
        linear_bn: bool = False,
        kernel_size: int = 3,
        **conv_kwargs,
    ):
        super().__init__()
        nonl_cls = getattr(nn, nonlinearity) if isinstance(nonlinearity, str) else nonlinearity
        # When batch norm follows, the conv bias is redundant.
        conv_kwargs.setdefault("bias", not conv_bn)
        for idx, (c_in, c_out) in enumerate(zip(channels, channels[1:])):
            self.add_module(f"Conv{idx}", nn.Conv2d(c_in, c_out, kernel_size, **conv_kwargs))
            if conv_bn:
                self.add_module(f"ConvBN{idx}", nn.BatchNorm2d(c_out))
            self.add_module(f"ConvNonlinearity{idx}", nonl_cls())
            # maxpool_freq of 0/None disables pooling entirely.
            if maxpool_freq and (idx + 1) % maxpool_freq == 0:
                self.add_module(f"Maxpool{idx//maxpool_freq}", nn.MaxPool2d(2, 2))
        self.add_module("Flatten", nn.Flatten())
        self.add_module("fc", FCN(lin_sizes, nonlinearity=nonlinearity, bn=linear_bn))
| true |
11a3dd606c22cfce37a4c6274ea4c2ec4b0749e9 | Python | langokalla/TTM4100-2017 | /CHAT_CLIENT/MessageParser.py | UTF-8 | 1,676 | 3.296875 | 3 | [] | no_license | import json
class MessageParser():
    """Routes decoded JSON payloads from the chat server to a handler
    chosen by the payload's 'response' field, printing them for the user."""

    def __init__(self, client):
        # Maps the server's response type to the bound handler method.
        self.possible_responses = {
            'error': self.parse_error,
            'info': self.parse_info,
            'message': self.parse_message,
            'history': self.parse_history,
            'names': self.parse_names
        }
        self.client = client

    def parse(self, payload):
        """Decode a JSON payload (bytes) and dispatch it to its handler."""
        payload = json.loads(payload.decode())
        if payload['response'] in self.possible_responses:
            return self.possible_responses[payload['response']](payload)
        else:
            print("You’ve met with a terrible fate, haven’t you?")

    def _print_payload(self, payload, trailing_blank):
        # Shared pretty-printer: header line, content, optional blank line.
        # (The original duplicated this in four handlers.)
        print("Sender:\t" + payload["sender"] + "\t" + payload["timestamp"])
        print(payload["content"])
        if trailing_blank:
            print()

    def parse_error(self, payload):
        # Errors are printed without the trailing blank line.
        self._print_payload(payload, trailing_blank=False)

    def parse_info(self, payload):
        self._print_payload(payload, trailing_blank=True)

    def parse_message(self, payload):
        self._print_payload(payload, trailing_blank=True)

    def parse_history(self, payload):
        # History entries are rendered exactly like live messages.
        self.parse_message(payload)

    def parse_names(self, payload):
        self._print_payload(payload, trailing_blank=True)
5051a70b36c2be5ac4693e3ae6aab6ab32d869e8 | Python | sprithiv/Algorithm-Design | /largest_independent_set.py | UTF-8 | 2,387 | 3.4375 | 3 | [] | no_license | import random
import time
def find_largest_independent_set(matrix):
    """Greedily derive one candidate independent set per starting vertex
    from an adjacency matrix and print every candidate of maximum size.

    matrix: square 0/1 adjacency matrix (list of lists).
    """
    n = len(matrix)
    # Build a 1-indexed adjacency list from the matrix.
    edges = {}
    for i in range(n):
        neighbors = []
        for j in range(n):
            if matrix[i][j] != 0:
                neighbors.append(j + 1)
        edges[i + 1] = neighbors

    # One deduplicated candidate set per starting vertex.
    independent_sets = []
    for vertex in edges:
        candidate = find_independent_set(vertex, edges)
        if candidate not in independent_sets:
            independent_sets.append(candidate)

    # The original shadowed the builtin ``max`` with a hand-rolled loop;
    # its floor of 2 (sets smaller than 2 are never reported) is kept.
    if independent_sets:
        best = max(2, max(len(s) for s in independent_sets))
    else:
        best = 2
    for s in independent_sets:
        if len(s) == best:
            # print() call form works on both Python 2 and 3 (the
            # original used the Python-2-only statement form).
            print(s)
def find_independent_set(val, edges):
    """Greedy independent set seeded at vertex ``val``.

    Removes val's neighbours, then sweeps the survivors removing the
    neighbours of every remaining vertex.  Returns the surviving vertices.

    NOTE: the second loop mutates ``nodes`` while iterating it, exactly as
    the original did -- the sweep can skip elements after a removal.
    """
    # list(...) so .remove() also works on Python 3, where range() is lazy.
    nodes = list(range(1, len(edges) + 1))
    for node in edges[val]:
        if node in nodes:
            nodes.remove(node)
    for value in nodes:
        if value != val:
            for node in edges[value]:
                if node in nodes:
                    nodes.remove(node)
    return nodes
def generate_random_graph(n):
    """Random undirected simple graph on n vertices, returned as a
    symmetric 0/1 adjacency matrix with a zero diagonal."""
    adjacency = [[0] * n for _ in range(n)]
    # Fill only the lower triangle and mirror it for symmetry.
    for i in range(n):
        for j in range(i):
            adjacency[i][j] = adjacency[j][i] = random.randint(0, 1)
    return adjacency
def main():
    """Benchmark driver: build a random 200-vertex graph and time the
    greedy independent-set search (prints the sets and the elapsed time)."""
    #Testcase 1
    #matrix = [[0,1,0,1,1,0,0],[1,0,1,1,0,1,0],[0,1,0,1,1,1,1],
    #          [1,1,1,0,1,1,0],[1,0,1,1,0,1,0],[0,1,1,1,1,0,0],
    #          [0,0,1,0,0,0,0]]

    #Testcase 2
    #matrix = [[0,0,1,0,0,1,0,1,1,0,0],[0,0,1,1,1,0,0,1,1,0,1],[1,1,0,1,1,0,1,0,1,0,1],
    #          [0,1,1,0,1,0,1,1,1,0,0],[0,1,1,1,0,1,1,0,1,1,0],[1,0,0,0,1,0,1,1,1,1,1],
    #          [0,0,1,1,1,1,0,1,1,0,0],[1,1,0,1,0,1,1,0,1,0,1],[1,1,1,1,1,1,1,1,0,0,1],
    #          [0,0,0,0,1,1,0,0,0,0,1],[0,1,1,0,0,1,0,1,1,1,0]]

    #Find if the graph contains circle
    start_time = time.time()
    matrix = generate_random_graph(200)
    find_largest_independent_set(matrix)
    # Wall-clock time including the graph generation above? No -- the
    # timer starts before generation, so it covers both steps.
    print("Time to compute the largest independent set is", (time.time() - start_time))
main() | true |
eb8086082a7f5f74df5d5acc28d5eac15e25e60a | Python | chaynika/smlAir | /correlation.py | UTF-8 | 364 | 2.796875 | 3 | [] | no_license | # DO NOT RUN, IT RUNS ON final_table.csv SIZE = 1.12 GB. final_table.csv is stored locally on Sarthak's system
# Opens final_table.csv (1.12 GB, stored locally -- see the warning above)
# and writes the per-column correlation with country_destination.
import pandas as pd

df = pd.read_csv('final_table.csv')
# Correlate feature columns 2..1317 against the destination column.
a = df[df.columns[2:1318]].apply(lambda col: col.corr(df['country_destination']))
print(a)
a.to_csv('Correlation.csv')
| true |
bb7aa5cc3fbddea15d8c7163234c12999cf30bbb | Python | JavierChames/hanoi_tower_python | /test_hanoi.py | UTF-8 | 403 | 2.53125 | 3 | [] | no_license | import hanoy
# Shared input file consumed by hanoy.check_disk_content in the first test.
file=open("test3.txt")
def test_check_disk_content():
    # Expected: all three disks on the first peg, the module's disk count,
    # and the goal stack -- presumably [state, n_disks, goal]; confirm
    # against the hanoy module.
    result=hanoy.check_disk_content(file)
    assert result == [[[1, 2, 3], [], []],hanoy.number_of_disk,[1, 2, 3]]
def test_moves_content():
    # Moves are two-digit ints -- presumably <from-peg><to-peg>; confirm.
    result=hanoy.moves
    assert result == [13, 12, 32, 13, 21, 23, 13]
def test_check_disk_moves():
    # "YES" -- presumably means the recorded moves solve the puzzle.
    result=hanoy.result_hanoi
    assert result == "YES"
ce7a6ce66726ed06309d86bfbcf32ebc15174363 | Python | noticeable/ExtensionOPs | /nms/test_OBB_nms_gpu.py | UTF-8 | 2,519 | 2.5625 | 3 | [] | no_license | import numpy as np
import torch
import cv2
import math
from OBB_NMS_GPU.r_nms import r_nms
def get_rotated_coors(box):
    """Return the 8-vector (x0, y0, ..., x3, y3) of corner coordinates of
    ``box`` = [cx, cy, w, h, a], rotated by ``a`` radians about the centre
    (anti-clockwise, per cv2.getRotationMatrix2D).  Works elementwise for
    plain floats or torch tensors."""
    assert len(box) > 0 , 'Input valid box!'
    cx, cy, w, h, ang = box[0], box[1], box[2], box[3], box[4]
    xmin = cx - w * 0.5
    xmax = cx + w * 0.5
    ymin = cy - h * 0.5
    ymax = cy + h * 0.5

    # 2x3 affine rotation about the box centre, embedded in a 3x3 matrix.
    R = np.eye(3)
    R[:2] = cv2.getRotationMatrix2D(angle=ang * 180 / math.pi, center=(cx, cy), scale=1)

    # Unrotated corners, in the original output order.
    corners = ((xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin))
    flat = []
    for px, py in corners:
        flat.append(px * R[0, 0] + py * R[0, 1] + R[0, 2])
        flat.append(px * R[1, 0] + py * R[1, 1] + R[1, 2])

    if isinstance(flat[0], torch.Tensor):
        r_box = torch.cat([v.unsqueeze(0) for v in flat], 0)
    else:
        r_box = np.array(flat)
    return r_box
if __name__ == '__main__':
    # Demo: run rotated-box NMS on four hand-built boxes, then draw the
    # boxes before and after suppression to two PNG files.
    # Rows are [cx, cy, w, h, theta, score].
    boxes = np.array([[150, 150, 100, 100, -0.1342, 0.99],
                  [160, 160, 100, 100, 0, 0.88],
                  [150, 150, 100, 100, -0.7854, 0.66],
                  [300, 300, 100, 100, -0.2332, 0.77]],dtype=np.float32)
    # .cuda(1) pins the tensor to GPU index 1 -- requires >= 2 CUDA devices.
    dets_th=torch.from_numpy(boxes).cuda(1)
    iou_thr = 0.1
    """
    r_nms(dets_th, iou_thr)
    Format:
    dets_th = type tensor [ [x1, y1, x2, y2, theta, score], [x1, y1, x2, y2, theta, score], ...]
    iou_thr = type float
    inds = type tensor
    """
    inds = r_nms(dets_th, iou_thr)
    inds_arr = inds.cpu().numpy()
    print(inds)
    # Two white 832x832 canvases: all boxes vs. surviving boxes.
    img = np.zeros((416*2, 416*2, 3), np.uint8)
    img.fill(255)
    img1 = np.zeros((416*2,416*2,3), np.uint8)
    img1.fill(255)
    # Drop the score column before converting to drawable corner polygons.
    boxes = boxes[:,:-1]
    keep_boxes = boxes[inds_arr]
    boxes = [get_rotated_coors(i).reshape(-1,2).astype(np.int32) for i in boxes]
    for box in boxes:
        img = cv2.polylines(img, [box],True,(0,0,255),1)
    cv2.imwrite('test_OBB_NMS_GPU_before.png', img)
    keep_boxes = [get_rotated_coors(i).reshape(-1,2).astype(np.int32) for i in keep_boxes]
    for box in keep_boxes:
        img1 = cv2.polylines(img1, [box], True, (0,0,255), 1)
    cv2.imwrite('test_OBB_NMS_GPU_after.png', img1)
| true |
b61a4d693cfb1c7721ed97105f126968f4805892 | Python | mingen-pan/Scraping-H1B-visa-info | /example.py | UTF-8 | 439 | 2.703125 | 3 | [
"MIT"
] | permissive | ### For details, please read the "README" file.
### For details, please read the "README" file.
from h1b_scraper import H1B_Scraper

# Scrape H1B visa tables for each (year, job) pair, then pull out one table.
scraper = H1B_Scraper()
years = list(range(2017, 2019))
jobs = ["Software Engineer", "Data Scientist"]
tables = scraper.scrape(years, jobs)
print(tables)

table = scraper.extract_specific_table(tables, 2018, "Software Engineer")
print(table)
| true |
6cef56606376e523a9406aea9b5b9c9daca1be83 | Python | bdelliott/eprofile | /eprofile/fixtures.py | UTF-8 | 746 | 2.96875 | 3 | [] | no_license | """ Misc test code to profile. """
import time
from eventlet import greenthread
from eventlet import greenpool
def foo(i):
    """Call bar() (which yields control) and echo the argument back."""
    bar()
    return i


def bar():
    """Zero-length sleep, used to force a greenthread swap."""
    time.sleep(0.0)
def multi(n=2):
    """Run foo() across n greenthreads and collect the results."""
    count = int(n)
    pool = greenpool.GreenPool(count)
    collected = []
    for value in pool.imap(foo, range(count)):
        collected.append(value)
def fib(n):
    """Naive exponential-time Fibonacci (F(0)=0, F(1)=1).

    Deliberately unoptimized: this module exists to give profilers real
    work to attribute."""
    n = int(n)
    if n == 0:
        return 0
    if n == 1:
        return 1
    return fib(n - 1) + fib(n - 2)
def swap():
    """Spawn two greenthreads (one computes, one sleeps) and join both."""
    worker = greenthread.spawn(foo, 1)
    sleeper = greenthread.spawn(wait)
    worker.wait()
    sleeper.wait()
def wait():
    """Block for 0.2 s, giving the other greenthread time to run."""
    time.sleep(0.2)
def outer():
    """One-level call chain used to exercise the profiler's stack handling."""
    inner()


def inner():
    """Leaf function; does nothing."""
    return
| true |
f9caa91c24f13a66d52f53189055660ad63f4898 | Python | jun-yoshiyoshi/python_plactice100 | /p33.py | UTF-8 | 405 | 4.15625 | 4 | [] | no_license | # list関数 set関数 &
# Values common to both lists, two ways: a manual loop and set intersection.
l1 = ['Python', 'Ruby', 'PHP', 'JavaScript']
l2 = ['Java', 'Ruby', 'Golang', 'Python', 'TypeScript']

# Manual approach: preserves l1's order and skips duplicates.
new_l = []
for lang in l1:
    if lang in l2 and lang not in new_l:
        new_l.append(lang)
print(f'共通する値を格納したリスト : {new_l}')

# Set intersection: concise, but the resulting order is unspecified.
L = list(set(l1) & set(l2))
print(f"共通する値を格納したリスト:{L}")
| true |
73dfedf30ca00ebb1b48b4db9011e0b463a9a654 | Python | indraastra/ita-puzzles | /src/bitvector/bitvector.py | UTF-8 | 1,889 | 3.0625 | 3 | [] | no_license | import util
class BitVector:
    """A bit genome in a mutation tree: each vector may have a parent and a
    set of children, and each bit flips parent->child with probability
    P_FLIP."""

    NUM_BITS = 500          # nominal genome width (not enforced here)
    COUNTER = 0             # class-wide instance count, used to assign ids
    P_FLIP = 0.2            # per-bit probability a child flips a parent bit
    P_SAME = 1 - P_FLIP

    def __init__(self, genome, parent=None, children=None):
        # Bump the *class* attribute so ids are unique.  (The original
        # ``self.COUNTER += 1`` only wrote an instance attribute, so every
        # instance ended up with id 1.)
        BitVector.COUNTER += 1
        self.id = BitVector.COUNTER
        self.num_bits = len(genome)
        self.genome = genome
        self.parent = parent
        # Fresh set per instance: the original default ``children=set()``
        # was a mutable default shared by every instance created without
        # an explicit set.
        self.children = set() if children is None else children

    def __str__(self):
        return "BitVector<%s>" % self.genome

    def add_child(self, bv):
        """Attach bv as a child of self (and self as bv's parent)."""
        self.children.add(bv)
        bv.parent = self

    def set_parent(self, bv):
        """Attach bv as the parent of self (and self as bv's child)."""
        self.parent = bv
        bv.children.add(self)

    def flip_diff(self, bv):
        """Hamming distance between the two genomes.

        Uses range() for Python 3 compatibility (the original used the
        Python-2-only xrange)."""
        return sum(self.genome[i] ^ bv.genome[i] for i in range(len(self.genome)))

    def p_parent_of(self, bv):
        """Probability that self mutated into bv."""
        return self.p_flipped(self.flip_diff(bv), len(self.genome))

    def p_child_of(self, bv=None):
        """Probability that ``bv`` (default: self.parent) mutated into self.

        Fix: computed against ``bv`` -- the original always measured
        against self.parent even when bv was passed explicitly.
        Returns 1 for a root (no parent)."""
        if bv is None:
            bv = self.parent
        if bv:
            return self.p_flipped(self.flip_diff(bv), len(self.genome))
        return 1

    @classmethod
    def p_flipped(cls, flipped, total):
        """Probability that exactly the given bits flipped.

        No binomial coefficient, matching the original's (commented-out)
        choice to omit ``choose(total, flipped)``."""
        return cls.P_FLIP ** flipped * cls.P_SAME ** (total - flipped)
def p_lineage(population):
    """Lineage probability: the raw chain probability normalized by the
    (len(population)-1)! possible orderings."""
    orderings = float(util.factorial(len(population) - 1))
    return c_lineage(population) / orderings
def c_lineage(population):
    """Unnormalized chain probability: the product of every member's
    p_child_of() (1 for an empty population)."""
    prob = 1
    for member in population:
        prob *= member.p_child_of()
    return prob
if __name__ == "__main__":
    # Ad-hoc smoke test (Python 2 print syntax, hard-coded absolute path):
    # parse a genome file, then score a tiny hand-built 3-node lineage.
    f = open( "/home/vishal/Workspace/ita-puzzles/src/bitvector/data/bitvectors-genes.data.small" )
    # pop is parsed from the file but not used below -- presumably left
    # over from a larger experiment; confirm before removing.
    pop = [[int( i ) for i in l.rstrip()] for l in f if l]
    a = BitVector( [1] )
    b = BitVector( [0] )
    c = BitVector( [1] )
    # Chain: b -> c -> a.
    a.set_parent( c )
    c.set_parent( b )
    print "lineage:", p_lineage( [a, b, c] )
| true |
c86eb588677455eef673bf1fbeef786e6235e420 | Python | SherbyRobotics/pyro | /examples/demos_by_system/drone/planar_drone_with_lqr.py | UTF-8 | 912 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 3 08:27:06 2021
@author: alex
"""
import numpy as np
from pyro.dynamic.drone import Drone2D
from pyro.analysis.costfunction import QuadraticCostFunction
from pyro.dynamic.statespace import linearize
from pyro.control.lqr import synthesize_lqr_controller
# Non-linear model
sys = Drone2D()
sys.xbar = np.array([0,0,0.0,0,0,0])
# Hover equilibrium: each of the two rotors carries half the weight.
sys.ubar = np.array([0.5,0.5]) * sys.mass * sys.gravity
# Linear model about (xbar, ubar); 0.01 is presumably the perturbation
# step for the numerical linearization -- confirm in pyro's linearize.
ss = linearize( sys , 0.01 )
# Cost function
cf = QuadraticCostFunction.from_sys( sys )
# Small R entries make actuation cheap, so the LQR uses aggressive thrust.
cf.R[0,0] = 0.001
cf.R[1,1] = 0.001
# LQR controller
ctl = synthesize_lqr_controller( ss , cf , sys.xbar , sys.ubar )
# Simulation Closed-Loop Non-linear with LQR controller
cl_sys = ctl + sys
# Start 10 m left of and 1 m below the target, at rest.
cl_sys.x0 = np.array([-10,-1,0,0,0,0])
cl_sys.compute_trajectory(5)
cl_sys.plot_trajectory('xu')
cl_sys.animate_simulation()
abe71a7a949aba7e465b92eeb137b33539563780 | Python | rasmussn/UO-2015-PHYS-410-510 | /week_7/simple_xy_wr.py | UTF-8 | 1,727 | 3.609375 | 4 | [] | no_license | # the Scientific Python netCDF 3 interface
# http://dirac.cnrs-orleans.fr/ScientificPython/
# from Scientific.IO.NetCDF import NetCDFFile as Dataset
# the 'classic' version of the netCDF4 python interface
# http://code.google.com/p/netcdf4-python/
#from netCDF4_classic import Dataset
from netCDF4 import Dataset
from numpy import arange, dtype # array module from http://numpy.scipy.org
"""
This is a very simple example which writes a 2D array of
sample data. To handle this in netCDF we create two shared
dimensions, "x" and "y", and a netCDF variable, called "data".
This example demonstrates the netCDF Python API.
It will work either with the Scientific Python NetCDF version 3 interface
(http://dirac.cnrs-orleans.fr/ScientificPython/)
of the 'classic' version of the netCDF4 interface.
(http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4_classic-module.html)
To switch from one to another, just comment/uncomment the appropriate
import statements at the beginning of this file.
Jeff Whitaker <jeffrey.s.whitaker@noaa.gov> 20070201
"""
# the output array to write will be nx x ny
nx = 6; ny = 12
# open a new netCDF file for writing.
ncfile = Dataset('simple_xy.nc','w')
# create the output data: the integers 0..nx*ny-1.
data_out = arange(nx*ny) # 1d array
data_out.shape = (nx,ny) # reshape to 2d array
# create the x and y dimensions.
ncfile.createDimension('x',nx)
ncfile.createDimension('y',ny)
# create the variable (4 byte integer in this case)
# first argument is name of variable, second is datatype, third is
# a tuple with the names of dimensions.
data = ncfile.createVariable('data',dtype('int32').char,('x','y'))
# write data to variable.
data[:] = data_out
# close the file (flushes everything to disk).
ncfile.close()
# Python 2 print statement -- this example predates Python 3.
print '*** SUCCESS writing example file simple_xy.nc!'
| true |
10ea19cc332bf1ee47ca21a68791458eaf68c09f | Python | feature-engine/feature_engine | /tests/test_encoding/test_helper_functions.py | UTF-8 | 815 | 2.953125 | 3 | [
"BSD-3-Clause"
] | permissive | import pytest
from feature_engine.encoding._helper_functions import check_parameter_unseen
@pytest.mark.parametrize("accepted", ["one", False, [1, 2], ("one", "two"), 1])
def test_raises_error_when_accepted_values_not_permitted(accepted):
    # Anything that is not a list of strings (bare string, bool, list of
    # ints, tuple, int) must be rejected with a ValueError.
    with pytest.raises(ValueError) as record:
        check_parameter_unseen("zero", accepted)
    # NOTE(review): the juxtaposed literals produce a double space before
    # "Got" -- this must mirror the library's exact message; confirm.
    msg = "accepted_values should be a list of strings. " f" Got {accepted} instead."
    assert str(record.value) == msg
@pytest.mark.parametrize("accepted", [["one", "two"], ["three", "four"]])
def test_raises_error_when_error_not_in_accepted_values(accepted):
    # An errors value outside the accepted list must raise, and the
    # message must enumerate the permitted values.
    with pytest.raises(ValueError) as record:
        check_parameter_unseen("zero", accepted)
    # NOTE(review): no space between the two concatenated sentences
    # ("...instead.Got zero instead.") -- presumably matches the library
    # message verbatim; confirm upstream before changing.
    msg = f"errors takes only values {', '.join(accepted)}." f"Got zero instead."
    assert str(record.value) == msg
| true |
29ca7760e6c78fb8651a5eaa87728a215fcda517 | Python | ojhermann-ucd/comp47600 | /16203034_hermannOtto_practical5/q2/b.py | UTF-8 | 1,158 | 3.015625 | 3 | [] | no_license | # imports
import math
import td_idf
import a
if __name__ == '__main__':
    # Documents: a toy corpus of six near-duplicate sentences.
    document_1 = "I like the talking heads."
    document_2 = "I love the talking heads."
    document_3 = "I enjoy the talking heads."
    document_4 = "I really like the talking heads."
    document_5 = "I really enjoy the talking heads."
    document_6 = "I really love the talking heads."
    document_list = [document_1, document_2, document_3, document_4, document_5, document_6]
    # Term Dictionary
    term_dict = td_idf.get_terms(document_list)
    # base: presumably the logarithm base used in the idf term -- confirm
    # against td_idf.tf_idf_boolean.
    base = 2
    # TF-IDF Dictionary
    tf_idf_dict = td_idf.tf_idf_boolean(document_list, term_dict, base)
    # Vectors
    vector_list = [a.tf_idf_vectors(d, tf_idf_dict) for d in document_list]
    # Magnitudes
    magnitude_list = [a.magnitude(v) for v in vector_list]
    # Cosine similarity of every document against document 1.
    cs_list = [a.cosine_similarity(vector_list[0], v) for v in vector_list]
    # Pair each magnitude with its cosine similarity for reporting.
    graph_data = list()
    for k in range(len(magnitude_list)):
        graph_data.append((magnitude_list[k], cs_list[k]))
    print("List of (magnitude, slope) for the data")
    for k in range(len(magnitude_list)):
        print("Document {}".format(k+1))
        print(graph_data[k])
        print("")
| true |
5443f17e7e8c428ddca74d4849c67bafbfb1c98c | Python | python-code-assignment/Prarthan.MR | /none_value_in_dictionary.py | UTF-8 | 361 | 2.859375 | 3 | [] | no_license | def dup(data):
    # Iterate over a snapshot of the items so entries can be removed from
    # `data` while walking it.
    for key, value in list(data.items()):
        if isinstance(value, dict):
            # Recurse into nested dicts (mutated in place; the return
            # value is the same object).
            value=dup(value)
        else:
            # Drop non-dict entries whose value is None.
            if value==None:
                data.pop(key)
    return data
# Sample nested payload: None-valued fields should be stripped at every
# level (here 'name' and 'City').
a={
    3:{
        'name':None, 'dept_id': 343,'address':{'City':None,'pincode':541283}
    }
}
d=dup(a)
print(d)
| true |
de304eedae794b26249e31c0069b2f6b16521097 | Python | vincentpham1991/PythonProjects | /HiddenMarkovModelFraud/HMMArray.py | UTF-8 | 11,490 | 3.390625 | 3 | [] | no_license | import re, string, numpy, pylab
from numpy import *
def getAlphabet(obs):
    '''
    Retrieves the possible observation values.

    Parameters
    ----------
    obs: list of observations (each an iterable of symbols)

    Returns
    --------
    out: the alphabet as a list, ordered by first appearance (so indices
        into it -- used as columns of the B matrix -- stay stable)
    '''
    alphabet = []
    for word in obs:
        for c in word:
            # Direct membership test replaces the original
            # `if c in alphabet: pass / else: append` anti-pattern.
            if c not in alphabet:
                alphabet.append(c)
    return alphabet
def genPi(nStates):
    '''
    Generates a uniform initial state distribution (1/nStates per state).
    For our sake, this is actually not too important.

    Parameters
    ----------
    nStates: number of states in the model

    Returns
    --------
    out: a uniform 1-D array of length nStates
    '''
    return ones(nStates) / nStates
def normalizeArrayRow(array):
    '''Scale every row of a 2-D array in place so it sums to one; the
    (mutated) input array is also returned.'''
    n_rows, n_cols = array.shape
    for r in range(n_rows):
        # Row total is taken before any element of the row is rescaled.
        row_total = sum(array[r])
        for c in range(n_cols):
            array[r][c] /= row_total
    return array
def genA(nStates):
    '''
    Generates a uniform state transition matrix.

    Parameters
    ----------
    nStates: number of states in model

    Returns
    --------
    A: an nStates x nStates matrix with every entry 1/nStates

    Note: the original computed ``val = 1/nStates``, which is integer
    division (an all-zero matrix) under Python 2; float division is forced
    here, matching genPi's ``1./nStates``.
    '''
    return ones((nStates, nStates)) / nStates
def forward(obs, A, B, Pi, alphabet):
    '''
    Forward pass of the forward-backward algorithm.

    alpha[i][t] is the total probability of being in state i at time t
    given the observations obs[0..t-1], obtained by summing over all
    incoming arcs.

    Parameters
    ----------
    obs: visible observation sequence
    A, B, Pi, alphabet: model parameters (transitions, emissions,
        initial distribution, symbol alphabet)

    Returns
    --------
    alpha: an (nStates, len(obs)+1) array
    '''
    T = len(obs)
    nStates = len(Pi)
    alpha = zeros((nStates, T + 1))

    # Base case: before consuming any symbol the distribution is Pi.
    for s in range(nStates):
        alpha[s][0] = Pi[s]

    # Induction over the observation sequence.
    for t, symbol in enumerate(obs, start=1):
        oVal = alphabet.index(symbol)  # emission-matrix column for symbol
        for j in range(nStates):
            total = 0
            for i in range(nStates):
                total += alpha[i][t - 1] * A[i][j] * B[i][oVal]
            alpha[j][t] = total
    return alpha
def backward(obs, A, B, Pi, alphabet):
    '''
    The 'backward' component of the forward-backward algorithm. The backward
    variable beta(i, t) is the total probability of seeing the rest of the
    observation sequence given we were in state i at time t. Needed mainly
    so the forward-backward algorithm can be used.
    Parameters
    ----------
    obs: visible observations
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    beta: an nStates x (len(obs)+1) array
    '''
    N = len(obs)
    nStates = len(Pi)
    beta = zeros((nStates, N + 1))
    # Base case: at the final time every state is certain to "finish".
    for s in range(nStates):
        beta[s][N] = 1
    # Induction, walking backwards through time.
    for t in range(N - 1, -1, -1):
        oVal = alphabet.index(obs[t])
        for fr_s in range(nStates):
            beta[fr_s][t] = sum(
                beta[to_s][t + 1] * A[fr_s][to_s] * B[fr_s][oVal]
                for to_s in range(nStates))
    return beta
def getTotProb(O, t, A, B, Pi, alphabet):
    '''
    The forward-backward algorithm: probability of witnessing the observation
    sequence O given the model, evaluated by combining alpha and beta at time t.
    Parameters
    ----------
    O: visible observations
    t: time index at which alpha and beta are combined
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    P(O given our model A, B, Pi)
    '''
    alph = forward(O, A, B, Pi, alphabet)
    bet = backward(O, A, B, Pi, alphabet)
    return sum(alph[s][t] * bet[s][t] for s in range(len(Pi)))
def prob_itoj_t(O, t, from_state, to_state, A, B, Pi, alphabet):
    '''
    Expected count of transitions from state i to state j at time t.
    Parameters
    ----------
    O: visible observations
    t: time
    from_state: state i
    to_state: state j
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    the normalized transition probability described above
    '''
    alph = forward(O, A, B, Pi, alphabet)
    bet = backward(O, A, B, Pi, alphabet)
    oVal = alphabet.index(O[t])
    num = (alph[from_state][t] * A[from_state][to_state]
           * B[from_state][oVal] * bet[to_state][t + 1])
    # Normalize by the total sequence probability.
    return num / getTotProb(O, t, A, B, Pi, alphabet)
def getGamma(f_state, O, t, A, B, Pi, alphabet):
    '''
    Expected number of transitions out of state f_state at time t,
    obtained by summing prob_itoj_t over every destination state.
    Parameters
    ----------
    f_state: state i
    O: visible observations
    t: time
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    the expected no. of transitions out of state i
    '''
    return sum(prob_itoj_t(O, t, f_state, j, A, B, Pi, alphabet)
               for j in range(len(Pi)))
def getExpecteditoj(fr_state, to_state, O, A, B, Pi, alphabet):
    '''
    Expected number of transitions from state i to state j summed
    over every time step of the sequence.
    Parameters
    ----------
    fr_state: state i
    to_state: state j
    O: visible observations
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    the expected no. of i -> j transitions over the whole sequence
    '''
    return sum(prob_itoj_t(O, t, fr_state, to_state, A, B, Pi, alphabet)
               for t in range(len(O)))
def getExpectedFromi(fr_state, O, A, B, Pi, alphabet):
    '''
    Expected number of transitions out of state i over the entire sequence,
    obtained by summing gamma over every time step.
    Parameters
    ----------
    fr_state: state i
    O: visible observations
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    the expected no. of transitions out of state i in the entire sequence
    '''
    return sum(getGamma(fr_state, O, t, A, B, Pi, alphabet)
               for t in range(len(O)))
def getExpectediObsK(fr_state, k, O, A, B, Pi, alphabet):
    '''
    Get the expected number of transitions out of state i given we produce
    the observation k.
    Parameters
    ----------
    fr_state: state i
    k: the observation symbol
    O: visible observations
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    rv: the expected no. of transitions out of state i given we emit k
    '''
    rv = 0
    N = len(O)
    nStates = len(Pi)
    for t in range(N):
        # BUGFIX: was `O[t-1] == k`, which at t == 0 wrapped around and
        # compared against the *last* observation. prob_itoj_t treats O[t]
        # as the symbol emitted at time t, so use the same indexing here.
        if O[t] == k:
            for to_state in range(nStates):
                val = prob_itoj_t(O, t, fr_state, to_state, A, B, Pi, alphabet)
                rv += val
    return rv
def getAVal(fr_state, to_state, O, A, B, Pi, alphabet):
    '''
    Expectation step:
    expected number of i -> j transitions divided by the expected number of
    transitions out of i; this is the re-estimated value for A[i][j].
    Parameters
    ----------
    fr_state: state i
    to_state: state j
    O: visible observations
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    the value for expected A[i][j]
    '''
    expected_ij = getExpecteditoj(fr_state, to_state, O, A, B, Pi, alphabet)
    expected_i = getExpectedFromi(fr_state, O, A, B, Pi, alphabet)
    return expected_ij / expected_i
def getBVal(fr_state, k, O, A, B, Pi, alphabet):
    '''
    Expectation step:
    expected number of transitions out of state i that emit observation k,
    normalized by all transitions out of i; the re-estimated B[i][k].
    Parameters
    ----------
    fr_state: state i
    k: observation
    O: visible observations
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    the value for expected B[i][k]
    '''
    emitting_k = getExpectediObsK(fr_state, k, O, A, B, Pi, alphabet)
    total_from_i = getExpectedFromi(fr_state, O, A, B, Pi, alphabet)
    return emitting_k / total_from_i
def getPi1(O, A, B, Pi, alphabet):
    '''
    Expectation step:
    expected number of initial (t = 0) transitions out of each state,
    giving the re-estimated Pi vector.
    Parameters
    ----------
    O: visible observations
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    rv: the re-estimated Pi
    '''
    rv = zeros(Pi.shape)
    for s in range(len(Pi)):
        rv[s] = getGamma(s, O, 0, A, B, Pi, alphabet)
    return rv
def getA1(O, A, B, Pi, alphabet):
    '''
    Run getAVal on all combinations of state i and state j to build a
    re-estimated A matrix.
    Parameters
    ----------
    O: visible observations
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    rv: reestimated A matrix
    '''
    nStates = len(Pi)
    rv = zeros(A.shape)
    for i in range(nStates):
        for j in range(nStates):
            rv[i][j] = getAVal(i, j, O, A, B, Pi, alphabet)
    return rv
def getB1(O, A, B, Pi, alphabet):
    '''
    Run getBVal on all combinations of state i and emission k to build a
    re-estimated B matrix.
    Parameters
    ----------
    O: visible observations
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    rv: reestimated B matrix
    '''
    nStates = len(Pi)
    rv = zeros(B.shape)
    for fr_state in range(nStates):
        for letter in alphabet:
            # BUGFIX: was `alphabet.index(letter) - 1`, which shifted every
            # estimate one column left (wrapping index 0 into the last
            # column); every other lookup in this module uses
            # alphabet.index(letter) directly.
            oVal = alphabet.index(letter)
            rv[fr_state][oVal] = getBVal(fr_state, letter, O, A, B, Pi, alphabet)
    return rv
def dist(a, b):
    '''Absolute difference between a and b.'''
    return abs(b - a)
#test condition to see if the transition matricies are converging
def aConvTest(A, estA):
    '''True when every entry of A and estA differs by at most 0.01.'''
    rows, cols = A.shape
    for i in range(rows):
        for j in range(cols):
            if dist(A[i][j], estA[i][j]) > 0.01:
                return False
    return True
def bConvTest(B, estB, alphabet):
    '''True when every entry of B and estB differs by at most 0.01
    (alphabet is accepted for interface compatibility but unused).'''
    rows, cols = B.shape
    for i in range(rows):
        for j in range(cols):
            if dist(B[i][j], estB[i][j]) > 0.01:
                return False
    return True
def piConvTest(Pi, estPi):
    '''True when every entry of Pi and estPi differs by at most 0.05.'''
    for s in range(len(Pi)):
        if dist(Pi[s], estPi[s]) > 0.05:
            return False
    return True
def getEst(O, A, B, Pi, alphabet):
    '''
    Maximization step:
    we reestimate our matricies until they converge (to 0.05 points within each other)
    Parameters
    ----------
    O: visible observations
    A, B, Pi, alphabet: the parameters of our model
    Returns
    --------
    A, B, Pi: maximized matricies
    '''
    # One full Baum-Welch re-estimation pass.
    A1 = getA1(O, A, B, Pi, alphabet)
    B1 = getB1(O, A, B, Pi, alphabet)
    Pi1 = getPi1(O, A, B, Pi, alphabet)
    # NOTE(review): if any matrix has not converged, the current estimates are
    # adopted and getEst recurses; convergence is therefore reached through
    # recursion rather than iteration of this while loop. Deep recursion (and
    # the re-estimation cost of every level) is possible -- confirm intent.
    while (not aConvTest(A, A1) or (not bConvTest(B, B1, alphabet)) or (not piConvTest(Pi, Pi1))):
        A, B, Pi = A1, B1, Pi1
        A, B, Pi = getEst(O, A, B, Pi, alphabet)
    return A, B, Pi
def readCatFile(file):
    '''
    Loads in the category data: returns the file's lines stripped of
    surrounding whitespace and concatenated into one observation string.
    '''
    # Use a context manager so the handle is closed deterministically
    # (the original left the file open), and join instead of O(n^2) +=.
    with open(file) as f:
        return ''.join(line.strip() for line in f)
#given a model M = (A, B, Pi) and a sequence of observations
# O = (01, 02, ... 0N), return the probabilty of witnessing said sequence
def getProbGivenModel(O, A, B, Pi):
    """Score observation sequence O against the model (A, B, Pi).

    NOTE(review): despite the name, this returns the full forward matrix
    produced by forward(), not a scalar probability -- confirm with callers.
    """
    return forward(O, A, B, Pi, getAlphabet(O))
def genTrainedModel():
    """Train a 2-state HMM on the purchase-category training data.

    Two states are used because we expect them to represent "luxury goods"
    and "necessity goods". Because Baum-Welch finds local (not global)
    optima, the emission matrix is seeded asymmetrically so that one state
    prefers more expensive purchases.
    """
    trainA = genA(2)
    trainPi = genPi(2)
    trainB = array([(0.02, 0.03, 0.95), (0.1, 0.03, 0.87)])
    # Read the training data as our observations, trimmed to 100 symbols.
    obs = readCatFile('categoryData.txt')[290:390]
    alphabet = ['l', 'h', 'm']
    return getEst(obs, trainA, trainB, trainPi, alphabet)
# we test the probablity at 10-purchase intervals, checking to see if
# the difference between any two purchases
def testModel(A, B, Pi):
a1 = getTotProb(O1, 15, A, B, Pi, ['h', 'm', 'l'])
a2 = getTotProb(O2, 15, A, B, Pi, ['h', 'm', 'l'])
| true |
8f138fc59df40838141393f2c64a7050ff3de74f | Python | brosner/duktape-py | /test/test_duktape.py | UTF-8 | 1,520 | 2.75 | 3 | [
"MIT"
] | permissive | import gc
import tempfile
import duktape
import pytest
# todo: unicode tests everywhere and strings with nulls (i.e. I'm relying on null termination)
def test_create():
    # Constructing a context must not raise.
    ctx = duktape.Context()
def test_eval_file():
    ctx = duktape.Context()
    source = b"var a = {a: 1, b: 2};"
    with tempfile.NamedTemporaryFile() as tf:
        tf.write(source)
        tf.flush()
        # Load the script from disk while the temp file still exists.
        ctx.load(tf.name)
    assert len(ctx) == 1
def test_stacklen_evalstring():
    "test stacklen and evalstring"
    ctx = duktape.Context()
    # A fresh context starts with an empty stack.
    assert not len(ctx)
    ctx.loads("var a = '123';")
    assert len(ctx) == 1
def test_error_handling():
    bad_source = "bad syntax bad bad bad"
    ctx = duktape.Context()
    # Invalid JS must surface as duktape.Error, not crash.
    with pytest.raises(duktape.Error):
        ctx.loads(bad_source)
def test_gc():
    ctx = duktape.Context()
    # Garbage collection with a live value on the stack must not raise.
    ctx._push("whatever")
    ctx.gc()
def test_push_gettype():
    "test _push and _type"
    ctx = duktape.Context()
    def push(x):
        # Push the value onto the duktape stack and report its type code.
        ctx._push(x)
        return ctx._type()
    # One sample value per supported Python type.
    codes = map(push, [
        "123",
        123,
        123.,
        True,
        False,
        None,
        (1, 2, 3),
        [1, 2, 3],
        [[1]],
        {
            "a": 1,
            "b": "2",
        }
    ])
    # Ints and floats both map to float (duktape numbers are doubles);
    # tuples, lists and dicts all surface as generic objects.
    expected = [str, float, float, bool, bool, type(None), object, object, object, object]
    assert [code.as_pytype() for code in codes] == expected
def test_push_get():
    ctx = duktape.Context()
    samples = ["123", 123., True, False, [1, 2, 3], [[1]], {"a": 1, "b": 2}]
    # Every value must round-trip through the duktape stack unchanged.
    for v in samples:
        ctx._push(v)
        assert v == ctx._get()
| true |
d84f20886f41b84ab0c5e542c89ade356bd99bf2 | Python | rmkm/SenSim | /sensor/sensor_asyncio.py | UTF-8 | 2,302 | 2.53125 | 3 | [] | no_license | from pytz import timezone
from datetime import datetime
import numpy as np
import time, socket, sys, json, random, yaml, signal, asyncio
import generator
def signal_handler():
    """SIGINT handler: announce shutdown and cancel every pending task."""
    print('You pressed Ctrl+C!')
    #file.close()
    # asyncio.Task.all_tasks() was deprecated in Python 3.7 and removed in
    # 3.9; the module-level asyncio.all_tasks() is the supported spelling.
    for task in asyncio.all_tasks():
        task.cancel()
def reader(socket, start_time):
    """Read the reply from `socket` and report the round-trip time in ms."""
    # Snapshot the clock before reading so RTT reflects arrival, not recv cost.
    elapsed_ms = (time.clock_gettime(time.CLOCK_MONOTONIC) - start_time) * 1000
    data = socket.recv(100)
    if data is not None:
        print("RTT: {} ms".format(elapsed_ms))
        print("Received:", data.decode())
async def sensor(loop, confDict, delay):
    """Simulated sensor: after `delay` seconds, connect to the configured
    endpoint and send `numberOfData` generated readings, one every
    `sleepTime` seconds, registering a reader callback for each reply."""
    print("==> sensor start")
    await asyncio.sleep(delay)
    destinationIP = confDict["destinationIP"]
    destinationPORT = confDict["destinationPORT"]
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setblocking(0)  # non-blocking mode is required by loop.sock_* APIs
    await loop.sock_connect(s, (destinationIP, destinationPORT))
    function = confDict["function"]
    args = confDict["args"]
    numberOfData = confDict["numberOfData"]
    sleepTime = confDict["sleepTime"]
    for i in range(numberOfData):
        # Look up the configured generator function by name, produce one sample.
        data = getattr(generator, function)(args)
        print('Send: %s' % data)
        start = time.clock_gettime(time.CLOCK_MONOTONIC)
        s.send(data.encode())
        # Fire `reader` when the reply arrives so the RTT can be measured.
        loop.add_reader(s, reader, s, start)
        await asyncio.sleep(sleepTime)
    s.close()
    print("==> sensor end")
async def worker(loop, confDict):
    """Spawn `numberOfSensor` sensor coroutines with randomized start delays
    and wait for all of them to finish."""
    numberOfSensor = confDict["numberOfSensor"]
    sleepTime = confDict["sleepTime"]
    tasks = []
    if numberOfSensor == 1:
        tasks.append(sensor(loop, confDict, 0))
    else:
        for i in range(numberOfSensor):
            #delay = random.uniform(0, sleepTime)
            delay = random.uniform(0, 10)
            tasks.append(sensor(loop, confDict, delay))
    # asyncio.wait() stopped accepting bare coroutines in Python 3.11;
    # gather() schedules them itself and waits for all of them.
    await asyncio.gather(*tasks)
def main():
    """Entry point: load the YAML config named on the command line, install
    the SIGINT handler, and run the worker on an event loop."""
    assert len(sys.argv) == 2, 'Usage: "[this_script.py] [config.yaml]"'
    PATH = sys.argv[1]
    # Close the config file deterministically (the original leaked the handle).
    with open(PATH, "r") as confFile:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input and rejected by PyYAML >= 6; prefer yaml.safe_load
        # unless custom tags are required.
        confD = yaml.load(confFile.read())
    event_loop = asyncio.get_event_loop()
    event_loop.add_signal_handler(signal.SIGINT, signal_handler)
    try:
        event_loop.run_until_complete(worker(event_loop, confD))
    finally:
        event_loop.close()
if __name__ == '__main__':
    main()
| true |
92867f4c7bc6fda3da19bcd1f2fb2b6d80c1c9bc | Python | freechainer/Python_200 | /jupyter_notebook/data/022.py | UTF-8 | 105 | 3.84375 | 4 | [] | no_license | a = True
b = False
print(a == 1) # prints True: bool is an int subtype, so True == 1
print(b != 0) # prints False: False == 0
| true |
b35c79abd59f6b80d6a93fb724e09862046d947e | Python | nagellette/pub1_shape_to_raster | /labels_to_raster.py | UTF-8 | 1,042 | 2.6875 | 3 | [] | no_license | from osgeo import gdal, ogr
'''
Rasterize code reference: https://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html#get-raster-metadata
'''
# Define pixel_size and NoData value of new raster
pixel_size = 5.0
NoData_value = -9999
# Filename of input OGR file
vector_fn = './data/bejing_reduced_projected_reduced.shp'
# Filename of the raster Tiff that will be created
raster_fn = './data/bejing_reduced_projected_labels.tif'
# Open the data source and read in the extent
source_ds = ogr.Open(vector_fn)
source_layer = source_ds.GetLayer()
x_min, x_max, y_min, y_max = source_layer.GetExtent()
# Create the destination data source
x_res = int((x_max - x_min) / pixel_size)
y_res = int((y_max - y_min) / pixel_size)
target_ds = gdal.GetDriverByName('GTiff').Create(raster_fn, x_res, y_res, 1, gdal.GDT_Byte)
target_ds.SetGeoTransform((x_min, pixel_size, 0, y_max, 0, -pixel_size))
band = target_ds.GetRasterBand(1)
band.SetNoDataValue(NoData_value)
# Rasterize
gdal.RasterizeLayer(target_ds, [1], source_layer, burn_values=[255]) | true |
46439e97fc0de71ddc98172d3f8652d563f481ba | Python | ArgonneDetectorGroup/fit_tools | /fit_tools.py | UTF-8 | 1,679 | 3.03125 | 3 | [] | no_license | #By: Faustin Carter (faustin.carter@gmail.com), 2017
import numpy as np
import lmfit as lf
import warnings
def do_lmfit(xdata, ydata, fit_fn, params, **kwargs):
"""Run any fit from models on your data.
Paramters
---------
xdata : np.array
The points at which to calculate the model.
ydata : np.array
The data to compare to the calculated model.
fit_fn : callable
Model function to pass to minimizer. Must have signature"""
#pop model kwargs off the top
model_kwargs = kwargs.pop('model_kwargs', {})
#Override any of the default Parameter settings
if kwargs is not None:
for key, val in kwargs.items():
#Allow for turning on and off parameter variation
if '_vary' in key:
key = key.split('_')[0]
if key in params.keys():
assert ((val is True) or (val is False)), "Must pass bool for vary"
params[key].vary = val
#Allow for overriding the range
elif '_range' in key:
key = key.split('_')[0]
if key in params.keys():
assert len(val) == 2, "Must pass min and max for range! np.inf or -np.inf are OK."
params[key].min = val[0]
params[key].max = val[1]
#Allow for overriding the default guesses
elif key in params.keys():
params[key].value = val
else:
warnings.warn("Unknown keyword: "+key, UserWarning)
minObj = lf.Minimizer(fit_fn, params, fcn_args=(xdata, ydata), fcn_kws=model_kwargs)
fit_result = minObj.minimize()
return fit_result
| true |
b8fb03c066c80b185f71adefbe13cb6f24abac1e | Python | lesswrong-ru/generate-books | /html2fb2/compressedfile.py | UTF-8 | 6,859 | 3.15625 | 3 | [] | no_license | """Module to offer simple compressed file support by assuming the file
in interest is contained in a zip file. Allows reading to a file,
or writing to a file (but not both and not appending).
Right now this module deals with real files on disk (it uses in-memory
buffers for the intermediate files, so there are no temporary
on-disk files). It does not support file objects as input. This is
next on the TODO list.
TODO add support for .name attribute:
From http://docs.python.org/lib/bltin-file-objects.html
If the file object was created using open(), the name of the file. Otherwise, some string that
indicates the source of the file object, of the form "<...>". This is a read-only attribute and
may not be present on all file-like objects.
cStringIO can NOT be used to create attributes, e.g.:
import cStringIO as StringIO
file_ptr = StringIO.StringIO('hello')
file_ptr.name = 'pants'
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'cStringIO.StringI' object has no attribute 'name'
But StringIO can be! Probably should wrap around instead.
TODO consider support of different compress and archive formats:
.Z
.gz
.tar
.tgz
tar.gz
tar.z
import gzip
text_file_object = open(text_filename + '.gz', 'wb')
gzip_fileptr = gzip.GzipFile(os.path.basename(text_filename), fileobj=text_file_object)
return gzip_fileptr
"""
try:
#raise ImportError
import cStringIO as StringIO
except ImportError:
import StringIO
import os
import time
import zipfile
def ro_open_zipfile(file_name, read_mode=None, extension_of_interest=None):
    """Read only open of either a text file or a zip file and returns regular file object
    if opening a zip file, opens the first file that ends with: extension_of_interest
    e.g. extension_of_interest='.txt'
    e.g. extension_of_interest='.rtf'
    if file is a zip file the contents are read in binary mode (under Windows)
    file_name is used pretty much as-is so recommend using ASCII filenames and not latin1, Unicode (utf16) or bytes containing utf-8
    TODO consider allowing extension_of_interest to be either a
    single string or a list/tuple/etc. of strings,
    e.g. ['*.html', '*.htm']

    NOTE(review): Python 2 only -- uses `except IOError, info` syntax."""
    if read_mode is None:
        read_mode='rb'
    if not read_mode.startswith('r'):
        raise(NotImplementedError, 'only support reading files')
    file_ptr = None
    if not file_name.lower().endswith('.zip'):
        # assume this is a raw file
        try:
            file_ptr = open(file_name, read_mode)
        except IOError, info:
            if info.errno == 2:
                # [Errno 2] No such file or directory: fall back to name.zip
                file_name = file_name + '.zip' # ignoring upper case,
    if file_ptr is None:
        # this is a zip file
        zipfile_ptr = zipfile.ZipFile(file_name, 'r') # 'r' is binary for ZipFile
        textfile_name=None
        if extension_of_interest is None:
            # find the first file in the zip file and use that
            textfile_name=zipfile_ptr.namelist()[0]
        else:
            f_ext = extension_of_interest.lower()
            # find first f_ext file
            for name in zipfile_ptr.namelist():
                if name.lower().endswith(f_ext):
                    textfile_name = name
                    break
        # Decompress the whole member into memory and hand back a file-like view.
        file_ptr = StringIO.StringIO(zipfile_ptr.read(textfile_name))
    return file_ptr
class ZipFileWrapper:
    """Write-only file-like wrapper: buffers all writes in memory and flushes
    the buffer into the owning ZipFile as a single archive member on close()."""
    def __init__(self, zipfileptr, zipinfo):
        self._zipfileptr = zipfileptr
        self._zipinfo = zipinfo
        self._fileptr = StringIO.StringIO()
    def __getattr__(self, attr):
        # Delegate every other file-object attribute to the in-memory buffer.
        if self.__dict__.has_key(attr):
            return self.__dict__[attr]
        else:
            return getattr(self._fileptr, attr)
    def close(self, *args, **kwargs):
        # Commit the accumulated data as one zip entry, then release everything.
        self._zipfileptr.writestr(self._zipinfo, self._fileptr.getvalue())
        self._zipfileptr.close()
        self._fileptr.close()
        ## TODO disallow more writes/closes....
def wo_open_zipfile(file_name, write_mode=None):
    """Write only open of zip file where the returned file handle is
    to the file inside and returns regular file object that can be written to
    file_name is used pretty much as-is so recommend using ASCII filenames and not latin1, Unicode (utf16) or bytes containing utf-8"""
    if write_mode is None:
        write_mode='wb'
    if not write_mode.startswith('w'):
        raise(NotImplementedError, 'only support writting new files')
    if not file_name.lower().endswith('.zip'):
        # Plain name given: archive becomes <name>.zip, member keeps the basename.
        zipfile_name = file_name + '.zip'
        newfile_name_inzip = os.path.basename(file_name)
    else:
        # .zip name given: member name is the basename without the extension.
        zipfile_name = file_name
        newfile_name_inzip = os.path.splitext(os.path.basename(file_name))[0]
    # Timestamp the archive member with the current local time.
    now = time.localtime(time.time())[:6]
    myzipfile = zipfile.ZipFile(zipfile_name, "w") # 'w' is binary for ZipFile
    info = zipfile.ZipInfo(newfile_name_inzip)
    info.date_time = now
    info.compress_type = zipfile.ZIP_DEFLATED
    return ZipFileWrapper(myzipfile, info)
def open_zipfile(name, mode=None):
    """Emulate stdlib open but using compressed files for input and output.
    Can open uncompressed files for reading as well as compressed.
    Opening files for write ALWAYS creates compressed files.
    Does not handle optional buffering argument"""
    if mode is None:
        mode = 'rb'
    if mode.startswith('w'):
        return wo_open_zipfile(name, mode)
    if mode.startswith('r'):
        return ro_open_zipfile(name, mode)
    raise(NotImplementedError, 'unsupported file open mode')
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 only: print statements below).
    zipfile_name = "D:\\dloads\\usenet\\Pan\\onppc\\Foster,Alan Dean -11 Running From The Deity_txt.zip"
    #tempfile = ro_open_zipfile(zipfile_name, 'r')
    tempfile = ro_open_zipfile(zipfile_name, 'r', '.txt')
    bytes_to_read=10
    data = tempfile.read(bytes_to_read)
    print len(data), repr(data)
    print len(data), data
    tempfile.close()
    zipfile_name = "D:\\tmp\Deity.txt.zip"
    zipfile_name = "D:\\tmp\Deity.txt"
    tempfile = wo_open_zipfile(zipfile_name)
    tempfile.write('hello world.')
    tempfile.write('goodbye world')
    tempfile.close()
    #tempfile.close() ## bad! we closed this already :-)
    #tempfile.write("I'm zipped!") ## bad! we closed this already :-)
    tempfile=open_zipfile(zipfile_name, 'w')
    tempfile.write("I'm zipped!")
    tempfile.close()
    tempfile=open_zipfile(zipfile_name)
    print tempfile.read()
    tempfile.close()
| true |
5f65828b82eb8f769de7a00bb18c9b6adba51a3d | Python | SashaVin/project02 | /project02.py | UTF-8 | 319 | 3.234375 | 3 | [] | no_license | r = str.lower(input())
u = r.split(' ')
t = ''.join(u)  # strip all spaces so multi-word phrases compare cleanly
k = len(t) - 1  # index of the last character
a = 0  # offset walking inwards from both ends
q = 1  # 1 while the string still looks like a palindrome
# Compare mirrored characters until the two offsets cross.
while k - a >= a:
    if t[k - a] == t[a]:
        a = a + 1
    else:
        q = 0
        break
if q == 1:
    print('Ого, это палиндром!')
if q == 0:
    print('К сожалению, это не палиндром(')
| true |
5aacd324f7f8b9d64a39cbc2c05b9d45f6ad361c | Python | GRSEB9S/maxent-srl | /basic_struct.py | UTF-8 | 368 | 2.734375 | 3 | [
"MIT"
] | permissive | from collections import namedtuple
# A labeled span [start, end] within a sentence.
Frame = namedtuple('Frame', ['start', 'end', 'name'])
# Start/end position of a parse-tree node.
NodePosition = namedtuple('NodePosition', ['start', 'end'])
class Context(object):
    """Bundles the data needed to evaluate one parse-tree node: the sentence,
    its parse tree, the frame under consideration, and the node position."""
    def __init__(self, sentence, parse_tree, frame, node_pos):
        self.sentence = sentence
        self.parse_tree = parse_tree
        self.frame = frame
        self.node_pos = node_pos

    def __repr__(self):
        # Debug aid only; no caller-visible behavior depends on it.
        return ('Context(sentence=%r, parse_tree=%r, frame=%r, node_pos=%r)'
                % (self.sentence, self.parse_tree, self.frame, self.node_pos))
| true |
4dba65ab47c0c48990a5d0f3125f1195ab381fdd | Python | Jack-Rutland/WorkoutStats | /src/main.py | UTF-8 | 1,571 | 3 | 3 | [] | no_license | '''
WORKOUT STATISTICS TRACKER
---------------------------------------------------------------------
AUTHOR : Jack Rutland
Description : A windows application written in python to log and
display information about your workouts.
'''
#IMPORTS
from os import listdir
from os import getcwd
from os.path import splitext
import kivy
from workout import Lift
from workout import Workout
import pickle
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.tabbedpanel import TabbedPanel
# Load the declarative UI layout before the widget classes are used.
Builder.load_file('workoutstats.kv')
'''
This is how pickle works
lift1 = Lift('bench',3,10)
workout1 = Workout('pushing')
workout1.addLift(lift1)
print(workout1)
pickle_out = open('lift.workout','wb')
pickle.dump(workout1, pickle_out)
pickle_out.close()
pickle_in = open('lift.workout','rb')
workout2 = pickle.load(pickle_in)
print(workout2)
'''
class WorkoutStats(TabbedPanel):
    # Root widget of the app; its layout is defined in workoutstats.kv.
    PATH = getcwd()  # project root, captured once when the class is created
    #Returns the stems of files in the root/workouts directory as a list
    def fetch_workout_list(self) :
        return [splitext(f)[0] for f in listdir(self.PATH + r'/workouts')]
    #Reads the workout file and update the information in the workout editor
    def read_workout_file(self, text) :
        # NOTE(review): opens `text + '.workout'` relative to the CWD (not the
        # workouts/ directory used above), never closes the handle, and
        # discards the loaded workout -- looks unfinished; confirm intent.
        pickle_in = open (text + '.workout' , 'rb')
        workout = pickle.load(pickle_in)
class WSApp(App):
    """Kivy application wrapper: sets the window title and icon, then mounts
    the WorkoutStats root widget."""
    def build(self):
        self.title = 'Workout Stats BETA'
        self.icon = 'assets/ws_icon.png'
        return WorkoutStats()
if __name__ == '__main__':
    WSApp().run()
fa6753e953578d93051b8f045a036a32fe58f7bd | Python | 13824125580/communication | /newbsp.py | UTF-8 | 4,799 | 2.796875 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import numpy as np
from math import pi
import matplotlib.pyplot as plt
import matplotlib
import scipy.signal as signal
import math
# number of symbols
size = 4
sampling_t = 0.01
t = np.arange(0, size, sampling_t)
print(t)
# generate the (here: deterministic) binary signal sequence
a = np.random.randint(0, 2, size)
for i in range(size):
    a[i] = (i+1) % 2
print(a)
m = np.zeros(len(t), dtype=np.float32)
for i in range(len(t)):
    m[i] = a[math.floor(t[i])] *6/10
    print(m[i])
fig = plt.figure(figsize=(18,12))
ax1 = fig.add_subplot(7, 1, 1)
fc = 1
fs = 20 * fc # sampling frequency
ts = np.arange(0, 40 / fs, 1 / (10*fs))
# work around garbled CJK characters in set_title by using a local font
zhfont1 = matplotlib.font_manager.FontProperties(fname = '/usr/share/fonts/truetype/arphic/ukai.ttc')
ax1.set_title('genernate random binary signal', fontproperties = zhfont1, fontsize = 20)
plt.axis([0, 40/fs, -0.5, 1.5])
plt.plot(ts, m, 'b')
print(ts.shape)
print(ts)
# carrier phase argument 2*pi*fc*ts
d=np.dot(2 * pi * fc, ts)
print(d)
coherent_carrier = 2*np.cos(d)
# bpsk = np.cos(np.dot(2 * pi * fc, ts) + pi * (m - 1) + pi / 4)
# BPSK: phase flips by pi when the symbol changes
bpsk = 2*np.cos(d + pi * (m - 1))
mmm = (np.cos(2*d + pi * (m - 1)))/1
axm = fig.add_subplot(7, 1, 2)
plt.axis([0, 40/fs, -1.5, 1.5])
plt.plot(ts, mmm, 'b')
cosline= np.cos(pi*(m-1))/1
axn = fig.add_subplot(7, 1, 3)
plt.axis([0, 40/fs, -1.5, 1.5])
plt.plot(ts, cosline, 'b')
mmmm = (np.cos(2*d + pi * (m - 1)) + np.cos(pi*(m-1)))/1
axp = fig.add_subplot(7, 1, 4)
plt.axis([0, 40/fs, -1.5, 1.5])
plt.plot(ts, mmmm, 'b')
# bpsk = np.cos(np.dot(2 * pi * fc, ts) + pi * (m - 1))
# bpsk = np.cos(np.dot(2 * pi * fc, ts))
# BPSK modulated signal waveform
ax2 = fig.add_subplot(7, 1, 5)
ax2.set_title('BPSK modulation signal', fontproperties=zhfont1, fontsize=20)
plt.axis([0,40/fs,-1.5, 1.5])
plt.plot(ts, bpsk, 'r')
# Additive white Gaussian noise
def awgn(y, snr):
    """Add white Gaussian noise to y at the requested SNR (in dB)."""
    snr_linear = 10 ** (snr / 10.0)
    signal_power = np.sum(y ** 2) / len(y)
    noise_power = signal_power / snr_linear
    return y + np.sqrt(noise_power) * np.random.randn(len(y))
# add AWGN noise (immediately overridden below, leaving the clean signal)
noise_bpsk = awgn(bpsk, 5)
noise_bpsk = bpsk
# BPSK modulated signal with noise superimposed
ax3 = fig.add_subplot(7, 1, 6)
ax3.set_title('BPSK modulation add noise', fontproperties = zhfont1, fontsize = 20)
plt.axis([0, 40/fs, -1.5, 1.5])
plt.plot(ts, noise_bpsk, 'r')
ax4 = fig.add_subplot(7, 1, 7)
ax4.set_title('BPSK carrier', fontproperties = zhfont1, fontsize = 20)
plt.axis([0,40/fs,-1.5, 1.5])
plt.plot(ts, coherent_carrier, 'r')
# band-pass elliptic filter design, passband [2000, 6000]
[b11,a11] = signal.ellip(5, 0.5, 60, [2000 * 2 / 80000, 6000 * 2 / 80000], btype = 'bandpass', analog = False, output = 'ba')
# low-pass filter design, passband cutoff 2000 Hz
[b12,a12] = signal.ellip(5, 0.5, 60, (2000 * 2 / 80000), btype = 'lowpass', analog = False, output = 'ba')
# band-pass filter would remove out-of-band noise (disabled here)
# bandpass_out = signal.filtfilt(b11, a11, noise_bpsk)
bandpass_out = noise_bpsk
# coherent demodulation: multiply by a same-frequency, same-phase carrier
# coherent_demod = bandpass_out * (coherent_carrier * 2)
coherent_demod = bandpass_out * coherent_carrier * 1
print(coherent_demod )
# low-pass filtering (disabled here)
# lowpass_out = signal.filtfilt(b12, a12, coherent_demod)
lowpass_out = coherent_demod
fig2 = plt.figure(figsize=(16,8))
bx1 = fig2.add_subplot(3, 1, 1)
bx1.set_title('local down frequency and pass low band filter', fontproperties = zhfont1, fontsize=20)
plt.axis([0, 40/fs, -1.5, 1.5])
plt.plot(ts, lowpass_out, 'r')
# sampling decision: integrate each symbol period and threshold at 0
detection_bpsk = np.zeros(len(t), dtype=np.float32)
flag = np.zeros(size, dtype=np.float32)
print(detection_bpsk)
for i in range(size):
    tempF = 0
    for j in range(100):
        # print(lowpass_out[i * 100 + j])
        tempF = tempF + lowpass_out[i * 100 + j]
    print(tempF)
    if tempF > 0:
        flag[i] = 1
    else:
        flag[i] = 0
for i in range(size):
    if flag[i] == 0:
        for j in range(100):
            detection_bpsk[i * 100 + j] = 0
    else:
        for j in range(100):
            detection_bpsk[i * 100 + j] = 1
bx2 = fig2.add_subplot(3, 1, 2)
bx2.set_title('bpskxinhao chouyangpanjuehoude xinhaop', fontproperties = zhfont1, fontsize=20)
plt.axis([0, 40/fs, -0.5, 1.5])
plt.plot(ts, detection_bpsk, 'r')
# spectrum of the demodulated signal (earlier fft_size choices overridden)
fft_size = 512 + 256 +128
fft_size = 2048
fft_size = 400
xs = noise_bpsk[:fft_size]
xs = coherent_demod[:fft_size]
# xs = coherent_carrier[:fft_size]
xf = np.fft.rfft(xs)/fft_size
freqs = np.linspace(0, fs/2, fft_size/2+1)
xfp = 200*np.log10(np.clip(np.abs(xf), 1e-20, 1e100))
bxm = fig2.add_subplot(3, 1, 3)
bxm.set_title('BPSK xinhao chouyang panjue hou de xinhao', fontproperties = zhfont1, fontsize=20)
plt.axis([0, 10, -1024, 150])
plt.plot(freqs, xfp, 'r')
plt.show()
| true |
11b68ce96864361f1e99b2ecd8e18ecf24aba42f | Python | DreamonZhu/AI | /ML/zhoubo_ML_DL_2018_10/9_1.py | UTF-8 | 2,886 | 3.078125 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
df = pd.read_csv('./data/advertising.csv')
# print(df[['TV', 'Radio', 'Newspaper']])
x = df[['TV', 'Radio', 'Newspaper']]
y = df['Sales']
# print(type(x))  # <class 'pandas.core.frame.DataFrame'>
# print(type(y))  # <class 'pandas.core.series.Series'>
# Configure matplotlib for CJK labels and proper minus signs.
mpl.rcParams['font.sans-serif'] = ['simHei']
mpl.rcParams['axes.unicode_minus'] = False
# Plot 1: all three channels on one axis (disabled)
# plt.figure(facecolor='w')
# plt.plot(df['TV'], y, 'ro', label='TV')
# plt.plot(df['Radio'], y, 'g^', label='Radio')
# plt.plot(df['Newspaper'], y, 'mv', label='Newspaper')
# plt.xlabel('广告消费总额', fontsize=16)
# plt.ylabel('销售额', fontsize=16)
# plt.title('广告花费与销售额对比数据', fontsize=18)
# plt.grid(True, ls=":")
# plt.show()
# Plot 2: one subplot per channel (disabled)
# plt.figure(facecolor='w', figsize=(9, 10))
# plt.subplot(311)
# plt.plot(df['TV'], y, 'ro')
# plt.title('TV')
# plt.grid(b=True, ls=':')
# plt.subplot(312)
# plt.plot(df['Radio'], y, 'g^')
# plt.title('Radio')
# plt.grid(True, ls=':')
# plt.subplot(313)
# plt.plot(df['Newspaper'], y, 'b*')
# plt.title('Newspaper')
# plt.grid(True, ls=':')
# plt.tight_layout()  # automatically adjust spacing between subplots
# plt.show()
# Training
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)
# print(x_train.shape)
# print(x_test.shape)
#
# print(y_train.shape)
# print(y_test.shape)
lr = LinearRegression()
lr.fit(x_train, y_train)
# print(lr.coef_, lr.intercept_)
# # y_pre = lr.predict(x_test)
# print(lr.score(x_test, y_test))
# y_hat = lr.predict(x_test)
# mse = np.average((y_hat - y_test)**2)
# print(mse)  # 1.9918855518287906
# print(y_test)
order_1 = y_test.argsort(axis=0)  # indices that sort the array ascending
# order_2 = y_test.argsort(axis=1)
# order_3 = y_test.argsort()
# print(order_1)
# print(order_1.shape)
# print(order_2)
# print(order_3)
# The following DataFrame usage raises an error
# # order_2 = x_test.argsort()
# order_1 = x_test.argsort(axis=1)
# print(order_1)
# # print(order_2)
# Reorder test data by ascending target so the plot reads left to right.
y_test = y_test.values[order_1]
x_test = x_test.values[order_1, :]
# print(y_test)
y_hat = lr.predict(x_test)
mse = np.average((y_hat - y_test)**2)
# mse_1 = np.average((y_hat - np.array(y_test))**2)
print(mse) # 1.9918855518287906
# print(mse_1)
# print('R2 = ', lr.score(x_train, y_train))
# print('R2 = ', lr.score(x_test, y_test))
plt.figure(facecolor='w')
x = np.arange(len(x_test))
# print(len(x_test))  # len(DataFrame) returns the longer dimension; shape returns both as a tuple
plt.plot(x, y_test, 'r^-', label='真实值')
plt.plot(x, y_hat, 'g-', label='预测值')
plt.legend(loc='upper left')
plt.title('线性回归预测销量', fontsize=18)
plt.grid(True, ls=':')
plt.show()
| true |
c73656bce92d7e8ccaaf4bc861b19db9128fd162 | Python | SreeramSP/Python-Function-Files-and-Dictionary-University-of-Michigan- | /#Write a function, accum, that takes a list.py | UTF-8 | 217 | 3.796875 | 4 | [] | no_license | #Write a function, accum, that takes a list of integers as input and returns the sum of those integers.
def accum(l):
    """Return the sum of the integers in list l (0 for an empty list)."""
    # The built-in handles the empty list and replaces the manual accumulator.
    return sum(l)
# Quick manual check of accum (expected output: 15).
list1=[1,2,3,4,5]
print(accum(list1))
| true |
731ff229aa3f41ee456eabaf012fdd5529f98f38 | Python | nir-takemi/text_verifier | /verify_text.py | UTF-8 | 401 | 2.609375 | 3 | [
"MIT"
] | permissive | import sys
from textverifier import text_verifier as tv
def main():
    """Validate the CLI arguments and run the text verifier on the given path."""
    args = sys.argv
    if len(args) < 2:
        raise ValueError('''
            Specify at least 1 argument.
            usage:
                python3 text_verifier.py "dir or file_path"
            ''')
    # Execute
    verifier = tv.TextVerifier(args[1])
    verifier.verify()
if __name__ == '__main__':
    main()
2e3504b4d6fb68998838e051853f97a119064cf1 | Python | ramostitoyostin-unprg/T08_RAMOS.SANCHEZ | /manipuladores_de_textos/longitud.py | UTF-8 | 2,627 | 3.859375 | 4 | [] | no_license | #MANIPULACION DE TEXTOS
#manipulador de texto nro 1
# 0123456789012345678
cadena="HOY TE IRÁ MUY BIEN"
print("la cadena",cadena,"tiene :",len(cadena),"elementos")
#manipulador de texto nro2
# 01234567890123456789012
cadena="SONRIE LA VIDA ES ÚNICA"
print("la cadena",cadena,"tiene :",len(cadena),"elementos")
#manipulador de texto nro 3
# 01234567890123456789
cadena="LA VIDA ES UN REGALO"
print("la cadena",cadena,"tiene :",len(cadena),"elementos")
#manipulador de texto nro 4
# 0123456789012345678901
cadena="INGENIERÍA ELECTRONICA"
print("la cadena",cadena,"tiene :",len(cadena),"elementos")
#manipulador de texto nro 5
# 012345678901234567890
cadena="INDEXACION DE CADENAS"
print("la cadena",cadena,"tiene :",len(cadena),"elementos")
#manipulador de texto nro 6
# 0123456789012345678901234567890123456
cadena="UNIVERSIDAD NACIONAL PEDRO RUIZ GALLO"
print("la cadena",cadena,"tiene :",len(cadena),"elementos")
#manipulador de texto nro 7
# 0123456789012345678901
cadena="CURSO DE PROGRMACION I"
print("la cadena",cadena,"tiene :",len(cadena),"elementos")
#manipulador de texto nro 8
# 01234567890123456789012
cadena="OPERACIONES CON CADENAS"
print("la cadena",cadena,"tiene :",len(cadena),"elementos")
#manipulador de texto nro 9
cadena="ACUERDATE DE MI"
print(cadena, len(cadena))
#manipulador de texto nro 10
# 012345678901234
print(len("hola mundo \r\n")) #en este caso la cadena tiene 13 elementos se excluyen los backslash
#manipulador de texto nro 11
cadena=" "
print(len(cadena)) #tendria un elemento que es el espacio
#manipulador de texto nro 12
# 012345678901234567890123456789
cadena="21313 ; ES EL NUMERO DE CUENTA"
print("la cadena",cadena,"tiene :",len(cadena),"elementos")
#manipulador de texto nro 13
# 0123456789012345
cadena=" HOLA MUNDO "
print("la cadena",cadena,"tiene :",len(cadena),"elementos")
#manipulador de texto nro 14
# 012345678901234567890123456789
cadena=" \\ es una barra invertida"
print(cadena, len(cadena))#en este caso el backslash es contado como un elemento , cadena tiene 25 elemntos
#manipulador de texto nro 15
# 10 20 30 40
# 01234567890123456789012345678901234567890
cadena="OPERACIONES CON CADENAS \"ES MUY SIMPLE\""
print("la cadena",cadena,"tiene :",len(cadena),"elementos")#los elementos \" son contadas como un elemento
#manipulador de texto nro 16
# 10 20 30
# 0123456789012345678901234567890123456789
str="PYTHON ES UN \"LENGUAJE DE PROGRMACION\"" #los elemntos \" son contadas como un elemento
print(len(str))
| true |
edeae1da5b26a923d1e55a6983926a633eeeccb2 | Python | sntran/trello-broker | /trello.py | UTF-8 | 4,769 | 2.734375 | 3 | [] | no_license | from packages import requests
import re
from brokers import BaseBroker
# Trello API credentials.
# NOTE(review): the key/secret are hard-coded in source; they should live in
# configuration or the environment, not in version control.
API_KEY = "d151447bdc437d1089c16011ff1933cf"
API_SECRET = "8889354115ce172246e3c0335fb0e4527c1ecafb5ac1437a7656356f1eb3b191"

# Trello REST endpoint templates; the %s placeholders are filled with
# token / board / card identifiers at request time.
BASE_URL = "https://api.trello.com/1"
MEMBER_URL = BASE_URL + "/tokens/%s/member"
BOARD_CARD_URL = BASE_URL + "/boards/%s/cards/%s"
CARD_URL = BASE_URL + "/cards/%s"
COMMENT_URL = CARD_URL + "/actions/comments"
ASSIGN_MEMBER_URL = CARD_URL + "/members"
def getCard(self, cardId, fields = ''):
    """Get the card data based on its short ID.
    Keyword arguments:
    cardId -- the short id of the card to query.
    fields -- a list of fields to return, default to none.
    """
    # ``self`` is a broker-like object carrying .token and .board; this
    # module-level helper is invoked as getCard(self, ...) from TrelloBroker.
    params = {'token': self.token, 'key': API_KEY, 'fields': fields}
    # NOTE(review): ``.json`` as a property is the pre-1.0 requests API; on
    # modern requests this expression yields the bound method, not the parsed
    # body, and would need to be ``.json()``.
    return requests.get(BOARD_CARD_URL % (self.board, cardId),
                        params=params).json
class TrelloBroker(BaseBroker):
    """Broker that mirrors repository commits onto Trello cards.

    Commit messages are scanned for references such as ``fixes tr#12`` or a
    bare ``tr#12``.  A plain reference posts the commit message as a card
    comment and assigns the commenting member; a 'fix'/'close' reference
    additionally archives the card.
    """

    def __init__(self):
        super(TrelloBroker, self).__init__()
        # Lazy cache of a card's short id -> full id.  Kept per instance:
        # the original class-level ``__cardMap = dict()`` was a mutable class
        # attribute shared by every TrelloBroker, so cached full ids leaked
        # between brokers working against different boards/tokens.
        self.__cardMap = dict()

    def handle(self, payload):
        """Entry point: remember board/token, then process each commit."""
        self.board = payload['service']['board']
        self.token = payload['service']['token']
        del payload['service']
        del payload['broker']

        for commit in payload['commits']:
            self.handleCommit(commit)

    def handleCommit(self, commit):
        """Scan one commit message for tr#<id> references and act on them."""
        pattern = re.compile(r"""
            (       # start capturing the verb
                fix     # contains 'fix'
                | close # or 'close'
                |       # or just to reference
            )       # end capturing the verb
            e?      # maybe followed by 'e'
            (?:s|d)? # or 's' or 'd', not capturing
            \s      # then a white space
            tr[#]   # and 'tr#' to indicate the card
            ([0-9]+) # with the card's short id.
            """, re.VERBOSE | re.IGNORECASE)
        actions = pattern.findall(commit['message'])
        for (action, cardId) in actions:
            if action.lower() == 'fix' or action.lower() == 'close':
                self.closeCard(cardId, commit)
            else:
                self.referenceCard(cardId, commit)

    def referenceCard(self, cardId, commit):
        """Post the commit message as a comment and assign the author.
        To perform any update to the card, card's fullID is required
        instead of shortID. Therefore, need to query the fullID.
        However, to avoid performing too many requests, a hash map
        is used to lazy load the full ID.
        Keyword arguments:
        cardId -- the id of the card to perform actions to.
        commit -- the commit dict with message to comment.
        """
        if cardId not in self.__cardMap:
            # Lazy loading of the card's full ID.  (This was a bare string
            # expression before -- a no-op statement, not a comment.)
            card = getCard(self, cardId)
            self.__cardMap[cardId] = card['id']
        post_load = {'text': commit['message'], 'token': self.token, 'key': API_KEY}
        # NOTE(review): ``.json`` property is the pre-1.0 requests API.
        res = requests.post(COMMENT_URL % (self.__cardMap[cardId]), data=post_load).json
        # idMemberCreator of the new comment is the member owning the token.
        authorId = res['idMemberCreator']
        post_load = {'value': authorId, 'token': self.token, 'key': API_KEY}
        requests.post(ASSIGN_MEMBER_URL % self.__cardMap[cardId], data=post_load)

    def closeCard(self, cardId, commit):
        """Post the commit message as a comment and close the card.
        Keyword arguments:
        cardId -- the id of the card to perform actions to.
        commit -- the commit dict with message to comment.
        """
        # Comment the commit to the card (also fills the id cache).
        self.referenceCard(cardId, commit)
        # Close / Archive the card
        put_load = {'closed': 'true', 'token': self.token, 'key': API_KEY}
        requests.put(CARD_URL % self.__cardMap[cardId], data=put_load)
if (__name__ == '__main__'):
    # Manual smoke test with a sample Bitbucket-style push payload.
    # Note: the commit message says 'issue #206 fixed' with no 'tr#' token,
    # so handleCommit's regex finds nothing and no Trello request is made.
    broker = TrelloBroker()
    payload = {
        'broker': u'twitter',
        'commits': [{ 'author': u'jespern',
                      'files': [{'file': u'media/css/layout.css',
                                 'type': u'modified'},
                                {'file': u'apps/bb/views.py',
                                 'type': u'modified'},
                                {'file': u'templates/issues/issue.html',
                                 'type': u'modified'}],
                      'message': u'adding bump button, issue #206 fixed',
                      'node': u'e71c63bcc05e',
                      'revision': 1650,
                      'size': 684}],
    'repository': { 'absolute_url': u'/jespern/bitbucket/',
                    'name': u'bitbucket',
                    'owner': u'jespern',
                    'slug': u'bitbucket',
                    'website': u'http://bitbucket.org/'},
    'service': {'token': u'4f63dd5fe5faf4d83d01727f', u'board': u'4f63dd5fe5faf4d83d01727f'}}
    broker.handle(payload)
f3c50bbb6de8159ac50fcb5a9971fa7376b56cbe | Python | Shilpa106/notify_CLI | /sqlite_database/ex6.py | UTF-8 | 1,167 | 3.421875 | 3 | [] | no_license | import sqlite3
from sqlite3 import Error
def sql_connection():
    """Open (or create) mydatabase.db and return the connection.

    Returns None when the connection cannot be established; the error is
    printed rather than raised so callers can test the result (see the
    ``if (sqllite_conn):`` guard below).
    """
    try:
        conn = sqlite3.connect('mydatabase.db')
        return conn
    except Error as e:
        # Print the actual error instance -- the original printed the Error
        # *class* object -- and make the failure-path None return explicit.
        print(e)
        return None
def sql_table(conn, rows):
    """Create the ``salesman`` table on *conn*, bulk-insert *rows*
    (salesman_id, name, city, commission tuples), commit, and print the
    resulting row count."""
    cursor = conn.cursor()

    # Create the table
    cursor.execute("CREATE TABLE salesman(salesman_id n(5), name char(30), city char(35), commission decimal(7,2));")

    insert_query = """INSERT INTO salesman
                          (salesman_id, name, city, commission)
                          VALUES (?, ?, ?, ?);"""
    cursor.executemany(insert_query, rows)
    conn.commit()

    print("Number of records after inserting rows:")
    inserted = cursor.execute('select * from salesman;')
    print(len(inserted.fetchall()))
# Insert records
# Sample rows in (salesman_id, name, city, commission) order, matching the
# table created by sql_table.
rows = [(5001,'James Hoog', 'New York', 0.15),
        (5002,'Nail Knite', 'Paris', 0.25),
        (5003,'Pit Alex', 'London', 0.15),
        (5004,'Mc Lyon', 'Paris', 0.35),
        (5005,'Paul Adam', 'Rome', 0.45)]
sqllite_conn = sql_connection()
# NOTE(review): sql_connection can return None on failure, in which case this
# call would raise; only the close below is guarded.
sql_table(sqllite_conn, rows)
if (sqllite_conn):
    sqllite_conn.close()
    print("\nThe SQLite connection is closed.")
842205d12b907dc8e3382b80bb1dc8f38f895935 | Python | matteo-peltarion/classification-toolkit | /palladio/config/konfiguration.template-mlc.py | UTF-8 | 3,292 | 2.5625 | 3 | [] | no_license | """
Konfiguration template file for image classification task
"""
from palladio.utils import get_data_augmentation_transforms
from torch.utils.data import RandomSampler
from palladio.networks.utils import get_network as pd_get_network
from torch.nn import BCEWithLogitsLoss
from sklearn.metrics import (
precision_score, recall_score, f1_score, accuracy_score)
########################
####### Settings ####### #noqa
########################
EXPERIMENT_NAME = "MLC"
# Dataset
# Data augmentation/transformation
# Level 0 means no augmentation; INPUT_NORMALIZATION None disables it.
DATA_AUGMENTATION_LEVEL = 0
INPUT_NORMALIZATION = None
# Load datasets
train_transforms = get_data_augmentation_transforms(
    DATA_AUGMENTATION_LEVEL, INPUT_NORMALIZATION)
# XXX Set training dataset here
train_set = None
# Input transformation
# For validation have data augmentation level set to 0 (NO DA)
val_transforms = get_data_augmentation_transforms(
    0, INPUT_NORMALIZATION)
# XXX Set validation dataset here
val_set = None
# This variable should be a dictionary whose keys are the index of a class and
# the values are strings describing the class.
class_map_dict = None
# Misc
# Specify loss
# Multi-label setup: independent sigmoid per class via BCEWithLogitsLoss.
criterion = BCEWithLogitsLoss()
def build_metrics(outputs, targets):
    """
    Compute stats based on output and target. Returns a dictionary containing
    all metrics that should be logged.
    Parameters
    ----------
    outputs : torch.Tensor
        The result of a forward pass of the net
    target :
        The target
    Return
    ------
    dict : The aggregated metrics
    """
    # Classification problem: extract predicted labels
    # NOTE(review): the loss above is BCEWithLogitsLoss, so ``outputs`` are
    # presumably raw logits; thresholding at 0.5 only makes sense on
    # sigmoid-activated outputs (logits would threshold at 0) -- confirm
    # what the caller passes in.
    predicted = (outputs > 0.5).int()
    metrics_functions = {
        'precision': precision_score,
        'recall': recall_score,
        'f1': f1_score,
    }
    metrics = dict()
    # Micro/macro-averaged precision/recall/F1 over all classes.
    for m, mf in metrics_functions.items():
        for avg in ['micro', 'macro']:
            metrics[f'{m}_{avg}'] = mf(targets, predicted, average=avg)
    if class_map_dict is not None:
        # Add accuracy to metrics
        metrics_functions['accuracy'] = accuracy_score
        # Per class metrics
        for i, c in class_map_dict.items():
            # Sanitize name of the class
            cc = c.lower().replace(" ", "_")
            for m, mf in metrics_functions.items():
                # Per-class column i; .cpu() is applied only here, so the
                # aggregated metrics above assume targets/predicted are
                # already CPU tensors (or sklearn-compatible arrays).
                metrics[f'{m}_{cc}'] = mf(
                    targets[:, i].cpu(), predicted[:, i].cpu())
    return metrics
def print_batch_log(outputs, targets, loss, logger, batch_idx,
                    n_batches, print_every, subset):
    """
    Outputs are logits, targets
    Log running loss and micro/macro F1 every ``print_every`` batches.
    ``loss`` is the running sum of batch losses; it is averaged by the
    number of batches processed so far before printing.
    """
    STATUS_MSG = "[{}] Batches done: {}/{} | Loss: {:04f} | F1 (micro): {:04f} | F1 (macro): {:04f}" # noqa
    # Metrics are recomputed even on batches that are not printed.
    metrics = build_metrics(outputs, targets)
    if (batch_idx + 1) % print_every == 0:
        logger.info(STATUS_MSG.format(
            subset,
            batch_idx+1,
            n_batches,
            loss/(batch_idx+1),
            metrics["f1_micro"],
            metrics["f1_macro"]))
###################
####### END ####### #noqa
###################
# NOTE(review): with the template defaults train_set/val_set are still None
# here; these samplers only become meaningful once the XXX placeholders
# above are filled with real datasets.
train_sampler = RandomSampler(train_set)
val_sampler = RandomSampler(val_set)
# Specify number of classes
# Must be set by the concrete configuration before get_network is called.
num_classes = None
def get_network(network_name, use_pretrained):
    """Build the classification network via palladio's factory.

    Uses the module-level ``num_classes`` (None in this template -- set it
    before calling) to size the output layer.
    """
    return pd_get_network(network_name, num_classes, use_pretrained)
| true |
22b1f4aa2fff7f309da8413da23fb2f4fe52d157 | Python | kirankilva/Python_Games | /Number_Guessing_Game/guessNumber.py | UTF-8 | 5,838 | 3.4375 | 3 | [] | no_license | #Importing libraries
from tkinter import *
import random
from tkinter import messagebox as msg
#Code goes from here
class NumberGuess:
    """Tkinter number-guessing game.

    The player picks a range, presses START to draw a secret number, and
    then guesses it within 5 chances.  The secret number is stored as the
    instance attribute ``self.val`` (the original kept it in a module-level
    ``global val`` plus a dead local ``val = 0`` in __init__, which broke
    with more than one game window).
    """

    def __init__(self, root):
        """Build the whole window layout on *root* and wire up the buttons."""
        self.root = root
        self.root.geometry('400x400+500+100')
        self.root.title('Number Guessing Game')

        #All variables
        # Secret number; set for real by start_func.
        self.val = 0
        self.num_var = IntVar()
        self.chance_var = IntVar()
        self.from_var = IntVar()
        self.to_var = IntVar()
        self.res_var = StringVar()

        self.chance_var.set(5)
        self.res_var.set('Hello User!..')

        #Title
        Label(self.root, text='NUMBER GUESSING GAME', width=30, height=1, font=('Times New Roman',15,'bold')).place(x=10, y=10)

        #Enter the number label
        num_lbl = Label(self.root, text='Enter the number : ', width=15, height=1, font=('Times New Roman',11))
        num_lbl.place(x=25, y=100)
        num_entry = Entry(self.root, width=10, background='white', bd=1, textvariable=self.num_var)
        num_entry.place(x=145, y=103)

        #Chances label
        chance_lbl = Label(self.root, text='Chances left : ', width=15, height=1, font=('Times New Roman',11))
        chance_lbl.place(x=35, y=160)
        chance_entry = Entry(self.root, width=10, background='white', bd=1, textvariable=self.chance_var)
        chance_entry.place(x=145, y=163)

        #Result label
        res_lbl = Label(self.root, text='Result will appear here : ', width=20, height=1, font=('Times New Roman',11))
        res_lbl.place(x=20, y=220)
        res_entry = Entry(self.root, width=55, background='white', bd=1, textvariable=self.res_var)
        res_entry.place(x=30, y=245)

        #Range from
        from_lbl = Label(self.root, text='From : ', width=5, height=1, font=('Times New Roman',11))
        from_lbl.place(x=250, y=100)
        from_entry = Entry(self.root, width=10, background='white', bd=1, textvariable=self.from_var)
        from_entry.place(x=300, y=103)

        #Range to
        to_lbl = Label(self.root, text='To : ', width=5, height=1, font=('Times New Roman',11))
        to_lbl.place(x=255, y=160)
        to_entry = Entry(self.root, width=10, background='white', bd=1, textvariable=self.to_var)
        to_entry.place(x=300, y=163)

        #Designer
        Label(self.root, text="designed by", fg="grey").place(x=170, y=360)
        Label(self.root, text="KIRAN KILVA", font="Helvetica 10").place(x=165, y=378)

        #buttons
        #Start button
        start_btn = Button(self.root, text='S T A R T', width=10, bd=1, bg='orange', activebackground='orange', fg='white', activeforeground='black', font=('',9,'bold'), cursor='hand2', command=self.start_func)
        start_btn.place(x=30, y=300)
        #Reset button
        reset_btn = Button(self.root, text='R E S E T', width=10, bd=1, bg='blue', activebackground='blue', fg='white', activeforeground='black', font=('',9,'bold'), cursor='hand2', command=self.reset_func)
        reset_btn.place(x=115, y=300)
        #Guess button
        guess_btn = Button(self.root, text='G U E S S', width=10, bd=1, bg='green', activebackground='green', fg='white', activeforeground='black', font=('',9,'bold'), cursor='hand2', command=self.guess_func)
        guess_btn.place(x=200, y=300)
        #Clear button
        clear_btn = Button(self.root, text='E X I T', width=10, bd=1, bg='grey', activebackground='grey', fg='white', activeforeground='black', font=('',9,'bold'), cursor='hand2', command=self.exit_func)
        clear_btn.place(x=285, y=300)

    #Start button function
    def start_func(self):
        """Validate the range and draw the secret number for this round."""
        try:
            if (self.from_var.get() == 0 and self.to_var.get() == 0) or (self.from_var.get() == '' and self.to_var.get() == ''):
                msg.showwarning('Warning', 'Please select the range.')
            else:
                self.res_var.set(f'Guess the Number between {self.from_var.get()} and {self.to_var.get()}')
                # Store the secret number on the instance instead of in a
                # module-level global.
                self.val = random.randint(self.from_var.get(), self.to_var.get())
        except:
            msg.showerror('ERROR', 'Invalid Entries. Please try again')

    #Reset button function
    def reset_func(self):
        """Restore the initial UI state (5 chances, greeting message)."""
        self.num_var.set(0)
        self.chance_var.set(5)
        self.from_var.set(0)
        self.to_var.set(0)
        self.res_var.set('Hello User!..')

    #Guess button function
    def guess_func(self):
        """Compare the entered number with the secret one and update chances."""
        if self.res_var.get() == 'Hello User!..':
            # The greeting is only present before START has been pressed.
            msg.showwarning('WARNING', 'Please click the START button')
        else:
            if self.num_var.get() > self.val:
                self.res_var.set(f'Wrong!. {self.num_var.get()} is Greater than the Number')
            elif self.num_var.get() < self.val:
                self.res_var.set(f'Wrong!. {self.num_var.get()} is Less than the Number')
            else:
                self.res_var.set('Congragulations!.. You WON the game')
                ans = msg.askquestion('Congragulations', 'You Won the game!!\nDo you want to play again?')
                if ans == 'yes':
                    self.reset_func()
                    # Pre-increment so the unconditional decrement below
                    # leaves the fresh game at the full 5 chances.
                    self.chance_var.set(self.chance_var.get()+1)
                else:
                    self.root.destroy()
            self.chance_var.set(self.chance_var.get()-1)
            if self.chance_var.get() == 0:
                msg.showinfo('BETTER LUCK NEXT TIME', 'Sorry!! You Lost The Game')
                self.reset_func()

    #Exit button function
    def exit_func(self):
        """Ask for confirmation and close the window."""
        ask = msg.askquestion('EXIT', 'Do you really want to exit the game?')
        if ask == 'yes':
            self.root.destroy()
# Create the main window, build the game UI, and enter the Tk event loop.
root = Tk()
obj = NumberGuess(root)
root.mainloop()
| true |
6008c61b6ef3e8b383bf6c1070442727a2da0db6 | Python | Tsukigata/MonetaryDataSpider | /monetarySpider/spiders/realTimeSpider.py | UTF-8 | 2,776 | 2.59375 | 3 | [] | no_license | import scrapy
import json
from monetarySpider.items import MonetaryRealItem
from scrapy.http.request import Request
import time
# Because the proxy IPs in use are of low quality, data freshness suffers;
# the proxy pool is configured in the middlewares and in IPPOOLS in settings.
class RealtimespiderSpider(scrapy.Spider):
    # Real-time quote spider for Sina Finance's stock-list JSON API.
    name = 'realTimeSpider'
    allowed_domains = ['vip.stock.finance.sina.com.cn']
    start_urls = ['http://vip.stock.finance.sina.com.cn/']
    # Use a dedicated item pipeline for this spider
    custom_settings = {
        'ITEM_PIPELINES':{'monetarySpider.pipelines.MonetaryspiderPipelineRealtime':300}
    }
    def start_requests(self):
        while True:# once started, this spider loops forever and must be stopped manually
            # Shanghai/Shenzhen A shares
            for page in range(1,5):# crawl the first pages only; the A-share list has 58 pages in total (same below) -- NOTE(review): range(1,5) yields 4 pages, not 5
                base_url ='http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData?page='+str(page)+'&num=80&sort=symbol&asc=1&node=hs_a&symbol=&_s_r_a=page'
                yield Request(url = base_url,callback = self.parse_index,dont_filter = True)
            # ChiNext board (15 pages in total)
            for page in range(1,4):
                base_url ='http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData?page='+str(page)+'&num=80&sort=symbol&asc=1&node=cyb&symbol=&_s_r_a=page'
                yield Request(url = base_url,callback = self.parse_index,dont_filter = True)
            # STAR Market (5 pages in total)
            for page in range(1,2):
                base_url ='http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData?page='+str(page)+'&num=80&sort=symbol&asc=1&node=kcb&symbol=&_s_r_a=page'
                yield Request(url = base_url,callback = self.parse_index,dont_filter = True)
            time.sleep(1)# wait 1 second, then crawl again
    def parse_index(self,response):
        # Each response is a JSON array of up to 80 (num=80) stock records.
        resp = json.loads(response.text)
        date = time.strftime('%Y-%m-%d')# grab the current date
        item = MonetaryRealItem()
        # NOTE(review): a single item instance is mutated and yielded 80
        # times; Scrapy pipelines usually copy items, but confirm.
        for i in range(80):
            item['date'] = date# trade date
            item['stock_code'] = resp[i]['symbol']# stock code
            item['ticktime'] = resp[i]['ticktime']# exact trade time
            item['volume'] = str(resp[i]['volume'])# cumulative trade volume
            item['amount'] = str(resp[i]['amount'])# cumulative turnover
            item['price'] = str(resp[i]['trade'])# current price
            item['pricechange'] = str(resp[i]['pricechange'])# price change
            item['changepercent'] = str(resp[i]['changepercent'])# price change percentage
            yield item
| true |
a7d5ff707a94dfb8761358eab9bca0f2aa06a9d4 | Python | AhmedElkhodary/Python-3-Programming-specialization | /1- Python Basics/FinalCourseAssignment/pro4.py | UTF-8 | 434 | 4.09375 | 4 | [] | no_license | #A palindrome is a phrase that, if reversed, would read the exact same.
#Write code that checks if p_phrase is a palindrome by reversing it
#and then checking if the reversed version is equal to the original.
#Assign the reversed version of p_phrase to the variable r_phrase so that we can check your work.
p_phrase = "was it a car or a cat I saw"
# Slicing with step -1 reverses the string in one pass (the original built
# it character by character).
r_phrase = p_phrase[::-1]
# Perform the comparison the comment above promises (exact, case-sensitive).
is_palindrome = r_phrase == p_phrase
print(r_phrase)
| true |
2f61e708b7df8b16e2d4fbbfbf6a7bcfc9243add | Python | Orderlee/tjoeun | /pycharm/ch02/ex07.py | UTF-8 | 1,045 | 4.09375 | 4 | [] | no_license | number=3
#숫자이지만 문자열 형식으로 출력 가능함
a='나는 사과 %s개를 먹었다.' %number
print(a)
#%10s 10자리의 문자열(오른쪽 정렬)
a='나는 사과 %10s개를 먹었다.' %number
print(a)
# $-10s 10자리의 문자열 (왼쪽 정렬)
a='나는 사과 %-10s개를 먹었다.' %number
print(a)
# {}위치에 값이 입력됨
a='나는 사과 {}개를 먹었다.'.format(number)
print(a)
# >자리수: 오른쪽 정렬
# {변수인덱스:세부옵션}
a='나는 사과 {:>10}개를 먹었다.'.format(number)
print(a)
# <자리수: 왼쪽 정렬
a='나는 사과 {:<10}개를 먹었다.'.format(number)
print(a)
# &자리수: 가운데 정렬
a='나는 사과 {:^10}개를 먹었다.'.format(number)
print(a)
#실수값의 표현 방법
a = 3.42134234
print('%f' % a) #실수형으로 출력
print('%.2f' %a) #소수 이하 2자리까지 출력
# 전체자리수.소수이하자리수
print('%7.2f' %a)
print('{}'.format(a))
print('{:.2f}'.format(a))
print('{:7.2f}'.format(a)) | true |
7ff972ce4da7bf0da9a78647372cc3789881c746 | Python | firemark/notebooks | /gil-prelection/draw.py | UTF-8 | 208 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python2
from PIL import Image
from sys import stdin

# Read whitespace-separated 8-bit grayscale values from stdin and display
# them as a 1200x800 image ('L' = single-channel luminance mode).
width = 1200
height = 800

data = stdin.read()
img = Image.new('L', (width, height))
# split() with no argument tolerates runs of whitespace and the trailing
# newline; the original split(' ') produced empty tokens on repeated spaces,
# which crashed int('').
img.putdata([int(x) for x in data.split()])
img.show()
| true |
80107ba7fc96cec13566ccb5866f585c590fac00 | Python | abagaria/sentiment-classification | /sentiment.py | UTF-8 | 11,367 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python3
from os import path
import argparse
import logging
import multiprocessing
import pickle
from collections import OrderedDict
import pdb
import time
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import numpy as np
# Pytorch Imports
import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence
from tqdm import tqdm
# Stencil imports
import embeddings
from dataset import SentimentDataset
import hyperparams
class SentimentNetwork(nn.Module):
    """ Sentiment classifier network.

    Architecture: embedding lookup -> (multi-layer) GRU encoder -> two-layer
    MLP head with LeakyReLU activations and dropout -> 2-class logits.
    """

    def __init__(self, hidden_sz, embedding_lookup, rnn_layers=1, device=torch.device('cpu')):
        """
        :param hidden_sz (int): The size of our RNN's hidden state
        :param embedding_lookup (nn.Module): module mapping token ids to
            embeddings; must expose an ``embed_size`` attribute
            (see embeddings.py)
        :param rnn_layers (int): number of stacked GRU cells
        :param device: torch device the model and its hidden state live on
        """
        super(SentimentNetwork, self).__init__()
        self.hidden_sz = hidden_sz
        self.rnn_layers = rnn_layers
        self.num_outputs = 2
        self.interm_size_1 = 256
        self.interm_size_2 = 64

        self.embedding_lookup = embedding_lookup.to(device)  # instance of torch.nn.Module
        self.embed_size = embedding_lookup.embed_size  # int value

        # GRU over the embedded token sequence; batch dimension first.
        self.rnn_encoder = nn.GRU(self.embed_size, self.hidden_sz,
                                  num_layers=rnn_layers, batch_first=True)

        # Classifier head: hidden_sz -> 256 -> 64 -> 2 logits; dropout
        # between the linear layers to curb overfitting.
        self.classifier = nn.Sequential(
            nn.Linear(self.hidden_sz, self.interm_size_1),
            nn.LeakyReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(self.interm_size_1, self.interm_size_2),
            nn.LeakyReLU(),
            nn.Dropout(0.5),
            nn.Linear(self.interm_size_2, self.num_outputs)
        )

        self._dev = device
        self.to(device)

    def forward(self, tokens, seq_lens):
        """Return (batch_size x 2) class logits for a batch of padded sequences.

        :param tokens: (batch, max_len) tensor of (padded) token ids
        :param seq_lens: (batch,) tensor of the true, pre-padding lengths
        """
        curr_batch_size = seq_lens.shape[0]

        # 1. Embed the token ids.
        embeds = self.embedding_lookup(tokens)

        # 2. Sort by descending length, as pack_padded_sequence expects.
        sorted_seq_lens, perm_idx = seq_lens.sort(0, descending=True)
        sorted_embeds = embeds[perm_idx]

        # 3. Pack so the GRU skips padded positions.  Lengths must live on
        #    the CPU regardless of the model's device.
        packed_input = pack_padded_sequence(sorted_embeds, sorted_seq_lens.cpu(),
                                            batch_first=True)

        # 4. Encode; ``hidden`` has shape (num_layers, batch, hidden_sz).
        _, hidden = self.rnn_encoder(packed_input, self.init_hidden(curr_batch_size))

        # 5. Classify from the LAST layer's final hidden state.  (The
        #    previous ``classifier(hidden).squeeze(0)`` only worked for
        #    rnn_layers == 1; ``hidden[-1]`` is equivalent there and correct
        #    for deeper stacks.)
        logits = self.classifier(hidden[-1])  # (batch, 2)

        # 6. Undo the length-sort so logits line up with the input order.
        _, unperm_idx = perm_idx.sort(0)
        return logits[unperm_idx]

    def init_hidden(self, batch_size):
        """Zero hidden state of shape (num_layers, batch, hidden_sz)."""
        return torch.zeros(self.rnn_layers, batch_size, self.hidden_sz, device=self._dev)
def train(hp, embedding_lookup):
    """ This is the main training loop
    :param hp: HParams instance (see hyperparams.py)
    :param embedding_lookup: torch module for performing embedding lookups (see embeddings.py)

    Uses the module-level ``args`` and ``DEV`` set in the __main__ block.
    Saves model weights after every epoch and a loss-curve figure at the end.
    """
    modes = ['train', 'dev']
    # Note: each of these are dicts that map mode -> object, depending on if we're using the training or dev data.
    datasets = {mode: SentimentDataset(args.data, mode) for mode in modes}
    data_sizes = {mode: len(datasets[mode]) for mode in modes} # hint: useful for averaging loss per batch
    dataloaders = {mode: DataLoader(datasets[mode], batch_size=hp.batch_size, shuffle=True, num_workers=6, drop_last=True) for mode in modes}
    model = SentimentNetwork(hp.rnn_hidden_size, embedding_lookup, device=DEV)
    print(model)
    loss_func = nn.CrossEntropyLoss() # TODO choose a loss criterion
    optimizer = optim.Adam(model.parameters(), lr=hp.learn_rate)
    train_loss = [] # training loss per epoch, averaged over batches
    dev_loss = [] # dev loss each epoch, averaged over batches
    # Note: similar to above, we can map mode -> list to append to the appropriate list
    losses = {'train': train_loss, 'dev': dev_loss}
    for epoch in range(1, hp.num_epochs+1):
        for mode in modes:
            running_loss = 0.0
            for (vectorized_seq, seq_len), label in tqdm(dataloaders[mode], desc='{}:{}/{}'.format(mode, epoch, hp.num_epochs)):
                vectorized_seq = vectorized_seq # note: we don't pass this to GPU yet
                seq_len = seq_len.to(DEV)
                label = label.long().to(DEV)
                if mode == 'train':
                    model.train() # tell pytorch to set the model to train mode
                    # TODO complete the training step. Hint: you did this for hw1
                    # don't forget to update running_loss as well
                    model.zero_grad() # clear gradients (torch will accumulate them)
                    logits = model(vectorized_seq, seq_len)
                    # max_logits = torch.max(logits, dim=1)[0]
                    loss = loss_func(logits, label)
                    loss.backward()
                    optimizer.step()
                    running_loss += loss.item()
                else:
                    # Dev pass: no gradient bookkeeping, just accumulate loss.
                    model.eval()
                    with torch.no_grad():
                        logits = model(vectorized_seq, seq_len)
                        loss = loss_func(logits, label)
                        running_loss += loss.item()
            # NOTE(review): the 64 here looks like a hard-coded batch size --
            # should presumably be hp.batch_size to stay correct when the
            # hyperparameters change.
            avg_loss = running_loss/(data_sizes[mode]/64)
            losses[mode].append(avg_loss)
            print("{} Loss: {}".format(mode, avg_loss))
        # Checkpoint after every epoch.
        torch.save(model.state_dict(), "{embed}_{i}_weights.pt".format(embed=args.embedding, i=epoch))
    # TODO plot train_loss and dev_loss
    plt.subplot(1, 2, 1)
    plt.plot(losses['train'])
    plt.title('Training loss')
    plt.subplot(1, 2, 2)
    plt.plot(losses['dev'])
    plt.title('Development loss')
    plt.savefig("{}_learning_curves_{}.png".format(args.embedding, time.time()))
    plt.close()
def evaluate(hp, embedding_lookup):
    """ This is used for the evaluation of the net.

    Loads weights from ``args.restore``, runs the test split one example at
    a time (batch_size=1), and prints accuracy plus a 2x2 confusion matrix
    (rows: true label, columns: predicted label).
    """
    mode = 'test' # use test data
    dataset = SentimentDataset(args.data, mode)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=6)
    model = SentimentNetwork(hp.rnn_hidden_size, embedding_lookup, device=DEV)
    model.load_state_dict(torch.load(args.restore))
    data_size = len(dataset)
    confusion = torch.zeros((2,2)) # TODO fill out this confusion matrix
    for (vectorized_seq, seq_len), label in tqdm(dataloader, ascii=True):
        vectorized_seq = vectorized_seq
        seq_len = seq_len.to(DEV)
        label = label.to(DEV)
        model.eval()
        with torch.no_grad():
            logits = model(vectorized_seq, seq_len)
            # TODO obtain a sentiment class prediction from output
            predicted_labels = torch.argmax(logits, dim=1)
            # assert sum(label.numpy().shape) == 1 and sum(predicted_labels.numpy().shape) == 1, "Expected one label"
            # confusion[label.numpy()[0]][predicted_labels.numpy()[0]] += 1
            confusion[label, predicted_labels] += 1
    # Accuracy = trace (correct predictions) over total examples.
    accuracy = np.trace(confusion.numpy()) * 100. / data_size # TODO
    print("Sentiment Classification Accuracy of {:.2f}%".format(accuracy))
    print("Confusion matrix:")
    print(confusion)
def main():
    """Dispatch to train() or evaluate() based on the parsed CLI args."""
    # Map word index back to the word's string. Due to a quirk in
    # pytorch's DataLoader implementation, we must produce batches of
    # integer id sequences. However, ELMo embeddings are character-level
    # and as such need the word. Additionally, we do not wish to restrict
    # ElMo to GloVe's vocabulary, and thus must map words to non-glove IDs here:
    with open(path.join(args.data, "idx2word.dict"), "rb") as f:
        idx2word = pickle.load(f)
    # --- Select hyperparameters and embedding lookup classes
    # --- based on the embedding type:
    if args.embedding == "elmo":
        lookup = embeddings.Elmo(idx2word, device=DEV)
        hp = hyperparams.ElmoHParams()
    elif args.embedding == "glove":
        lookup = embeddings.Glove(args.data, idx2word, device=DEV)
        hp = hyperparams.GloveHParams()
    elif args.embedding == "both":
        lookup = embeddings.ElmoGlove(args.data, idx2word, device=DEV)
        hp = hyperparams.ElmoGloveHParams()
    elif args.embedding == "random":
        lookup = embeddings.RandEmbed(len(idx2word), device=DEV)
        hp = hyperparams.RandEmbedHParams(embed_size=lookup.embed_size)
    else:
        # NOTE(review): on an invalid --embedding only this message is
        # printed; hp/lookup stay unbound, so the calls below raise
        # NameError instead of exiting cleanly.
        print("--embeddings must be one of: 'elmo', 'glove', 'both', or 'random'")
    # --- Either load and evaluate a trained model, or train and save a model ---
    if args.restore:
        evaluate(hp, lookup)
    else:
        train(hp, lookup)
if __name__ == '__main__':
    # Quiet multiprocessing worker logging (DataLoader uses num_workers=6).
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.WARNING)
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", type=str, help="path to data file", default="data")
    parser.add_argument("--embedding", type=str, help="embedding type")
    parser.add_argument("--device", type=str, help="cuda for gpu and cpu otherwise", default="cpu")
    parser.add_argument("--restore", help="filepath to restore")
    # args and DEV intentionally become module-level globals; train(),
    # evaluate() and main() all read them.
    args = parser.parse_args()
    DEV = torch.device(args.device)
    print("######################### Using {} ############################".format(DEV))
    main()
| true |
d2f16162fb39c6372c0d09bb8888832cbdd356f5 | Python | crcrpar/chainer | /tests/chainer_tests/utils_tests/test_cache.py | UTF-8 | 2,502 | 2.515625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import unittest
import numpy
import chainer
from chainer import testing
from chainer.utils import cache
class MockDistribution(object):
    # Test fixture: two chained cached properties (y depends on h) with
    # call counters so the tests can assert how often each getter ran.

    def __init__(self, x):
        self.x = x
        self.h_call_count = 0
        self.y_call_count = 0

    @cache.cached_property
    def h(self):
        # h = 2x; recomputed only when the cache misses.
        self.h_call_count += 1
        return self.x * 2

    @cache.cached_property
    def y(self):
        # y = 3h = 6x; accessing y may in turn populate the h cache.
        self.y_call_count += 1
        return self.h * 3
class TestCachedProperty(unittest.TestCase):

    def test_name(self):
        # The descriptor preserves the wrapped function's __name__.
        assert MockDistribution.y.__name__ == 'y'

    def test1(self):
        # Repeated access returns the identical cached object; the getter
        # runs exactly once.
        obj = MockDistribution(chainer.Variable(numpy.array([1.])))
        h0 = obj.h
        h1 = obj.h
        assert obj.h_call_count == 1
        assert h0 is h1
        numpy.testing.assert_allclose(h0.array, 2.)

    def test2(self):
        # Same caching behaviour entirely inside no_backprop_mode.
        obj = MockDistribution(chainer.Variable(numpy.array([1.])))
        with chainer.no_backprop_mode():
            h0 = obj.h
            h1 = obj.h
        assert obj.h_call_count == 1
        assert h0 is h1
        numpy.testing.assert_allclose(h0.array, 2.)

    def test3(self):
        # The cache is keyed on backprop mode: values computed with and
        # without backprop are distinct objects, but each mode reuses its own.
        obj = MockDistribution(chainer.Variable(numpy.array([1.])))
        h0 = obj.h
        with chainer.no_backprop_mode():
            h1 = obj.h
        h2 = obj.h
        with chainer.no_backprop_mode():
            h3 = obj.h
        assert obj.h_call_count <= 2
        assert h0 is h2
        assert h0 is not h1
        assert h1 is h3
        numpy.testing.assert_allclose(h0.array, 2.)
        numpy.testing.assert_allclose(h1.array, 2.)

    def test_attrs1(self):
        # Chained cached properties: computing y consumes the cached h, so
        # each getter still runs only once.
        obj = MockDistribution(chainer.Variable(numpy.array([1.])))
        h0 = obj.h
        y0 = obj.y
        h1 = obj.h
        y1 = obj.y
        assert obj.h_call_count == 1
        assert obj.y_call_count == 1
        assert h0 is h1
        assert y0 is y1
        numpy.testing.assert_allclose(h0.array, 2.)
        numpy.testing.assert_allclose(y0.array, 6.)

    def test_objs1(self):
        # Caches are per-instance: two objects keep independent values.
        obj0 = MockDistribution(chainer.Variable(numpy.array([1.])))
        obj1 = MockDistribution(chainer.Variable(numpy.array([10.])))
        y00 = obj0.y
        y10 = obj1.y
        y01 = obj0.y
        y11 = obj1.y
        assert obj0.y_call_count == 1
        assert obj1.y_call_count == 1
        assert y00 is y01
        assert y10 is y11
        numpy.testing.assert_allclose(y00.array, 6.)
        numpy.testing.assert_allclose(y10.array, 60.)
# Chainer's test runner hook: executes this module's tests under pytest.
testing.run_module(__name__, __file__)
| true |
a0b8226fd8878ee12ee01f52e02b430d27a8b831 | Python | ehddnr301/corona_dashboard | /bar.py | UTF-8 | 560 | 2.703125 | 3 | [] | no_license | import plotly.express as px
from data import totals_df
# Plotly Express bar chart of total COVID case counts by condition.
# totals_df (from data.py) is expected to have 'condition' and 'count'
# columns -- confirm against data.py.
bar_chart = px.bar(totals_df,
    x='condition',
    y='count',
    title='Total Case',
    # Thousands separator in the hover tooltip for the count value.
    hover_data={
        'count': ':,'
    },
    labels={
        'condition' : 'Condition',
        'count': 'Count',
        'color': 'Condition'
    },
    # color=['Confirmed', 'Deaths', 'Recovered'],
    template='plotly_dark')
# One fixed color per bar (Confirmed / Deaths / Recovered order).
bar_chart.update_traces(
    marker_color=['#EDD491', '#EE4F5A', '#F18E5C']
)
1015d201991501c94aeb5a8c10d38a854dc9a014 | Python | DeSireFire/crawlProject | /wangyiMusic/crawl.py | UTF-8 | 5,704 | 2.5625 | 3 | [] | no_license | # -*- coding: UTF-8 –*-
'''
网易云音乐爬虫,现可以进行评论(热门评论以及全部评论)的爬取
作者:洪韬
时间:2020/3/11
'''
from Crypto.Cipher import AES
import base64
import requests
import json
import time
import csv
import os
import re
class wangyiCrawl:
    """NetEase Cloud Music (music.163.com) comment scraper.

    Reproduces the site's client-side 'weapi' parameter encryption
    (two chained AES-CBC passes) so the comment API accepts our POSTs,
    then writes the hot comments and the full comment listing to CSVs.
    """
    def __init__(self):
        # Browser-like request headers so the site serves us normally.
        self.Headers = {
            'Accept': "*/*",
            'Accept-Language': "zh-CN,zh;q=0.9",
            'Connection': "keep-alive",
            'Host': "music.163.com",
            'User-Agent': "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36"
        }
        # Second parameter of the site's encryption scheme (constant taken
        # from its JavaScript; not used by the methods below).
        self.second_param = "010001"
        # Third parameter (long hex constant from the site's JS; also unused below).
        self.third_param = "00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7"
        # Fourth parameter: AES key for the first encryption pass.
        self.forth_param = "0CoJUm6Qyw8W8jud"
    def __create_path(self, *path):
        """Create the nested directory formed by joining the *path* parts."""
        finPath = ''
        for i in range(len(path)):
            finPath += path[i]
            if not os.path.exists(finPath):
                os.mkdir(finPath)
            finPath += '/'
    def __get_params(self, page):
        # Build encText (the 'params' POST field) for the given 1-based page.
        iv = "0102030405060708"
        first_key = self.forth_param
        second_key = 'F' * 16
        if page == 0:
            first_param = '{rid:"", offset:"0", total:"true", limit:"20", csrf_token:""}'
        else:
            offset = str((page - 1) * 20)
            first_param = '{rid:"", offset:"%s", total:"%s", limit:"20", csrf_token:""}' % (offset, 'false')
        # Two chained AES passes, mirroring the site's client-side encryption.
        self.encText = self.__AES_encrypt(first_param, first_key, iv)
        self.encText = self.__AES_encrypt(self.encText.decode('utf-8'), second_key, iv)
        return self.encText
    def __AES_encrypt(self, text, key, iv):
        # AES-CBC with manual PKCS#7-style padding; result is base64-encoded bytes.
        pad = 16 - len(text) % 16
        text = text + pad * chr(pad)
        encryptor = AES.new(key.encode('utf-8'), AES.MODE_CBC, iv.encode('utf-8'))
        encrypt_text = encryptor.encrypt(text.encode('utf-8'))
        encrypt_text = base64.b64encode(encrypt_text)
        return encrypt_text
    # Fetch both the hot comments and the full comment listing.
    def get_comments(self, url, path):
        """Scrape comments for the song page *url* into CSVs under *path*/<song title>/."""
        rsp = requests.get(url.replace('#', ''), headers=self.Headers)
        title = re.compile('"title": "(.*?)",').findall(rsp.text)[0]
        self.__create_path(path, title)
        file1 = open(path + '/' + title + '/hotComments.csv', 'w', encoding='utf-8', newline='')
        file2 = open(path + '/' + title + '/comments.csv', 'w', encoding='utf-8', newline='')
        fieldnames = ['user', 'thumbs-up', 'time', 'comments']
        writer1 = csv.DictWriter(file1, fieldnames=fieldnames)
        writer1.writeheader()
        writer2 = csv.DictWriter(file2, fieldnames=fieldnames)
        writer2.writeheader()
        # Song id is the tail of the page URL; POST target is the weapi endpoint.
        id = url.replace('https://music.163.com/#/song?id=', '')
        url = 'https://music.163.com/weapi/v1/resource/comments/R_SO_4_' + id + '?csrf_token'
        params = self.__get_params(1)
        # encSecKey is a fixed value captured from a real browser session.
        encSecKey = "257348aecb5e556c066de214e531faadd1c55d814f9be95fd06d6bff9f4c7a41f831f6394d5a3fd2e3881736d94a02ca919d952872e7d0a50ebfa1769a7a62d512f5f1ca21aec60bc3819a9c3ffca5eca9a0dba6d6f7249b06f5965ecfff3695b54e1c28f3f624750ed39e7de08fc8493242e26dbc4484a01c76f739e135637c"
        self.post = {
            'params': params,
            'encSecKey': encSecKey,
        }
        response = requests.post(url, headers=self.Headers, data=self.post)
        json_dict = json.loads(response.text)
        hotcomments = json_dict['hotComments']
        for i in hotcomments:
            time_local = time.localtime(int(i['time'] / 1000))  # epoch milliseconds -> local time
            dt = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
            itemDict = {
                'user': i['user']['nickname'],
                'thumbs-up': str(i['likedCount']),
                'time': dt,
                'comments': i['content'].replace('\n', '')
            }
            writer1.writerow(itemDict)
        file1.close()
        comments_num = int(json_dict['total'])
        present_page = 0
        # NOTE(review): when comments_num is a multiple of 20 this branch
        # produces a float and range(page) below raises TypeError — should
        # be `comments_num // 20`.
        if (comments_num % 20 == 0):
            page = comments_num / 20
        else:
            page = int(comments_num / 20) + 1
        print("共有%d页评论" % page)
        # Fetch the comments page by page.
        for i in range(page):
            print('正在爬取第' + str(i + 1) + '页')
            params = self.__get_params(i + 1)
            encSecKey = encSecKey
            self.post = {
                'params': params,
                'encSecKey': encSecKey,
            }
            response = requests.post(url, headers=self.Headers, data=self.post)
            json_dict = json.loads(response.text)
            present_page = present_page + 1
            # NOTE(review): this inner loop reuses the name `i` from the page loop.
            for i in json_dict['comments']:
                time_local = time.localtime(int(i['time'] / 1000))  # epoch milliseconds -> local time
                dt = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
                itemDict = {
                    'user': i['user']['nickname'],
                    'thumbs-up': str(i['likedCount']),
                    'time': dt,
                    'comments': i['content'].replace('\n', '')
                }
                writer2.writerow(itemDict)
        file2.close()
if __name__ == '__main__':
    app = wangyiCrawl()
    # Fetch the hot comments plus the full (roughly 101-page) comment listing
    # for this song, saving CSVs under the 'data' directory.
    app.get_comments('https://music.163.com/#/song?id=1429392929','data')
| true |
b6960de14ba9bb85108b3ba0edfde141349570df | Python | sdutta21/DungeonCrawler | /pyGame/door.py | UTF-8 | 980 | 3.53125 | 4 | [] | no_license | """
File containing the door sprite class
"""
import pygame
from pygame.locals import RLEACCEL
class Door(pygame.sprite.Sprite):
    """
    Class representing the door sprite
    attrs:
        surf: The pygame surf representing the sprite image
        rect: The pygame rect representing the bounds of the sprite
    """
    def __init__(self, x_cord, y_cord):
        """
        Initializes the door class
        args:
            x_cord: The x position to set the rect to
            y_cord: The y position to set the rect to
        """
        super(Door, self).__init__()
        # Load the door image; 'white' becomes the transparent color key.
        image = pygame.image.load('Sprites/Door/bottom_door.png').convert()
        image.set_colorkey('white', RLEACCEL)
        # Scale to the in-game door size (60x30 px), keeping the color key.
        self.surf = pygame.transform.smoothscale(image.convert_alpha(), (60, 30))
        self.surf.set_colorkey('white', RLEACCEL)
        self.rect = self.surf.get_rect()
        self.rect.x = x_cord # pylint: disable=message
        self.rect.y = y_cord # pylint: disable=message
| true |
17c947c7a3280ed0b06c9d3da2619418268d0a12 | Python | judecodetech/grmp | /deployments/fabfile/tools.py | UTF-8 | 2,746 | 2.671875 | 3 | [] | no_license | """
This module contains all the tasks to get a new or exisiting
server up and running
"""
from fabric.api import cd, env, puts, run
from fabric.colors import green, red, yellow
from fabric.operations import sudo
from fabfile import git
# Upstream Git repository of the Let's Encrypt (certbot) client.
LETSENCRYPT_DOWNLOAD = 'https://github.com/letsencrypt/letsencrypt'
# Server-side directory holding the cloned client and issued certificates.
LETSENCRYPT_DIR = '/etc/letsencrypt'
#domains = ' '.join(['-d {}'.format(domain) for domain in env.DOMAINS])
def apt_get_install(debs):
    """Install the given Debian packages non-interactively with apt-get.

    The package names in *debs* are joined into a single apt-get invocation.
    """
    puts(yellow('Starting installation of prerequisite packages'))
    package_list = ' '.join(debs)
    sudo('apt-get --assume-yes install {}'.format(package_list))
def set_shell_to_zsh():
    """
    Set the default shell to zsh by running the oh-my-zsh bootstrap script.
    """
    puts(yellow('Installing OhMyZsh theme and setting zsh to default shell'))
    # The installer script takes care of switching the login shell.
    sudo('curl -L http://install.ohmyz.sh | sh')
def prep_gulp(repo_path, npm_debs):
    """
    Install gulp and all its dependencies using npm.
    Run gulp to generate the CSS static file.
    """
    puts(green('Installing gulp and its dependencies using npm(node package manager)'))
    with cd(repo_path):
        # Project-local dependencies first, then the global CLI tools.
        run('npm install {}'.format(' '.join(npm_debs)))
        run('npm install -g bower gulp')
        # --allow-root: bower refuses to run as root without it.
        run('bower install bourbon --allow-root')
        # Build the production static assets (CSS).
        run("gulp production")
def prep_requirements(requirements_file_path):
    """Install the project's pinned Python dependencies with pip."""
    puts(yellow('Installing python package dependencies'))
    pip_command = 'pip install -r {}'.format(requirements_file_path)
    run(pip_command)
def create_directory(path):
    """Ensure *path* exists on the remote host (mkdir -p creates parents, never fails if present)."""
    puts(green('Creating website home directory'))
    mkdir_command = 'mkdir -p {}'.format(path)
    run(mkdir_command)
def install_letsencrypt():
    """
    Enable HTTPS and SSL on Nginx: clone the Let's Encrypt client and
    issue a certificate for env.DOMAINS via the webroot plugin.
    """
    puts(red('Starting Installation of letsencrypt to enable HTTPs and SSL on Nginx'))
    with cd('/etc'):
        # Start from a clean clone of the client.
        sudo('rm -rf letsencrypt')
        git.fetch_clean_repo(LETSENCRYPT_DOWNLOAD)
    with cd(LETSENCRYPT_DIR):
        # Nginx's webroot must be writable by www-data for the ACME challenge files.
        sudo('chgrp www-data /usr/share/nginx/html/')
        sudo('./letsencrypt-auto certonly -a webroot --webroot-path=/usr/share/nginx/html -d {}'.format(env.DOMAINS))
        # Generate 2048-bit Diffie-Hellman parameters for the TLS key exchange.
        sudo('openssl dhparam -out /etc/ssl/certs/dhparam.pem 2048')
def sed_replace(regex, replacement, file_path):
    """
    Replace every match of *regex* with *replacement* in *file_path*,
    editing the file in place with sed.
    """
    puts(yellow('Replacing all instances of {} with {} in {}'.format(regex, replacement, file_path)))
    sed_command = 'sed -i \'s/{}/{}/g\' {}'.format(regex, replacement, file_path)
    sudo(sed_command)
| true |
cc7cab87f03b024c6103ef5084ab3da2162c9cef | Python | GabrielDUBOIS/projet-1 | /src/gui_app_fab.py | UTF-8 | 12,232 | 2.953125 | 3 | [] | no_license | # -*- coding: Utf-8 -*-
### Importations des modules de la bibliotheque standard
import tkinter as tk
import abc
import xml.etree.ElementTree as ET
### Importations des modules des bibliotheques tierces
### Importations des modules du projet
from def_app_cmd import * # @UnusedWildImport
### Constant declarations
GUI_STRUCT_PATH = "./xml/gui_structure.xml" # default path of the GUI-structure XML document
### Déclarations des classes
class XmlFunc:
    """Mixin with XML helpers and cached properties parsed from the GUI description.

    Converts between file paths / XML strings and ElementTree Elements and
    exposes the decorative options (``myProps``) and packing options
    (``myPos``) read from the description document.
    """
    # Class-level default URL of the GUI-structure document.
    __rootUrl = GUI_STRUCT_PATH

    @staticmethod
    def get_xml_elt(theObject, xmlRoot=None):
        """
        Return an ET.Element for the GUI description.
        -- xmlRoot : str - path of an xml file, or inline xml content
           '<a>...<b>...</b></a>'; when None, the object's root URL is parsed.
        """
        # Make sure a description document URL is set.
        if not theObject.__rootUrl:
            theObject.__rootUrl = GUI_STRUCT_PATH
        if not xmlRoot:
            return ET.parse(theObject.__rootUrl).getroot()
        elif type(xmlRoot) == str:
            # Try inline XML first, then fall back to treating it as a path.
            try:
                return ET.fromstring(xmlRoot)
            except:
                try:
                    return ET.parse(xmlRoot).getroot()
                except:
                    raise ValueError('L\'argument passé de valeur %s'
                                     ' et de type %s n\'est pas supporté' %
                                     (str(xmlRoot), type(xmlRoot)))

    @staticmethod
    def get_xml_root(theObject, xmlElt=None):
        """
        Return the xml character string for *xmlElt* (an ET.Element);
        when None, the root of the description document is serialized.
        """
        if not theObject.__rootUrl:
            theObject.__rootUrl = GUI_STRUCT_PATH
        if not xmlElt:
            xmlElt = ET.parse(theObject.__rootUrl).getroot()
        if type(xmlElt) == ET.Element:
            try:
                return ET.tostring(xmlElt, method='xml', encoding='unicode')
            except:
                raise ValueError('L\'argument passé de valeur %s'
                                 ' et de type %s n\'est pas supporté' %
                                 (str(xmlElt), type(xmlElt)))

    ## Properties / decorated functions
    @property
    def rootUrl(self):
        """
        Get the URL of the xml file describing the user interface.
        """
        if not XmlFunc.__rootUrl:
            self.__rootUrl = GUI_STRUCT_PATH
        return XmlFunc.__rootUrl

    @rootUrl.setter
    def rootUrl(self, url):
        """Set the URL of the xml file describing the user interface."""
        # BUGFIX: removed a leftover debug ``print(url)`` that traced every
        # assignment (including the self-assignments done in __release).
        self.__rootUrl = url

    @property
    def myProps(self):
        """Get (and lazily cache) the decorative options from the <options> node."""
        if not self._myProps:
            myEltOpt = self.myElt.find('./options')
            self._myProps = {props: val.lower() for props, val \
                             in myEltOpt.items()}
        return self._myProps

    @myProps.setter
    def myProps(self, theProps):
        # BUGFIX: was ``self._myProp = theProps`` (missing 's'), so the
        # setter wrote to a dead attribute and never updated the cache.
        self._myProps = theProps

    @property
    def myPos(self):
        """Get (and lazily cache) the packing options from the <pack> node."""
        if not self._myPos:
            myEltPac = self.myElt.find('./pack')
            self._myPos = {pos: val.lower() for pos, val \
                           in myEltPac.items()}
        return self._myPos

    @myPos.setter
    def myPos(self, thePos):
        self._myPos = thePos
class BarFactory(tk.Frame, XmlFunc, metaclass=abc.ABCMeta):
    """
    Factory of application bars:
    -- menu bar
    -- tool bar
    -- status bar
    """
    ## Instance-variable slots
    __slots__ = ['myType', 'myElt', 'name', 'xmlDoc', '_myProps', '_myPos', \
                 'itemOrder', 'itemList']
    # Initialization state of the object
    initialized = False
    ## Associations {barType : barClass}
    _class_assoc = {'menu': 'Menu', 'status': 'Status', 'tools': 'Tools'}
    ## Constructor
    def __new__(cls, master, name, xmlDoc=None, url=None):
        """Return an application-bar instance of the requested type."""
        XmlFunc._XmlFunc__rootUrl = url
        print('rootUrl %s' % cls._XmlFunc__rootUrl)
        # Look up the <bar> node with the matching label in the description.
        cls.myElt = XmlFunc.get_xml_elt(cls, xmlDoc).\
                    find('./bar[@label="%s"]' % name)
        cls.myType = 'Bar' + cls._class_assoc[cls.myElt.get('type')]
        # Instantiate the concrete subclass named by the bar's type.
        try:
            oInst = object.__new__( \
                {c.__name__: c for c in cls.__subclasses__()}[cls.myType])
            return oInst
        except:
            print('Impossible d\'instancier la classe %s' % cls.myType)
            return False
    def __init__(self, master, name, xmlDoc=None, url=None):
        """Initialize the object and start its construction."""
        tk.Frame.__init__(self, master)
        # Remember the bar object's basic properties
        self.name = name
        self.master = master
        self.__release(self, self.myType) # Initialize the properties
    ## Hidden internal functions
    @staticmethod
    def __release(oInst, theType=None):
        """Property-initialization function."""
        if not oInst.initialized:
            oInst.itemOrder = [] # Ordered set of the bar's elements
            oInst.itemList = {'itemOrder': oInst.itemOrder}
            oInst.myType = theType # Reassign => id(cls.myType) <> id(self.myType)
            oInst.xmlDoc = XmlFunc.get_xml_root(oInst, oInst.myElt)
            oInst._myProps = {} # Decorative options (Widget options)
            oInst._myPos = {} # Positioning options
            oInst.rootUrl = oInst.rootUrl
        else:
            oInst.__class__.myType = theType
        oInst.initialized = True
    def _build(self):
        """Build the generic elements of the bar."""
        self.config(**self.myProps) # Apply the decorative options
        # Walk the Elements composing the bar
        mySubElts = self.myElt.findall('./item')
        for anElt in mySubElts:
            self.itemOrder.append(anElt.get('label'))
            self._build_item(anElt)
        self.pack(**self.myPos)
        self.__release(self) # Re-initialize the class-level properties
    ## Abstract methods
    @abc.abstractmethod
    def _build_item(self, theElt):
        return
class BarMenu(BarFactory):
    """Menu bar: each XML <item> element becomes a MenuButton child widget."""
    def _build_item(self, theElt):
        label = theElt.get('label')
        self.itemList[label] = MenuButton(self, theElt)
class MenuButton(XmlFunc, tk.Menubutton):
    """
    Menu button: a Menubutton widget whose drop-down menu is built
    from the matching <menu> node in the GUI description.
    """
    ## Instance-variable slots
    __slots__ = ['myElt', 'name', 'xmlDoc', '_myProps', '_myPos', \
                 'itemOrder']
    # Initialization state of the object
    initialized = False
    def __init__(self, master, theElt):
        self.myElt = theElt
        self.name = theElt.get('label')
        tk.Menubutton.__init__(self, master, text=self.name)
        self.__release(self, "MenuButton")
        self._build()
    ## Hidden internal functions
    @staticmethod
    def __release(oInst, theType=None):
        """Property-initialization function."""
        if not oInst.initialized:
            oInst._myProps = {} # Decorative options (Widget options)
            oInst._myPos = {} # Positioning options
            oInst.myType = theType
            oInst.xmlDoc = XmlFunc.get_xml_root(oInst, oInst.myElt)
            oInst.initialized = True
        else:
            pass
    def _build(self):
        # Find the <menu> node carrying this button's label and attach the
        # menu built from it, then apply the decorative/packing options.
        theElt = XmlFunc.get_xml_elt(self).find('./menu[@label="%s"]' %
                                                self.name)
        theMenu = MenuFactory(self, theElt, 'menu')
        self.configure(menu=theMenu)
        self.config(**self.myProps)
        self.pack(**self.myPos)
class MenuFactory(XmlFunc, metaclass=abc.ABCMeta):
    """
    Menu factory returning either a (sub-)menu or a menu command.
    """
    ## Instance-variable slots
    __slots__ = ['myType', 'myElt', 'name', 'xmlDoc', '_myProps', '_myPos', \
                 'itemOrder', 'master']
    # Initialization state of the object
    initialized = False
    ## Associations {itemType : itemClass}
    _class_assoc = {'menu': 'Menu', 'command': 'CommandItem'}
    ## Constructor
    def __new__(cls, master, theElt, theType=None):
        """Return a menu-item instance [(sub-)menu | command]"""
        cls.myElt = theElt
        # Type comes from the caller or from the element's 'type' attribute.
        if not theType:
            cls.myType = cls._class_assoc[cls.myElt.get('type')]
        else:
            cls.myType = cls._class_assoc[theType]
        try:
            oInst = object.__new__( \
                {c.__name__: c for c in cls.__subclasses__()}[cls.myType])
            return oInst
        except:
            print('Impossible d\'instancier la classe %s' % cls.myType)
            return False
    def __init__(self, master, theElt, theType=None):
        """Initialize the object and start its construction."""
        # Remember the object's basic properties
        self.name = theElt.get('label')
        self.master = master
        self.__release(self, self.myType) # Initialize the properties
        self._build() # Build the object
        self.__release(self) # Re-initialize the class-level properties
    ## Hidden internal functions
    @staticmethod
    def __release(oInst, theType=None):
        """Property-initialization function."""
        if not oInst.initialized:
            oInst.itemOrder = [] # Ordered set of the menu's elements
            oInst.itemList = {'itemOrder': oInst.itemOrder}
            oInst.myType = theType # Reassign => id(cls.myType) <> id(self.myType)
            oInst.xmlDoc = XmlFunc.get_xml_root(oInst, oInst.myElt)
            oInst._myProps = {} # Decorative options (Widget options)
            oInst._myPos = {} # Positioning options
            oInst.rootUrl = oInst.rootUrl
        else:
            oInst.__class__.myType = theType
        oInst.initialized = True
    def _build(self):
        """Build the generic elements of the menu."""
        self._build_item(self.master)
    ## Abstract methods
    @abc.abstractmethod
    def _build_item(self, theElt):
        return
class Menu(MenuFactory, tk.Menu):
    """
    Composite element made of sub-menus and commands.
    Calls back into MenuFactory to recurse through nested menus.
    """
    def __init__(self, master, theElt, theType=None):
        tk.Menu.__init__(self, master)
        MenuFactory.__init__(self, master, theElt, theType)
    def _build_item(self, master):
        self.config(**self.myProps)
        # Walk the Elements composing the menu
        mySubElts = self.myElt.findall('./item')
        for anElt in mySubElts:
            hisName = anElt.get('label')
            hisType = None
            if anElt.get('type') == 'menu':
                # Recurse: replace the <item> by the full <menu> definition.
                anElt = XmlFunc.get_xml_elt(self).\
                        find('./menu[@label="%s"]' % hisName)
                hisType = 'menu'
            theItem = MenuFactory(self, anElt, hisType)
            self.itemOrder.append(hisName)
            self.itemList[hisName] = theItem
        # Only cascade into the parent when it is itself a Menu.
        if master.__class__.__name__ == "Menu":
            master.add_cascade(label=self.name, menu=self)
class CommandItem(MenuFactory):
    """Terminal menu element: a clickable command entry bound to a CommandAction."""
    def __init__(self, master, theElt, theType=None):
        MenuFactory.__init__(self, master, theElt, theType)
    def _build_item(self, master):
        # The <command> node's 'value' attribute names the CommandAction callback.
        action_name = self.myElt.find('./command').get('value')
        master.add_command(label=self.name,
                           command=CommandAction.__dict__[action_name])
if __name__ == "__main__":
    # Smoke test: build the main menu bar twice — once from an alternate XML
    # description, once with url="" (falls back to GUI_STRUCT_PATH in XmlFunc).
    tkRoot = tk.Tk()
    menuBar = BarFactory(tkRoot, "Barre de menus principale",
                         url="./xml/gui_structure_2.xml")
    menuBar._build()
    menuBar2 = BarFactory(tkRoot, "Barre de menus principale", url="")
    menuBar2._build()
    tkRoot.mainloop()
| true |
a1f02efcb524d62f219ec63499a323137a1bb4b5 | Python | 93suhwan/python-sos | /src/memory.py | UTF-8 | 774 | 3.46875 | 3 | [
"MIT"
] | permissive | from collections import OrderedDict
class Memory:
    """Memory is a finite mapping from locations to Num instances"""

    def __init__(self):
        # Insertion-ordered backing store: variable name -> Num instance.
        self._store = OrderedDict()

    def __str__(self):
        h = 'Runtime Memory State'
        body = ['%7s -> %r' % (var, num.value)
                for var, num in self._store.items()]
        s = '\n'.join([h, '=' * len(h)] + body)
        return '\n' + s + '\n'

    __repr__ = __str__

    def insert(self, location, value):
        # The location object's .value attribute carries the variable name.
        self._store[location.value] = value

    def lookup(self, location):
        name = location.value
        value = self._store.get(name)
        if value is None:
            raise Exception('Undeclared variable %s' % name)
        return value
| true |
4e02331aca4398d5d14b51f92186993bd567ff9c | Python | akuhnregnier/empirical-fire-modelling | /tests/test_variable.py | UTF-8 | 4,540 | 2.609375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from operator import methodcaller
import pytest
from empirical_fire_modelling import variable
@pytest.fixture
def dummy_variables():
    """Ten variables: two (rank, name, units) families x five shifts (0, 1, 3, 6, 9)."""
    variables = []
    for rank, name, units in [(0, "foo", "1"), (1, "bar", "%")]:
        for shift in [0, 1, 3, 6, 9]:
            variables.append(
                variable.VariableFactory(rank=rank, name=name, units=units)[shift]
            )
    return tuple(variables)
def test_get_offset_identity(dummy_variables):
    """get_offset() maps each dummy variable to an equal variable.

    Renamed from ``test_get_offset``: a parametrized test further down
    reuses that exact name, so this test was shadowed (flake8 F811) and
    pytest never collected or ran it.
    """
    transformed = tuple(map(methodcaller("get_offset"), dummy_variables))
    assert transformed == dummy_variables
def test_get_matching(dummy_variables):
    """Matching by name, by units, or by both selects the same 'foo' subset."""
    assert (
        variable.get_matching(dummy_variables, single=False, name="foo")
        == variable.get_matching(dummy_variables, single=False, units="1")
        == variable.get_matching(dummy_variables, single=False, name="foo", units="1")
        == variable.get_matching(dummy_variables, single=False, units="1", name="foo")
        == dummy_variables[:5]
    )
def test_get_matching_single_exc(dummy_variables):
    """Requesting a single match raises when several variables match 'foo'."""
    with pytest.raises(RuntimeError):
        variable.get_matching(dummy_variables, name="foo")
def test_get_matching_strict_exc(dummy_variables):
    """No match: strict=False yields an empty tuple, the strict call raises."""
    assert variable.get_matching(dummy_variables, strict=False, name="foobar") == ()
    with pytest.raises(RuntimeError):
        variable.get_matching(dummy_variables, name="foobar")
def test_instantiate_variable():
    """Variable itself is abstract; its Standard/Offset subclasses are instantiable."""
    kwargs = dict(rank=0, name="test", shift=0, units="1", parent=None)
    with pytest.raises(TypeError):
        variable.Variable(**kwargs)
    assert variable.StandardVariable(**kwargs)
    assert variable.OffsetVariable(**{**kwargs, **dict(shift=12)})
@pytest.mark.parametrize("shift", [12, 18, 24])
def test_get_offset(shift):
    """StandardVariable.get_offset() round-trips through OffsetVariable.get_standard()."""
    kwargs = dict(rank=0, name="test", shift=shift, units="1", parent=None)
    assert variable.StandardVariable(**kwargs).get_offset() == variable.OffsetVariable(
        **kwargs
    )
    assert variable.StandardVariable(
        **kwargs
    ).get_offset().get_standard() == variable.StandardVariable(**kwargs)
def test_offset_variable_exc():
    """OffsetVariable rejects a zero shift."""
    with pytest.raises(ValueError):
        variable.OffsetVariable(rank=0, name="a", shift=0, units="1", parent=None)
def test_order_rank():
    """Ordering compares rank: a lower rank sorts first."""
    assert variable.StandardVariable(
        rank=0, name="test", shift=0, units="1", parent=None
    ) < variable.StandardVariable(rank=1, name="test", shift=0, units="1", parent=None)
def test_order_name():
    """Name differences do not affect equality."""
    assert variable.StandardVariable(
        rank=0, name="a", shift=0, units="1", parent=None
    ) == variable.StandardVariable(rank=0, name="b", shift=0, units="1", parent=None)
def test_order_shift():
    """At equal rank, a smaller shift sorts first."""
    assert variable.StandardVariable(
        rank=0, name="a", shift=0, units="1", parent=None
    ) < variable.StandardVariable(rank=0, name="a", shift=1, units="1", parent=None)
def test_order_shift_offset():
    """Shift ordering also holds between OffsetVariable instances."""
    assert variable.OffsetVariable(
        rank=0, name="a", shift=12, units="1", parent=None
    ) < variable.OffsetVariable(rank=0, name="a", shift=18, units="1", parent=None)
def test_order_units():
    """Units differences do not affect equality."""
    assert variable.StandardVariable(
        rank=0, name="a", shift=0, units="1", parent=None
    ) == variable.StandardVariable(rank=0, name="a", shift=0, units="2", parent=None)
def test_order_rank_shift():
    """Rank and shift jointly determine ordering and equality."""
    common_kwargs = dict(name="a", units="1", parent=None)
    assert variable.StandardVariable(
        rank=0, shift=0, **common_kwargs
    ) == variable.StandardVariable(rank=0, shift=0, **common_kwargs)
    assert variable.StandardVariable(
        rank=0, shift=0, **common_kwargs
    ) < variable.StandardVariable(rank=1, shift=0, **common_kwargs)
    assert variable.StandardVariable(
        rank=0, shift=0, **common_kwargs
    ) < variable.StandardVariable(rank=0, shift=1, **common_kwargs)
    assert variable.StandardVariable(
        rank=0, shift=0, **common_kwargs
    ) < variable.StandardVariable(rank=1, shift=1, **common_kwargs)
def test_sort_variables():
    """sort_variables returns the variables in their comparison order (by shift here)."""
    common_kwargs = dict(name="a", units="1", parent=None)
    variables = (
        variable.StandardVariable(rank=0, shift=3, **common_kwargs),
        variable.OffsetVariable(rank=0, shift=12, **common_kwargs),
        variable.StandardVariable(rank=0, shift=0, **common_kwargs),
    )
    assert variable.sort_variables(variables) == (
        variable.StandardVariable(rank=0, shift=0, **common_kwargs),
        variable.StandardVariable(rank=0, shift=3, **common_kwargs),
        variable.OffsetVariable(rank=0, shift=12, **common_kwargs),
    )
| true |
1c9b8ad5205ebf55760dd6822ceb57559dd589db | Python | SauravKanchan/workshop | /main.py | UTF-8 | 347 | 3.984375 | 4 | [] | no_license | data = input("Enter your data")
# Convert the user's input and print its 1..12 multiplication table.
number = int(data)
# Rows for i = 1 .. 12 (range's upper bound 13 is exclusive).
for i in range(1,13):
    print(number, "X",i,"=", i*number )
print("End of loop")
'''
count = 0
while count<=12:
    print(number, "X",count,"=", count*number )
    count = count +1
print("End of while loop")
'''
print("End of program")
| true |
cd6a1d2dfcddb356c8ea0b62ff7d0a5a66a30f0e | Python | no-more-coffee/my-interview-questions | /leet/Weekly Contest 181/1.py | UTF-8 | 465 | 3.515625 | 4 | [] | no_license | from typing import List
class Solution:
    def createTargetArray(self, nums: List[int], index: List[int]) -> List[int]:
        """Insert nums[i] at position index[i], in order, and return the result.

        list.insert shifts later elements to the right, which is exactly
        the problem's insertion rule.
        """
        target = []
        # BUGFIX: removed a leftover debug ``print(i, n)`` that polluted
        # stdout on every insertion.
        for pos, val in zip(index, nums):
            target.insert(pos, val)
        return target
# Ad-hoc checks mirroring the LeetCode examples.
print(Solution().createTargetArray(nums=[0, 1, 2, 3, 4], index=[0, 1, 2, 2, 1]))
print(Solution().createTargetArray(nums = [1,2,3,4,0], index = [0,1,2,3,0]))
print(Solution().createTargetArray(nums = [1], index = [0]))
| true |
a3541cc3bd6d92f12eb3b9271ae64a163906f929 | Python | huiboSong/tester | /core/logger.py | UTF-8 | 1,854 | 2.953125 | 3 | [] | no_license | # coding=utf-8
import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
__author__ = 'qiguojie'
__date__ = '2016-11-30'
import datetime, os
import config
class Logger(object):
    """Day-rotating file logger: appends to comall_YYYYMMDD.log in config.log_file_dir.

    Each level method (info/debug/error) is gated by the matching switch
    in the project's config module.
    """

    def __init__(self):
        # Target directory for the log files; configured in config.log_file_dir.
        self.log_dir = config.log_file_dir

    def log_write(self, message):
        """Append *message* plus a newline to today's log file.

        Creates the log directory (including parents) on demand; one log
        file is produced per calendar day.
        """
        # makedirs(exist_ok=True) replaces the old exists()/mkdir() pair:
        # it removes the check-then-create race, handles nested paths, and
        # collapses the two duplicated branches the original had.
        os.makedirs(self.log_dir, exist_ok=True)
        log_file = os.path.join(
            self.log_dir,
            'comall_' + datetime.datetime.now().strftime('%Y%m%d') + '.log')
        # Context manager guarantees the handle is closed even on write errors.
        with open(log_file, "a") as file_open:
            file_open.write(message + "\n")

    def info(self, message):
        # Only log when the info switch is enabled in config.
        if config.info:
            now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            self.log_write(now + " Info: " + message)

    def debug(self, message):
        # Only log when the debug switch is enabled in config.
        if config.debug:
            now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            self.log_write(now + " Debug: " + message)

    def error(self, message):
        # Only log when the error switch is enabled in config.
        if config.error:
            now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            self.log_write(now + " Error: " + message)
| true |
198adf60567ff5f3a73ad1edd98db62cbeef76d0 | Python | prashkumara/Data-Structures | /LeetCode/501.Find_Mode_in_Binary_Search_Tree.py | UTF-8 | 724 | 3.28125 | 3 | [] | no_license | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import collections
class Solution(object):
    def findMode(self, root):
        """Return the most frequent value(s) in the BST rooted at *root*.

        :type root: TreeNode
        :rtype: List[int]
        """
        if not root:
            return []
        values = []
        self.inorder(root, values)
        freq = collections.Counter(values)
        top = max(freq.values())
        return [v for v in freq if freq[v] == top]

    def inorder(self, root, res):
        """Append the tree's values to *res* in in-order sequence (iteratively)."""
        stack, node = [], root
        while stack or node:
            while node:
                stack.append(node)
                node = node.left
            node = stack.pop()
            res.append(node.val)
            node = node.right
6ab7eea38cae655c4081853048f27b442f5fe90a | Python | crushthehype/magic | /onix.py | UTF-8 | 1,260 | 2.6875 | 3 | [] | no_license | import bitmex
import pandas as pd
import numpy as np
import requests, json
import lxml
# BitMEX API credentials (left blank here; fill in before running).
apiKey = ''
apiSecret = ''
# Connecting to client
client = bitmex.bitmex(test = False, api_key=apiKey, api_secret=apiSecret)
# fetching trade execution history
r = client.Execution.Execution_getTradeHistory().result()
d = pd.DataFrame(list(r[0]))
# Lowercased execution-history column names, to match the dictionary below.
cols = list(d.columns.str.lower())
# get data from ONIXS for dictionary (FIX 5.0 SP2, Execution Report <8>)
df = pd.read_html('https://www.onixs.biz/fix-dictionary/5.0.SP2/msgType_8_8.html', skiprows=5)[0]
# Promote the first data row to column headers, then drop the header rows.
df.columns = df.iloc[0]
df = df.iloc[2:]
df.head()
df.tail()
# lowercase the values of field name to match Onixs dictionary
df['Field Name'] = df['Field Name'].str.lower()
# individual search - just type what you're looking for
x = 'timeinforce'
result = df.loc[df['Field Name'] == x]
result = list(result.values)
# NOTE(review): assumes the ONIXS table columns are Row, Tag, Required,
# Description in that order — matches the print labels below.
row = result[0][0]
tag = result[0][1]
required = result[0][2]
desc = result[0][3]
print('Row: ' + row, 'Tag: ' + tag , 'Required: ' + required, 'Description: ' + desc, sep = '\n')
# match the execution history columns to Onixs dictionary
match = df[df['Field Name'].isin(cols)]
match = match.dropna()
# Show full column contents (no truncation) when printing.
pd.set_option('display.max_colwidth', -1)
print(match[['Field Name', 'Comments']], flush=True) | true |
bff4c33683c77c87e5871a8d88394b0bccf5ee7e | Python | Wenhao-Yang/PythonLearning | /Chapter 3 Linear Regression/3.2_DecompositionMethod.py | UTF-8 | 2,162 | 3.6875 | 4 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
"""
@author: yangwenhao
@contact: 874681044@qq.com
@software: PyCharm
@file: 3.2_DecompositionMethod.py
@time: 2018/12/11 下午5:07
@overview: We implement a matrix decomposition method for linear method. Implementing inverse methods in the previous recipe can be numerically inefficient in most cases, especially when the matrices get cery large. Another approach is to use the Cholesky decomposition method. The Cholesky decomposition decomposes a mtrix into a lower and upper triangular matrix, say L and L' , such that these matrices are transposition of each other. Here we solve the system, Ax=b, by writing it as LL'x=b. We will first solve Ly=b and then solve L'x=y to arrive at our coefficient matrix ,x.
"""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
# Start from a clean TF1 graph and session.
ops.reset_default_graph()
sess = tf.Session()
#Create the data, and obtain the A and b matrix in the same way as before.
x_vals = np.linspace(0, 10, 100)
y_vals = x_vals + np.random.normal(0, 1, 100)
# Design matrix A = [x | 1] for the model y = slope*x + intercept.
x_vals_column = np.transpose(np.matrix(x_vals))
ones_column = np.transpose(np.matrix(np.repeat(1, 100)))
A = np.column_stack((x_vals_column, ones_column))
b = np.transpose(np.matrix(y_vals))
A_tensor = tf.constant(A)
# NOTE(review): variable name 'b_tensot' is a typo for 'b_tensor'; it is
# never used below (tA_b is built from the numpy 'b' directly).
b_tensot = tf.constant(b)
#Find the Cholesky decomposition of our square matrix
# Normal equations: (A^T A) x = A^T b, with A^T A = L L^T.
tA_A = tf.matmul(tf.transpose(A_tensor), A_tensor)
L = tf.cholesky(tA_A)
tA_b = tf.matmul(tf.transpose(A_tensor), b)
# Solve L y = A^T b, then L^T x = y.
sol1 = tf.matrix_solve(L, tA_b)
sol2 = tf.matrix_solve(tf.transpose(L), sol1)
#Extract the coefficients
solution_eval = sess.run(sol2)
slope = solution_eval[0][0]
y_intercept = solution_eval[1][0]
print('Slope: ' + str(slope))
print('y_intercept: ' + str(y_intercept))
# Evaluate the fitted line at the sample points for plotting.
best_fit = []
for i in x_vals:
    best_fit.append(slope*i + y_intercept)
#The code could be used for Image size modification
# plt.figure(figsize=(10.8, 7.2))
plt.plot(x_vals, y_vals, 'o', label='Data')
plt.plot(x_vals, best_fit, 'r-', label='Best fit lines', linewidth=3)
plt.legend(loc='upper left')
plt.title('Solving Linear Regression with Cholesky Decomposition Method', fontsize=12)
plt.show()
| true |
9e736ea2bc7b3ec2f559003c062218675f391160 | Python | abhisheksaurabh1985/CI-Project | /testScripts.py | UTF-8 | 595 | 2.640625 | 3 | [] | no_license | # Test scripts for the function generateWordWindows(). File generated is in the output folder.
# NOTE(review): all_sentences, nestedListSentenceTuples and indexedVocabulary
# are not defined in this file — this snippet expects them to exist in the
# session it is pasted into (e.g. after running the main pipeline).
with open("./output/testGenerateWordWindows.txt", "w") as textFileObj:
    # Dump the first 11 sentences alongside their generated word windows.
    for i in range(11):
        textFileObj.write(str(all_sentences[i]) + '\n')
        textFileObj.write(str(nestedListSentenceTuples[i]) + '\n')
        for j in range(len(all_sentences[i])):
            # Words missing from the vocabulary raise KeyError and are skipped.
            try:
                textFileObj.write(str(all_sentences[i][j].lower()) + '\n')
                textFileObj.write(str(indexedVocabulary[all_sentences[i][j].lower()]) + '\n')
            except:
                continue
| true |
0dd8c89da3033e345c0fc9f4aa3585003b317900 | Python | dozernz/assorted-pub | /aio-multiproc.py | UTF-8 | 1,049 | 2.59375 | 3 | [] | no_license | import asyncio,aiohttp
import concurrent.futures
import time
NUM_PROCS=2  # worker processes in the pool
NUM_SESS=4  # number of sessions (one pool task each)
PER_SESS=10000  # HTTP requests issued per session
async def long_task():
    """Issue PER_SESS sequential GET requests to localhost over one aiohttp session."""
    async with aiohttp.ClientSession() as session:
        for i in range(PER_SESS):
            async with session.get('http://127.0.0.1') as response:
                html = await response.text()
def cpu_bound_work(i):
    """Process-pool entry point: run one session's requests in its own event loop.

    NOTE(review): despite the name, the workload here is I/O-bound (HTTP
    requests); each process simply gets its own asyncio loop.
    """
    try:
        print(f"Starting run {i}")
        asyncio.run(long_task())
    except Exception as e:
        # Print-and-return so one failing session doesn't kill the pool.
        print(e)
        return
async def main():
    """Fan NUM_SESS sessions out over a NUM_PROCS-process pool and await them all."""
    with concurrent.futures.ProcessPoolExecutor(NUM_PROCS) as executor:
        loop = asyncio.get_running_loop()
        # Run the blocking workers on the pool without blocking this loop.
        tasks = [loop.run_in_executor(executor, cpu_bound_work, i) for i in range(NUM_SESS)]
        # Wait for all tasks to complete and get their results
        results = await asyncio.gather(*tasks)
        return results
if __name__ == '__main__':
    # Time the whole run (NUM_SESS * PER_SESS requests in total).
    start_time = time.time()
    results = asyncio.run(main())
    print("--- %s seconds ---" % (time.time() - start_time))
| true |
60d899681bd0e175af2faf5d08a2cdf089eb2e2a | Python | PaddlePaddle/Paddle | /python/paddle/text/datasets/imikolov.py | UTF-8 | 6,040 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import tarfile
import numpy as np
from paddle.dataset.common import _check_exists_and_download
from paddle.io import Dataset
__all__ = []
URL = 'https://dataset.bj.bcebos.com/imikolov%2Fsimple-examples.tgz'
MD5 = '30177ea32e27c525793142b6bf2c8e2d'
class Imikolov(Dataset):
    """
    Implementation of imikolov dataset.

    Args:
        data_file(str): path to data tar file, can be set None if
            :attr:`download` is True. Default None
        data_type(str): 'NGRAM' or 'SEQ'. Default 'NGRAM'.
        window_size(int): sliding window size for 'NGRAM' data. Default -1.
        mode(str): 'train' 'test' mode. Default 'train'.
        min_word_freq(int): minimal word frequence for building word dictionary. Default 50.
        download(bool): whether to download dataset automatically if
            :attr:`data_file` is not set. Default True

    Returns:
        Dataset: instance of imikolov dataset

    Examples:

        .. code-block:: python

            import paddle
            from paddle.text.datasets import Imikolov

            class SimpleNet(paddle.nn.Layer):
                def __init__(self):
                    super().__init__()

                def forward(self, src, trg):
                    return paddle.sum(src), paddle.sum(trg)

            imikolov = Imikolov(mode='train', data_type='SEQ', window_size=2)

            for i in range(10):
                src, trg = imikolov[i]
                src = paddle.to_tensor(src)
                trg = paddle.to_tensor(trg)

                model = SimpleNet()
                src, trg = model(src, trg)
                print(src.shape, trg.shape)

    """

    def __init__(
        self,
        data_file=None,
        data_type='NGRAM',
        window_size=-1,
        mode='train',
        min_word_freq=50,
        download=True,
    ):
        assert data_type.upper() in [
            'NGRAM',
            'SEQ',
        ], f"data type should be 'NGRAM', 'SEQ', but got {data_type}"
        self.data_type = data_type.upper()

        assert mode.lower() in [
            'train',
            'test',
        ], f"mode should be 'train', 'test', but got {mode}"
        self.mode = mode.lower()

        self.window_size = window_size
        self.min_word_freq = min_word_freq

        self.data_file = data_file
        if self.data_file is None:
            assert (
                download
            ), "data_file is not set and downloading automatically disabled"
            self.data_file = _check_exists_and_download(
                data_file, URL, MD5, 'imikolov', download
            )

        # Build a word dictionary from the corpus
        self.word_idx = self._build_work_dict(min_word_freq)

        # read dataset into memory
        self._load_anno()

    def word_count(self, f, word_freq=None):
        """Accumulate word frequencies from the line-oriented file object
        ``f`` into ``word_freq`` (a fresh defaultdict when None is given),
        and return the accumulator."""
        if word_freq is None:
            word_freq = collections.defaultdict(int)

        for l in f:
            for w in l.strip().split():
                word_freq[w] += 1
            # One sentence-start and one sentence-end marker per line.
            word_freq['<s>'] += 1
            word_freq['<e>'] += 1

        return word_freq

    def _build_work_dict(self, cutoff):
        """Build the word -> index vocabulary from the train and valid
        splits, keeping only words seen more than ``cutoff`` times;
        '<unk>' always gets the last index."""
        train_filename = './simple-examples/data/ptb.train.txt'
        test_filename = './simple-examples/data/ptb.valid.txt'
        with tarfile.open(self.data_file) as tf:
            trainf = tf.extractfile(train_filename)
            testf = tf.extractfile(test_filename)
            word_freq = self.word_count(testf, self.word_count(trainf))
            if '<unk>' in word_freq:
                # remove <unk> for now, since we will set it as last index
                del word_freq['<unk>']

            # Fix: honour the ``cutoff`` argument. The original compared
            # against self.min_word_freq, leaving the parameter unused
            # (the caller passes min_word_freq, so behaviour is unchanged).
            word_freq = [
                x for x in word_freq.items() if x[1] > cutoff
            ]

            # Descending frequency, ties broken alphabetically, so the
            # resulting indices are deterministic.
            word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0]))
            words, _ = list(zip(*word_freq_sorted))
            word_idx = dict(list(zip(words, range(len(words)))))
            word_idx['<unk>'] = len(words)

        return word_idx

    def _load_anno(self):
        """Read the split selected by ``self.mode`` and materialise samples
        into ``self.data``: window tuples for 'NGRAM', (src, trg) id
        sequences for 'SEQ'."""
        self.data = []
        with tarfile.open(self.data_file) as tf:
            filename = f'./simple-examples/data/ptb.{self.mode}.txt'
            f = tf.extractfile(filename)

            UNK = self.word_idx['<unk>']
            for l in f:
                if self.data_type == 'NGRAM':
                    assert self.window_size > -1, 'Invalid gram length'
                    l = ['<s>'] + l.strip().split() + ['<e>']
                    if len(l) >= self.window_size:
                        l = [self.word_idx.get(w, UNK) for w in l]
                        # Every window_size-long slice becomes one sample.
                        for i in range(self.window_size, len(l) + 1):
                            self.data.append(tuple(l[i - self.window_size : i]))
                elif self.data_type == 'SEQ':
                    l = l.strip().split()
                    l = [self.word_idx.get(w, UNK) for w in l]
                    # Teacher-forcing pair: source starts with <s>, target
                    # ends with <e>.
                    src_seq = [self.word_idx['<s>']] + l
                    trg_seq = l + [self.word_idx['<e>']]
                    if self.window_size > 0 and len(src_seq) > self.window_size:
                        continue
                    self.data.append((src_seq, trg_seq))
                else:
                    # Typo fix in the message only ("Unknow" -> "Unknown").
                    raise AssertionError('Unknown data type')

    def __getitem__(self, idx):
        """Return sample ``idx`` as a tuple of numpy arrays."""
        return tuple([np.array(d) for d in self.data[idx]])

    def __len__(self):
        """Number of materialised samples."""
        return len(self.data)
| true |
f18931710416e613fa2d4a1a9befabca87ac474e | Python | eduardomezencio/tsim | /tsim/core/network/orientedway.py | UTF-8 | 4,653 | 2.78125 | 3 | [
"MIT"
] | permissive | """OrientedWay class."""
from __future__ import annotations
from dataclasses import dataclass
from typing import (TYPE_CHECKING, Iterable, Iterator, NamedTuple, Optional,
Tuple)
from dataslots import with_slots
from tsim.core.entity import EntityRef
from tsim.core.network.endpoint import Endpoint
if TYPE_CHECKING:
from tsim.core.geometry import Point, Vector
from tsim.core.network.lane import Lane, LaneRef
from tsim.core.network.node import Node
from tsim.core.network.way import Way
class OrientedWay(NamedTuple):
    """A way paired with the endpoint it is entered from.

    Disambiguates which end of a Way is meant when referencing it from a
    Node (both ends of a Way may touch the same Node); reads as "the given
    Way, traversed starting at the given Endpoint".
    """

    way_ref: EntityRef[Way]
    endpoint: Endpoint

    @staticmethod
    def build(way: Way, endpoint: Endpoint):
        """Build an OrientedWay directly from a Way instance, wrapping it
        in a weak EntityRef."""
        return OrientedWay(EntityRef(way), endpoint)

    @property
    def way_id(self) -> int:
        """Id of the referenced way."""
        return self.way_ref.id

    @property
    def way(self) -> Optional[Way]:
        """The referenced way, or None if the reference is dead."""
        return self.way_ref()

    @property
    def start(self) -> Node:
        """Source node of the way in this direction."""
        if self.endpoint is Endpoint.START:
            return self.way.start
        return self.way.end

    @property
    def end(self) -> Node:
        """Target node of the way in this direction."""
        if self.endpoint is Endpoint.START:
            return self.way.end
        return self.way.start

    @property
    def start_offset(self):
        """Distance from the start node to the start of the way geometry."""
        if self.endpoint is Endpoint.START:
            return self.way.start_offset
        return self.way.end_offset

    @property
    def end_offset(self):
        """Distance from the end node to the end of the way geometry."""
        if self.endpoint is Endpoint.START:
            return self.way.end_offset
        return self.way.start_offset

    @property
    def lane_count(self) -> int:
        """Number of lanes of the way in this direction."""
        return self.way.lane_count[self.endpoint.value]

    @property
    def length(self) -> float:
        """Length of the way."""
        return self.way.length

    @property
    def weight(self) -> float:
        """Routing weight of the way in this direction."""
        return self.way.weight[self.endpoint.value]

    @property
    def way_connections(self) -> Iterable[OrientedWay]:
        """Way connections leaving this oriented way's end node."""
        return self.end.way_connections(self)

    def flipped(self) -> OrientedWay:
        """Same way, opposite direction."""
        return OrientedWay(self.way_ref, self.endpoint.other)

    def lane(self, index: int) -> Lane:
        """Lane at ``index``, counted in the direction of this oriented way."""
        if self.endpoint is Endpoint.START:
            return self.way.lanes[index]
        return self.way.lanes[-(index + 1)]

    def lane_refs(self, l_to_r: bool = True, include_opposite: bool = False,
                  opposite_only: bool = False,
                  positive: bool = False) -> Iterator[LaneRef]:
        """Lane references in this direction; shortcut for Way.lane_refs."""
        return self.way.lane_refs(self.endpoint, l_to_r, include_opposite,
                                  opposite_only, positive)

    def points(self, offsets: Optional[Tuple[float, Optional[float]]] = None,
               skip: int = 0) -> Iterator[Point]:
        """Iterate the way's points (nodes and waypoints) in this direction."""
        reverse = self.endpoint is Endpoint.END
        return self.way.points(offsets=offsets, skip=skip, reverse=reverse)

    def __repr__(self):
        return (f'{OrientedWay.__name__}(way_id={self.way.id}, '
                f'endpoint={self.endpoint.name[0]})')
@with_slots
@dataclass(frozen=True)
class OrientedWayPosition:
    """A position in an `OrientedWay`.

    The position is in meters from the endpoint of the oriented way.
    """

    # The oriented way this position lies on.
    oriented_way: OrientedWay
    # Distance in meters, measured from the oriented way's starting endpoint.
    position: float

    def world_position(self) -> Tuple[Point, Vector]:
        """Get world position and direction at this oriented way position.

        Not implemented yet; always raises NotImplementedError.
        """
        raise NotImplementedError()
| true |
07baa94ce6ad5453d2145ea19706190f42489058 | Python | gswyhq/hello-world | /python相关/十进制与62进制的转换.py | UTF-8 | 3,760 | 3.890625 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 对于62进制,从0数到9以后,10用小写字母a表示,接着数完26个字母,到z为35,然后36为大写字母A,一直到61为大写字母Z。所以,我们可以实现十进制数字base62编码的encode和decode。
import math
# Digits used for base-62: 0-9, then a-z (10..35), then A-Z (36..61).
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

def base62_encode(num, alphabet=ALPHABET):
    """Encode a non-negative integer in base ``len(alphabet)``.

    `num`: The number to encode
    `alphabet`: The alphabet to use for encoding

    Raises:
        ValueError: if ``num`` is negative (the original implementation
            looped forever on negative input, because Python's floor
            division never drives a negative value to zero).
    """
    if num < 0:
        raise ValueError("base62_encode() requires a non-negative integer")
    if num == 0:
        return alphabet[0]
    base = len(alphabet)
    digits = []
    while num:
        num, rem = divmod(num, base)
        digits.append(alphabet[rem])
    # Digits were produced least-significant first.
    return ''.join(reversed(digits))
def base62_decode(string, alphabet=ALPHABET):
    """Decode a Base X encoded string into the number.

    Arguments:
    - `string`: The encoded string
    - `alphabet`: The alphabet to use for encoding

    Uses Horner's scheme (num = num*base + digit), replacing the original
    per-character ``base ** power`` computation; an empty string decodes
    to 0, exactly as before.
    """
    base = len(alphabet)
    num = 0
    for char in string:
        num = num * base + alphabet.index(char)
    return num
# 对于一个新浪微博url,它是形如:http://weibo.com/2991905905/z579Hz9Wr,中间的数字是用户的uid,重要的是后面的字符串“z579Hz9Wr”。它的计算其实也很简单,从后向前四个字符一组,就得到:
# z
# 579H
# z9Wr
# 将每个字符串用base62编码来decode,就可以得到它们的十进制数字分别为:
# 35
# 1219149
# 8379699
# 将它们拼起来就可以得到mid为:“3512191498379699”。这里要强调的是:对于除了开头的字符串,如果得到的十进制数字不足7位,需要在前面补足0。比如得到的十进制数分别为:35,33040,8906190,则需要在33040前面添上两个0。
def url_to_mid(url):
    '''
    Convert a weibo URL suffix to its numeric mid: the string is split into
    a leading group of ``len(url) % 4`` characters (or 4 when the length is
    a multiple of 4) followed by 4-character groups; each group is base62-
    decoded and all but the first are zero-padded to 7 decimal digits.

    Fix: the original indexed groups from ``len(url) % 4`` with an ``i-1``
    offset, so for lengths divisible by 4 the first slice was empty and the
    final 4-character group was silently dropped.

    >>> url_to_mid('z0JH2lOMb')
    3501756485200075
    >>> url_to_mid('z0Ijpwgk7')
    3501703397689247
    >>> url_to_mid('z0IgABdSn')
    3501701648871479
    >>> url_to_mid('z08AUBmUe')
    3500330408906190
    >>> url_to_mid('z06qL6b28')
    3500247231472384
    >>> url_to_mid('yCtxn8IXR')
    3491700092079471
    >>> url_to_mid('yAt1n2xRa')
    3486913690606804
    '''
    url = str(url)
    # Size of the leading (most-significant) group: 1-4 characters.
    head = len(url) % 4 or 4
    parts = [str(base62_decode(url[:head]))]
    for start in range(head, len(url), 4):
        # Non-leading groups always contribute exactly 7 decimal digits.
        parts.append(str(base62_decode(url[start:start + 4])).zfill(7))
    return int(''.join(parts))
# mid转为url也就很简单了,对于一个mid,我们从后向前每7位一组,用base62编码来encode,拼起来即可。同样要注意的是,每7个一组的数字,除了开头一组,如果得到的62进制数字不足4位,需要补足0。
def mid_to_url(midint):
    '''
    Convert a numeric mid back to its weibo URL suffix: the decimal digits
    are grouped in sevens from the least-significant end, each group is
    base62-encoded, and every group except the most significant one is
    zero-padded to 4 characters.

    >>> mid_to_url(3501756485200075)
    'z0JH2lOMb'
    >>> mid_to_url(3501703397689247)
    'z0Ijpwgk7'
    >>> mid_to_url(3501701648871479)
    'z0IgABdSn'
    >>> mid_to_url(3500330408906190)
    'z08AUBmUe'
    >>> mid_to_url(3500247231472384)
    'z06qL6b28'
    >>> mid_to_url(3491700092079471)
    'yCtxn8IXR'
    >>> mid_to_url(3486913690606804)
    'yAt1n2xRa'
    '''
    reversed_digits = str(midint)[::-1]
    group_total = len(reversed_digits) // 7
    if len(reversed_digits) % 7:
        group_total += 1
    encoded = []
    for idx in range(group_total):
        # Un-reverse each 7-digit chunk before decoding its value.
        chunk = reversed_digits[idx * 7: (idx + 1) * 7][::-1]
        code = base62_encode(int(chunk))
        if idx < group_total - 1:
            # All but the most-significant group are fixed-width.
            code = code.zfill(4)
        encoded.append(code)
    encoded.reverse()
    return ''.join(encoded)
def main():
    """Placeholder entry point; the module is used for its conversion
    helpers (base62_encode/decode, url_to_mid, mid_to_url)."""
    pass
# Run the (currently no-op) entry point when executed as a script.
if __name__ == '__main__':
    main()
e59bb70c95d6138373bffeb524bb18e6008a621b | Python | wsifan/debt-test | /runtest.py | UTF-8 | 1,063 | 2.703125 | 3 | [] | no_license | import unittest
import json
import requests
from HTMLTestRunner import HTMLTestRunner
import time
#加载测试文件
from test_case import test_userlogin
# Build the test suite.
suit = unittest.TestSuite()
# Collect every TestCase defined in the imported test module. The original
# called suit.addTest() with no argument, which raises TypeError.
suit.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_userlogin))

if __name__ == "__main__":
    testunit = unittest.TestSuite()
    # was: testunit.addTest(suite) -- NameError; the module-level name is `suit`.
    testunit.addTest(suit)
    # Timestamp so that every run produces a uniquely named report.
    now = time.strftime("%Y-%m-%d %H_%M_%S")
    # Path the HTML report is written to.
    filename = './' + now + 'test_result.html'
    # `with` guarantees the report file is flushed and closed even if the
    # run raises (the original leaked `fp` on any error before fp.close()).
    with open(filename, "wb") as fp:
        runner = HTMLTestRunner(stream=fp,
                                title="xxx接口测试报告",
                                description="测试用例执行情况:")
        # Execute the suite and stream results into the report.
        runner.run(testunit)
580440516626018c601dd8576c605fc36e875c18 | Python | Aasthaengg/IBMdataset | /Python_codes/p03252/s714583260.py | UTF-8 | 773 | 2.84375 | 3 | [] | no_license | import sys
import itertools
# import numpy as np
import time
import math
import heapq
from collections import defaultdict
sys.setrecursionlimit(10 ** 7)
INF = 10 ** 18
MOD = 10 ** 9 + 7
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
# map(int, input().split())
# Decide whether S and T are related by a consistent character mapping:
# each T character must always line up with one S character and vice versa.
S = input()
T = input()

t_to_s = defaultdict(list)
s_to_t = defaultdict(list)
for i, t_ch in enumerate(T):
    t_to_s[t_ch].append(S[i])
for i, s_ch in enumerate(S):
    s_to_t[s_ch].append(T[i])

ok = all(len(set(group)) == 1 for group in t_to_s.values())
ok = ok and all(len(set(group)) == 1 for group in s_to_t.values())

if ok:
    print("Yes")
else:
    print("No")
| true |
4456000c4d81804cd6092cd7214dbe5bfc892ca3 | Python | Sirius1942/skiWorkSpace | /drivers/KippRequests.py | UTF-8 | 384 | 2.84375 | 3 | [] | no_license | import requests
# Expected message format (per the original note): {"url": "www.baidu.com"}
def request(data):
    """Issue a plain GET to http://www.baidu.com and return the Response.

    NOTE(review): ``data`` is accepted but ignored — the comment above
    suggests the intent was to GET ``data["url"]``; confirm with callers
    before changing.
    """
    r = requests.get(url="http://www.baidu.com")  # the most basic GET request
    return r
# print(r.status_code) # 获取返回状态
# r = requests.get(url='http://dict.baidu.com/s', params={'wd':'python'}) #带参数的GET请求
# print(r.url)
# print(r.text)
| true |
a2145373dd173a36f06ab3dfc87e9674acee5613 | Python | StanleyGoldman/TitanicPython | /main_tests.py | UTF-8 | 4,257 | 2.90625 | 3 | [] | no_license | import main
from unittest import TestCase
class Test_extract_cabin_floor(TestCase):
    """Unit tests for main.extract_cabin_floor (cabin string -> floor letters)."""

    def test_none(self):
        self.assertIsNone(main.extract_cabin_floor(None))

    def test_empty(self):
        self.assertIsNone(main.extract_cabin_floor(""))

    def test_single_floor(self):
        self.assertEqual(main.extract_cabin_floor("C"), "C")

    def test_single_floor_duplicates(self):
        # Repeated floor letters collapse to a single occurrence.
        for cabin in ("C C", "C C C"):
            self.assertEqual(main.extract_cabin_floor(cabin), "C")

    def test_multiple_floors(self):
        self.assertEqual(main.extract_cabin_floor("C D"), "CD")

    def test_multiple_floors_duplicated(self):
        self.assertEqual(main.extract_cabin_floor("C C D"), "CD")

    def test_single_room(self):
        self.assertEqual(main.extract_cabin_floor("C45"), "C")

    def test_multiple_room_same_floor(self):
        self.assertEqual(main.extract_cabin_floor("C45 C46"), "C")

    def test_multiple_room_different_floors(self):
        self.assertEqual(main.extract_cabin_floor("C45 D46"), "CD")

    def test_floor_and_room_same_floor(self):
        # Order of bare floor vs. numbered cabin must not matter.
        for cabin in ("C C45", "C45 C"):
            self.assertEqual(main.extract_cabin_floor(cabin), "C")

    def test_floor_and_room_different_floors(self):
        for cabin in ("C D46", "C46 D"):
            self.assertEqual(main.extract_cabin_floor(cabin), "CD")
class Test_extract_cabin_room(TestCase):
    """Unit tests for main.extract_cabin_room (cabin string -> room numbers)."""

    def test_none(self):
        self.assertEqual(main.extract_cabin_room(None), [])

    def test_empty(self):
        self.assertEqual(main.extract_cabin_room(""), [])

    def test_single_floor(self):
        # A bare floor letter carries no room number.
        self.assertEqual(main.extract_cabin_room("C"), [])

    def test_single_floor_duplicates(self):
        for cabin in ("C C", "C C C"):
            self.assertEqual(main.extract_cabin_room(cabin), [])

    def test_multiple_floors(self):
        self.assertEqual(main.extract_cabin_room("C D"), [])

    def test_multiple_floors_duplicated(self):
        self.assertEqual(main.extract_cabin_room("C C D"), [])

    def test_single_room(self):
        self.assertEqual(main.extract_cabin_room("C45"), [45])

    def test_multiple_room_same_floor(self):
        self.assertEqual(main.extract_cabin_room("C45 C46"), [45, 46])

    def test_multiple_room_different_floors(self):
        self.assertEqual(main.extract_cabin_room("C45 D46"), [45, 46])

    def test_floor_and_room_same_floor(self):
        # Bare floor letters mixed in must not add phantom rooms.
        for cabin in ("C C45", "C45 C"):
            self.assertEqual(main.extract_cabin_room(cabin), [45])

    def test_floor_and_room_different_floors(self):
        for cabin in ("C D46", "C46 D"):
            self.assertEqual(main.extract_cabin_room(cabin), [46])
class Test_extract_name_data(TestCase):
    """Unit tests for main.extract_name_data (passenger name parsing)."""

    @staticmethod
    def _expected(**overrides):
        # Baseline record shared by most cases; tests override what differs.
        record = dict(LastName="Abrahim", Salutation="Mrs", Title="Mrs",
                      FirstName=None, SpouseName=None, MaidenName=None)
        record.update(overrides)
        return record

    def test_name_1(self):
        self.assertEqual(main.extract_name_data("Abrahim, Mrs. Joseph"),
                         self._expected(FirstName="Joseph"))

    def test_name_2(self):
        self.assertEqual(
            main.extract_name_data("Abrahim, Mrs. Joseph (Sophie Easu)"),
            self._expected(FirstName="Joseph", SpouseName="Sophie",
                           MaidenName="Easu"))

    def test_name_3(self):
        self.assertEqual(
            main.extract_name_data("Abrahim, Mrs. Joseph (Sophie Halaut Easu)"),
            self._expected(FirstName="Joseph", SpouseName="Sophie Halaut",
                           MaidenName="Easu"))

    def test_name_4(self):
        # No first name: only the parenthesised maiden-name part is present.
        self.assertEqual(
            main.extract_name_data("Abrahim, Mrs. (Sophie Halaut Easu)"),
            self._expected(SpouseName="Sophie Halaut", MaidenName="Easu"))

    def test_name_5(self):
        self.assertEqual(
            main.extract_name_data("Karnes, Mrs. J Frank (Claire Bennett)"),
            self._expected(LastName="Karnes", FirstName="J Frank",
                           SpouseName="Claire", MaidenName="Bennett"))
| true |
2e6d26eeddc87de2aa50efd13aad7a1242d77460 | Python | jasaaved/Resume | /Python Game/battle_effectiveness.py | UTF-8 | 5,545 | 3.140625 | 3 | [] | no_license | import pokemon_info
import random
from collections import namedtuple
# Type chart, transcribed 1:1 from the original if/elif chain.
# defending type -> (attacking types that score +1, attacking types that
# are resisted and score -1). Unknown defending types contribute nothing.
_TYPE_CHART = {
    'Bug':      ({'Fire', 'Flying', 'Rock'}, {'Fighting', 'Ground', 'Grass'}),
    'Dragon':   ({'Ice', 'Dragon'}, {'Electric', 'Fire', 'Grass', 'Water'}),
    'Electric': ({'Ground'}, {'Flying', 'Electric'}),
    'Fighting': ({'Flying', 'Psychic'}, {'Rock', 'Bug'}),
    'Fire':     ({'Water', 'Rock', 'Ground'}, {'Fire', 'Grass', 'Bug', 'Ice'}),
    'Flying':   ({'Rock', 'Electric', 'Ice'}, {'Fighting', 'Ground', 'Bug', 'Grass'}),
    'Ghost':    ({'Ghost'}, {'Poison', 'Normal', 'Fighting', 'Bug'}),
    'Grass':    ({'Bug', 'Fire', 'Flying', 'Ice', 'Poison'}, {'Electric', 'Grass', 'Water', 'Ground'}),
    'Ground':   ({'Grass', 'Ice', 'Water'}, {'Poison', 'Rock', 'Electric'}),
    'Ice':      ({'Fire', 'Fighting', 'Rock'}, {'Ice'}),
    'Normal':   ({'Fighting'}, {'Ghost'}),
    'Poison':   ({'Psychic', 'Ground'}, {'Fighting', 'Poison', 'Grass'}),
    'Psychic':  ({'Ghost', 'Bug'}, {'Fighting', 'Psychic'}),
    'Rock':     ({'Fighting', 'Grass', 'Water', 'Ground'}, {'Normal', 'Flying', 'Poison', 'Fire'}),
    'Water':    ({'Electric', 'Grass'}, {'Fire', 'Water', 'Ice'}),
}

_NO_MATCHUP = (frozenset(), frozenset())


def attack_effectiveness(attacker: namedtuple, defender: namedtuple) -> str:
    '''
    Determines the effectiveness of an attack. Some Pokemon are dual types,
    so every (attacker type, defender type) pair contributes +1, -1 or 0 to
    a running score, which is then bucketed into one of three labels.

    Args:
        attacker: object whose ``type`` attribute is an iterable of type names.
        defender: same shape as ``attacker``.

    Returns:
        'Not very effective' (score < 0), 'Normal damage' (score == 0) or
        'Super effective' (score > 0).
    '''
    score = 0
    for att_type in attacker.type:
        for def_type in defender.type:
            strong_against, resisted = _TYPE_CHART.get(def_type, _NO_MATCHUP)
            if att_type in strong_against:
                score += 1
            elif att_type in resisted:
                score -= 1
    if score < 0:
        return 'Not very effective'
    if score == 0:
        return 'Normal damage'
    return 'Super effective'
def return_damage_amount(multiplier: str) -> str:
    '''
    Map an effectiveness label to a damage string using one uniform roll
    in [0, 19]; returns None for an unrecognised label (as before).

    NOTE(review): for 'Not very effective' the original had two separate
    bands (5-14 and 15-19) that both returned '25' — possibly one was
    meant to be '0'; the merged form below preserves the original odds.
    '''
    roll = random.randrange(0, 20)
    if multiplier == 'Not very effective':
        # 5/20 -> '50', 15/20 -> '25'.
        return '50' if roll <= 4 else '25'
    if multiplier == 'Normal damage':
        if roll <= 4:
            return '100'
        if roll <= 12:
            return '50'
        if roll <= 17:
            return '25'
        return '0'
    if multiplier == 'Super effective':
        # 15/20 -> '100', 5/20 -> '50'.
        return '100' if roll <= 14 else '50'
| true |
59a0d4a8a668ea1adfb1dc941643bffc34131ccc | Python | Xen0neX/mocking_in_tdd | /d_roll.py | UTF-8 | 194 | 2.546875 | 3 | [] | no_license | #Arihant Kunda
#June 16, 2021
#Idea for project taken from: https://www.youtube.com/watch?v=6tNS--WetLI
import random
def roll_dice():
    """Simulate one roll of a fair six-sided die: a uniform int in [1, 6]."""
    return random.randint(1, 6)
| true |
819de3000b7b8f6f382d44b970fd7c0e061eb4da | Python | Aasthaengg/IBMdataset | /Python_codes/p02382/s246119089.py | UTF-8 | 522 | 3.453125 | 3 | [] | no_license | #
# 10d
#
import math
def main():
    """Read n and two integer vectors from stdin, then print their
    Minkowski distances for p = 1, 2, 3 and p = infinity (Chebyshev),
    each with five decimal places."""
    n = int(input())
    x = list(map(int, input().split()))
    y = list(map(int, input().split()))

    # Absolute component-wise differences; only the first n entries count.
    diffs = [abs(x[i] - y[i]) for i in range(n)]

    d1 = sum(diffs)
    d2 = math.sqrt(sum(d * d for d in diffs))
    d3 = math.pow(sum(d ** 3 for d in diffs), 1 / 3)
    dn = max(diffs, default=0)

    print(f"{d1:.5f}")
    print(f"{d2:.5f}")
    print(f"{d3:.5f}")
    print(f"{dn:.5f}")
# Script entry point: run the distance computation on stdin data.
if __name__ == '__main__':
    main()
| true |
0716893552a36be73beab169e467997cef9b1e08 | Python | RashmiTiwari132/ProjectS3Lab | /my_application/samplepython.py | UTF-8 | 56 | 3.171875 | 3 | [] | no_license | for x in range(0, 10):
print "We are on time %d" % (x)
| true |
f4070f1f521e873bdd0a45076c703c34c991a9d1 | Python | dodonmountain/algorithm | /2019_late/20190827/swea_4880_토너먼트카드게임.py | UTF-8 | 762 | 3.546875 | 4 | [] | no_license | import sys
# Redirect stdin to the local sample file so input() reads the test data.
sys.stdin = open("input.txt")
T = int(input())  # number of test cases
def game(a, b):
    """Return the 1-based index of the winner between players ``a`` and
    ``b``, whose hands (encoded 1..3) are read from the global ``lst``;
    on a tie the first player ``a`` advances."""
    beats = {1: 3, 2: 1, 3: 2}  # hand -> the hand it defeats
    hand_a, hand_b = lst[a - 1], lst[b - 1]
    if beats[hand_a] == hand_b:
        return a
    if beats[hand_b] == hand_a:
        return b
    return a  # tie: keep the lower index
def divide(start, end):
    """Run a single-elimination bracket over players start..end (inclusive,
    1-based) and return the winner's index."""
    if start == end:
        # A bracket of one player: that player wins by default.
        return start
    mid = (start + end) // 2
    return game(divide(start, mid), divide(mid + 1, end))
for t_case in range(T):
    # N players, then the N hands (integers) they throw.
    N = int(input())
    lst = list(map(int, input().split()))
    # Print "#<case-number> <bracket-winner>".
    print("#{} {}".format(t_case+1, divide(1, N)))
| true |
158a6a81f2da23d8c976a20fddb35f17ebf5a02f | Python | amarendrafst/NeuralMap | /tests/tests_neighbourhood_functions.py | UTF-8 | 7,007 | 2.8125 | 3 | [
"MIT"
] | permissive | import unittest
import numpy as np
from ..neural_map import bubble, conical, gaussian, gaussian_cut, mexican_hat, no_neighbourhood
TOLERANCE = 1e-8
def euclidean(f_element, s_element):
    """Planar Euclidean distance between two (x, y) points."""
    dx = f_element[0] - s_element[0]
    dy = f_element[1] - s_element[1]
    return (dx ** 2 + dy ** 2) ** 0.5
# 8x5 grid of node positions (x, y); columns alternate a 0.5 x-shift and
# rows step by 0.8660254 (= sqrt(3)/2), i.e. an apparent hex-lattice layout.
cart_coord = np.array([
    [[0.5, 0.], [0., 0.8660254], [0.5, 1.73205081], [0., 2.59807621], [0.5, 3.46410162]],
    [[1.5, 0.], [1., 0.8660254], [1.5, 1.73205081], [1., 2.59807621], [1.5, 3.46410162]],
    [[2.5, 0.], [2., 0.8660254], [2.5, 1.73205081], [2., 2.59807621], [2.5, 3.46410162]],
    [[3.5, 0.], [3., 0.8660254], [3.5, 1.73205081], [3., 2.59807621], [3.5, 3.46410162]],
    [[4.5, 0.], [4., 0.8660254], [4.5, 1.73205081], [4., 2.59807621], [4.5, 3.46410162]],
    [[5.5, 0.], [5., 0.8660254], [5.5, 1.73205081], [5., 2.59807621], [5.5, 3.46410162]],
    [[6.5, 0.], [6., 0.8660254], [6.5, 1.73205081], [6., 2.59807621], [6.5, 3.46410162]],
    [[7.5, 0.], [7., 0.8660254], [7.5, 1.73205081], [7., 2.59807621], [7.5, 3.46410162]]
])

# Grid indices of the node chosen as best-matching unit (BMU) in every test.
COLUMN = 1
ROW = 1
RADIUS = 2.          # neighbourhood radius passed to every function under test
LEARNING_RATE = 0.5  # peak update value expected at the BMU
bmu = cart_coord[COLUMN, ROW]  # cartesian coordinates of the BMU
class BubbleTestCase(unittest.TestCase):
    """Bubble neighbourhood: constant LEARNING_RATE inside the radius,
    exactly zero outside."""

    def setUp(self):
        self.tested_function = bubble
        self.update_matrix = bubble(cart_coord, bmu, RADIUS, LEARNING_RATE)

    def test_bmu_value(self):
        delta = abs(self.update_matrix[COLUMN, ROW] - LEARNING_RATE)
        self.assertLessEqual(delta, TOLERANCE, 'wrong value in bmu position')

    def test_max_value(self):
        delta = abs(self.update_matrix[COLUMN, ROW] - self.update_matrix.max())
        self.assertLessEqual(delta, TOLERANCE, 'bmu has not the greatest value in g matrix')

    def test_neighbourhood_values(self):
        for row in range(cart_coord.shape[0]):
            for col in range(cart_coord.shape[1]):
                inside = (RADIUS - euclidean(cart_coord[row, col], bmu)) > 0
                expected = LEARNING_RATE if inside else 0.0
                delta = abs(self.update_matrix[row, col] - expected)
                self.assertLessEqual(delta, TOLERANCE, 'g matrix has an incorrect values')
class ConicalTestCase(unittest.TestCase):
    """Conical neighbourhood: strictly positive inside the radius, zero
    outside, with the peak at the BMU."""

    def setUp(self):
        self.tested_function = conical
        self.update_matrix = conical(cart_coord, bmu, RADIUS, LEARNING_RATE)

    def test_bmu_value(self):
        delta = abs(self.update_matrix[COLUMN, ROW] - LEARNING_RATE)
        self.assertLessEqual(delta, TOLERANCE, 'wrong value in bmu position')

    def test_max_value(self):
        delta = abs(self.update_matrix[COLUMN, ROW] - self.update_matrix.max())
        self.assertLessEqual(delta, TOLERANCE, 'bmu has not the greatest value in g matrix')

    def test_neighbourhood_values(self):
        for row in range(cart_coord.shape[0]):
            for col in range(cart_coord.shape[1]):
                if RADIUS - euclidean(cart_coord[row, col], bmu) > 0:
                    self.assertGreater(self.update_matrix[row, col], 0,
                                       'g matrix has an incorrect values')
                else:
                    self.assertLessEqual(abs(self.update_matrix[row, col]), TOLERANCE,
                                         'g matrix map has an incorrect values')
class GaussianTestCase(unittest.TestCase):
    """Unbounded gaussian neighbourhood: every node receives a strictly
    positive update, peaking at the BMU."""

    def setUp(self):
        self.tested_function = gaussian
        self.update_matrix = gaussian(cart_coord, bmu, RADIUS, LEARNING_RATE)

    def test_bmu_value(self):
        delta = abs(self.update_matrix[COLUMN, ROW] - LEARNING_RATE)
        self.assertLessEqual(delta, TOLERANCE, 'wrong value in bmu position')

    def test_max_value(self):
        delta = abs(self.update_matrix[COLUMN, ROW] - self.update_matrix.max())
        self.assertLessEqual(delta, TOLERANCE, 'bmu has not the greatest value in g matrix')

    def test_neighbourhood_values(self):
        for row in range(cart_coord.shape[0]):
            for col in range(cart_coord.shape[1]):
                self.assertGreater(self.update_matrix[row, col], 0,
                                   'g matrix has an incorrect values')
class GaussianCutTestCase(unittest.TestCase):
    """Truncated gaussian neighbourhood: positive inside the radius,
    exactly zero outside."""

    def setUp(self):
        self.tested_function = gaussian_cut
        self.update_matrix = gaussian_cut(cart_coord, bmu, RADIUS, LEARNING_RATE)

    def test_bmu_value(self):
        delta = abs(self.update_matrix[COLUMN, ROW] - LEARNING_RATE)
        self.assertLessEqual(delta, TOLERANCE, 'wrong value in bmu position')

    def test_max_value(self):
        delta = abs(self.update_matrix[COLUMN, ROW] - self.update_matrix.max())
        self.assertLessEqual(delta, TOLERANCE, 'bmu has not the greatest value in g matrix')

    def test_neighbourhood_values(self):
        for row in range(cart_coord.shape[0]):
            for col in range(cart_coord.shape[1]):
                if RADIUS - euclidean(cart_coord[row, col], bmu) > 0:
                    self.assertGreater(self.update_matrix[row, col], 0,
                                       'g matrix has an incorrect values')
                else:
                    self.assertLessEqual(abs(self.update_matrix[row, col]), TOLERANCE,
                                         'g matrix map has an incorrect values')
class MexicanHatTestCase(unittest.TestCase):
    """Mexican-hat neighbourhood: peak at the BMU and a genuinely negative
    surround somewhere in the matrix."""

    def setUp(self):
        self.tested_function = mexican_hat
        self.update_matrix = mexican_hat(cart_coord, bmu, RADIUS, LEARNING_RATE)

    def test_bmu_value(self):
        delta = abs(self.update_matrix[COLUMN, ROW] - LEARNING_RATE)
        self.assertLessEqual(delta, TOLERANCE, 'wrong value in bmu position')

    def test_max_value(self):
        delta = abs(self.update_matrix[COLUMN, ROW] - self.update_matrix.max())
        self.assertLessEqual(delta, TOLERANCE, 'bmu has not the greatest value in g matrix')

    def test_min_values(self):
        self.assertLess(self.update_matrix.min(), 0, 'min value is greater or equal than zero')
class NoNeighbourhoodTestCase(unittest.TestCase):
    """Degenerate neighbourhood: only the BMU itself is updated; every
    other entry is exactly zero."""

    def setUp(self):
        self.tested_function = no_neighbourhood
        self.update_matrix = no_neighbourhood(cart_coord, bmu, RADIUS, LEARNING_RATE)

    def test_bmu_value(self):
        delta = abs(self.update_matrix[COLUMN, ROW] - LEARNING_RATE)
        self.assertLessEqual(delta, TOLERANCE, 'wrong value in bmu position')

    def test_max_value(self):
        delta = abs(self.update_matrix[COLUMN, ROW] - self.update_matrix.max())
        self.assertLessEqual(delta, TOLERANCE, 'bmu has not the greatest value in g matrix')

    def test_min_values(self):
        # With the BMU zeroed out, the remaining matrix must be all zeros.
        everything_else = self.update_matrix.copy()
        everything_else[COLUMN, ROW] = 0.
        self.assertEqual(everything_else.min(), 0, 'min value is not zero')
        self.assertEqual(everything_else.max(), 0, 'max value is not zero (excluding bmu)')
| true |
daddd439cb88ee3b68351601f8f8f32007ae44e4 | Python | Vidoux/N_Reines-Algo_Avanc- | /Damier.py | UTF-8 | 1,158 | 3.015625 | 3 | [] | no_license | class Damier:
    def __init__(self, taille):
        # Side length of the (square) board.
        self.taille = taille
        # taille x taille matrix of cell values; set_case_dame writes 1
        # to mark a placed dame, supprimer_dame resets a cell to 0.
        self.grille = self.__new_grille()
def supprimer_dame(self, coordonnées):
self.grille[coordonnées.ligne][coordonnées.colonne] = 0
def set_case_dame(self, coordonnées):
self.grille[coordonnées.ligne][coordonnées.colonne] = 1
def get_case(self, coordonnées):
return self.grille[coordonnées.ligne][coordonnées.colonne]
def toString(self):
string = ""
for i in range(0, self.taille):
ligne = ""
for j in range(0, self.taille):
ligne += " " + str(self.grille[i][j])
string += ligne
if i < self.taille - 1:
string += "\n"
return string
    def get_taille(self):
        """Return the board's side length."""
        return self.taille
    def set_grille_spécifique(self, grille):
        """Replace the whole board with a caller-supplied matrix.

        NOTE(review): the matrix is stored as-is; nothing checks that it
        is taille x taille.
        """
        self.grille = grille
# PRIVATE functions
def __new_grille(self):
tab = []
for i in range(0, self.taille):
ligne = []
for j in range(0, self.taille):
ligne.append(0)
tab.append(ligne)
return tab
| true |
0455709f02f02308842ccd7587a452cf3965ea39 | Python | MachinaExHumane/voice-dataset-creation | /scripts/markersfile_to_metadata.py | UTF-8 | 3,073 | 2.609375 | 3 | [] | no_license | import argparse
import os
import sys
from shutil import copyfile, rmtree
import pandas as pd
def audition(wavs_export_path, wavs_final_path, input_filename, output_filename):
    """Build metadata and renamed wavs from an Adobe Audition markers export."""
    create_metadata_and_wavs("audition", wavs_export_path, wavs_final_path, input_filename, output_filename)
def audacity(wavs_export_path, wavs_final_path, input_filename, output_filename):
    """Build metadata and renamed wavs from an Audacity label-track export."""
    create_metadata_and_wavs("audacity", wavs_export_path, wavs_final_path, input_filename, output_filename)
def create_metadata_and_wavs(file_type, wavs_export_path, wavs_final_path, input_filename, output_filename):
    """Read a markers/labels file, copy each labelled wav into
    ``wavs_final_path`` under a normalised name, and append
    ``name|sentence|sentence`` lines to ``output_filename``.

    file_type: 'audition' (tab-separated export with a header row) or
        'audacity' (headerless label track; columns 2 and 3 hold the
        marker name and the sentence).
    """
    # Start from an empty output directory when rerunning.
    if os.path.exists(wavs_final_path):
        rmtree(wavs_final_path)
    os.mkdir(wavs_final_path)

    if file_type == 'audition':
        df = pd.read_csv(input_filename, sep='\t', encoding='utf-8')
    elif file_type == 'audacity':
        df = pd.read_csv(input_filename, sep='\t', encoding='utf-8',
                         usecols=[2, 3], names=['Name', 'Description'])

    # was: fp = open(...) with a trailing fp.close() -- the handle leaked
    # whenever any copy or read below raised.
    with open(output_filename, 'w') as fp:
        for wav_marker_name, sentence in zip(df['Name'].to_list(), df['Description'].to_list()):
            if pd.isnull(sentence):
                # Markers without a transcription are skipped entirely.
                continue
            print(sentence)
            wav_path_orig = str(wav_marker_name) + ".wav"
            # Normalise the exported name: spaces -> underscores, 'M' -> 'm'
            # (presumably to lowercase exported "Marker" prefixes -- confirm).
            wav_path = wav_path_orig.replace(" ", "_").replace("M", "m")
            wav_filename = wav_path.replace(".wav", "")
            copyfile(wavs_export_path + wav_path_orig, wavs_final_path + wav_path)
            fp.write(f"{wav_filename}|{sentence}|{sentence}\n")
def execute_cmdline(argv):
    """Parse command-line arguments and dispatch to the matching handler
    function (`audition` or `audacity`), passing the parsed options as
    keyword arguments."""
    prog = argv[0]

    parser = argparse.ArgumentParser(
        prog = prog,
        description = 'Convert markers file into metadata format',
        epilog = 'Type "%s <command> -h" for more information.' % prog
    )

    subparsers = parser.add_subparsers(dest='command')
    subparsers.required = True

    def add_command(cmd, desc, example=None):
        # Register one subcommand, echoing an example invocation in its help.
        epilog = 'Example: %s %s' % (prog, example) if example is not None else None
        return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog)

    p = add_command( 'audition', 'Audition format', 'audition')
    p.add_argument( '--wavs_export_path', default="../test_data/wavs_export_audition/")
    p.add_argument( '--wavs_final_path', default="../test_data/wavs/")
    p.add_argument( '--input_filename', default="../test_data/Markers.csv")
    p.add_argument( '--output_filename', default="../test_data/metadata.csv")

    p = add_command( 'audacity', 'Audacity format', 'audacity')
    p.add_argument( '--wavs_export_path', default="../test_data/wavs_export_audacity/")
    p.add_argument( '--wavs_final_path', default="../test_data/wavs/")
    p.add_argument( '--input_filename', default="../test_data/Label Track STT.txt")
    p.add_argument( '--output_filename', default="../test_data/metadata.csv")

    # With no arguments at all, show help instead of failing.
    args = parser.parse_args(argv[1:] if len(argv) > 1 else ['-h'])
    # The handler shares its name with the subcommand.
    func = globals()[args.command]
    # Drop the subcommand so the remaining namespace maps 1:1 onto the
    # handler's keyword parameters.
    del args.command
    func(**vars(args))
# Script entry point: hand the raw argv to the subcommand dispatcher.
if __name__ == "__main__":
    execute_cmdline(sys.argv)
| true |
aa1bebd2cfbf29fb05e291f9c1ae5624f762d97f | Python | delebasyq/bitmex-algo | /rtTrader.py | UTF-8 | 2,466 | 2.71875 | 3 | [] | no_license | import redis
import redisCommands as rd
import pandas as pd
import threading
import concurrent.futures
from datetime import datetime
import redisCommands as rd
import time
import random
# Realtime trading parameters.
symbol = 'XBTUSD'  # BitMEX instrument symbol to trade
qty = -1  # order quantity; negative presumably means sell/short -- TODO confirm sign convention
def wait_until(func):
    """Block until a message arrives on the `xbid` stream, then return func(message).

    Polls the module-level `xstream` handle repeatedly; only entries that
    carry a `stream` attribute are treated as real payloads.
    """
    while True:
        for message in xstream.xbid.read():
            if hasattr(message, 'stream'):
                return func(message)
def wait_until_par(*args):
    """Run wait_until(*args) on a worker thread and block for its result."""
    with concurrent.futures.ThreadPoolExecutor() as executor:
        return executor.submit(wait_until, *args).result()
#IF YOU WANT TO GET DATA W/O DELAY BY USING THIS FUNCTION, USE THE FOLLOWING:
#data = wait_until_par(read_xstream)
def read_xstream(message):
    """Unpack a raw stream message into an rd.xMsg with price/amount/timestamp."""
    msg = rd.xMsg()
    msg.price = message.data['price']
    msg.amount = message.data['amount']
    msg.timestamp = message.timestamp
    return msg
xstream = rd.xstream()
#example of polling through the latest price data with it being a data object
#while(True):
# data = wait_until_par(read_xstream)
# print(data.price, data.amount, data.timestamp)
#variables to replace later with trade logic
'''
buy = []
sell = []
for i in range(100):
buy.append(bool(random.getrandbits(1)))
sell.append(bool(random.getrandbits(1)))
'''
def getSignal(price, amount):
    """Placeholder trading signal: always returns 'buy'.

    The original also drew two random bits into unused locals before
    unconditionally returning 'buy'; that dead code is removed. Replace
    this stub with real trade logic using *price* and *amount*.
    """
    return 'buy'
# Main trading loop: poll the latest tick, compute a signal, and submit an
# order when the signal says 'buy'.
# NOTE(review): `bitmex_client` is never defined anywhere in this file, so
# this script cannot run as written -- presumably the BitMEX API client is
# meant to be constructed above; confirm and wire it up.
# Fixes vs. original: the `if` was missing its colon (a syntax error, with
# the order-placement lines left unindented), and `print(order_result)`
# referenced an undefined name (changed to print the placed order).
while True:
    data = wait_until_par(read_xstream)
    price = int(data.price)
    amount = int(data.amount)
    print(data.price, data.amount, data.timestamp)
    if getSignal(price, amount) == 'buy':
        order = bitmex_client.Order.Order_new(symbol, orderQty=qty, price=price).result()
        print(order)

# NOTE(review): everything below is unreachable -- the loop above never exits.
orders = bitmex_client.Order.Order_getOrders().result()[0]
print("TRADE")

# Print all our current orders
for order in orders:
    print(order)
    processed_order = {}
    processed_order["symbol"] = order["symbol"]
    processed_order["amount"] = str(order["orderQty"]).split("L")[0]
    processed_order["price"] = order["price"]
    processed_order["side"] = order["side"]
    processed_order["status"] = order["ordStatus"]
    print(processed_order)
    time.sleep(1)
d0df9396e098618c6e19e793b4633f6a977c9d8a | Python | bobby20180331/Algorithms | /LeetCode/python/_290.WordPattern.py | UTF-8 | 2,537 | 3.671875 | 4 | [] | no_license | # 虽然是自己写自己调试的,但是参考了别人的思路,说是什么集合论双射,先打上_吧。35ms,超过93%。
class Solution(object):
    def wordPattern(self, pattern, str):
        """Return True iff `pattern` chars and the space-separated words of
        `str` form a bijection (same char always maps to the same word, and
        no two chars map to one word).

        Fix vs. original: the uniqueness-of-values check was a second pass
        using list membership (O(n^2) overall); this version verifies the
        bijection in a single pass with a dict and a set, O(n).

        :type pattern: str
        :type str: str
        :rtype: bool
        """
        words = str.split(' ')  # literal-space split, matching the original
        if len(pattern) != len(words):
            return False
        mapping = {}   # pattern char -> word
        used = set()   # words already claimed by some char
        for ch, word in zip(pattern, words):
            if ch in mapping:
                if mapping[ch] != word:
                    return False
            else:
                if word in used:
                    return False
                mapping[ch] = word
                used.add(word)
        return True
######## Four years later: solved it on my own this time, though a bit less efficiently.
# 35ms -> 44ms, plus 14MB more memory... (the code is nearly identical, I even swapped the list for a set; the memory stat probably just wasn't reported four years ago)
## Sure enough, resubmitting the solution above now shows the same time and memory, but only beats 34% on speed and 5% on memory...
class Solution:
    def wordPattern(self, pattern: str, s: str) -> bool:
        """True iff pattern chars and the words of s (split on single
        spaces) are in one-to-one correspondence."""
        words = s.split(' ')
        if len(words) != len(pattern):
            return False
        char_to_word = {}
        claimed_words = set()
        for ch, word in zip(pattern, words):
            expected = char_to_word.get(ch)
            if expected is None:
                # First time we see this pattern char: the word must be free.
                if word in claimed_words:
                    return False
                char_to_word[ch] = word
                claimed_words.add(word)
            elif expected != word:
                return False
        return True
# Checked the official solution: it just stores the mapping in two dicts...
# 36ms, 14MB memory; beats 79% on speed and 5% on memory
class Solution:
    def wordPattern(self, pattern: str, s: str) -> bool:
        """Two-way mapping check: pattern char <-> word must be a bijection."""
        words = s.split()
        if len(pattern) != len(words):
            return False
        char_of = {}
        word_of = {}
        for ch, word in zip(pattern, words):
            # setdefault records the first pairing; a later mismatch on
            # either direction breaks the bijection.
            if word_of.setdefault(ch, word) != word:
                return False
            if char_of.setdefault(word, ch) != ch:
                return False
        return True
| true |
# 2019 US federal income-tax brackets for a single filer:
# (lower bound, upper bound, marginal rate), contiguous and ascending.
TAX_BRACKETS = [
    (0, 9700, 0.1),
    (9700, 39475, 0.12),
    (39475, 84200, 0.22),
    (84200, 160725, 0.24),
    # Fix: original read 160726 here, leaving a $1 gap between brackets.
    (160725, 204100, 0.32),
    (204100, 306750, 0.35),
    (306750, float('inf'), 0.37),
]
def format_worth(label, value):
    """Return *label* left-padded to 20 chars followed by *value* rendered
    as a comma-grouped two-decimal amount right-justified in 15 chars."""
    amount = format(value, ',.2f').rjust(15)
    return format(label, '20s') + amount
def get_income_tax(amount):
    """Return the US federal income tax owed on *amount* using the marginal
    rates in the module-level TAX_BRACKETS.

    Fix vs. original: the taxable slice per bracket was capped at the
    bracket's absolute upper bound (`min(..., upper_limit)`) instead of the
    bracket's *width* (`upper_limit - lower_limit`), so any income above
    the second bracket was badly overtaxed. Each bracket now taxes only
    the portion of income that falls inside it.
    """
    tax = 0
    for lower_limit, upper_limit, rate in TAX_BRACKETS:
        taxable = min(max(amount - lower_limit, 0), upper_limit - lower_limit)
        tax += taxable * rate
    return tax
class CashFlow:
    """A recurring cash amount that compounds each period, optionally capped
    at a saturation value and optionally expiring after a fixed lifespan."""

    def __init__(
        self, starting_value, growth_rate=1, saturation_value=float('inf'),
        lifespan=float('inf')
    ):
        self.value = starting_value
        self.growth_rate = growth_rate
        self.saturation_value = saturation_value
        self.lifespan = lifespan

    def step(self):
        """Advance one period: grow (up to the cap) while alive, else zero out."""
        self.lifespan -= 1
        if self.lifespan <= 0:
            self.value = 0
        else:
            grown = self.value * self.growth_rate
            self.value = min(grown, self.saturation_value)
class Asset:
    """A holding whose value compounds by a fixed multiplicative rate per step."""

    def __init__(self, starting_value, growth_rate=1):
        self.value = starting_value
        self.growth_rate = growth_rate

    def asset_growth(self):
        """Appreciation earned over one step at the current value."""
        return self.value * (self.growth_rate - 1)

    def step(self):
        """Compound the value by one period's growth."""
        self.value *= self.growth_rate
class Loan(Asset):
    """An amortized loan modelled as a negative-value asset that is paid
    down by a fixed payment each step."""

    def __init__(self, amount, rate, length):
        self.principal_amount = amount
        # Standard amortization payment for `length` periods at `rate`.
        self.payment_amount = rate * amount / (1 - (1 + rate) ** -length)
        self.length = length
        # Outstanding obligation: the total of all remaining payments, negative.
        self.value = -self.payment_amount * length

    def asset_growth(self):
        """Each period of repayment raises net worth by one payment (until paid off)."""
        return self.payment_amount if self.value < 0 else 0

    def step(self):
        """Apply one payment, never letting the balance go positive."""
        self.value = min(self.value + self.payment_amount, 0)

    def get_spending(self):
        """Cash outflow of the payments over the life of the loan."""
        return CashFlow(self.payment_amount, lifespan=self.length)

    def get_tax_deduction(self):
        """Deductible portion: payment minus straight-line principal repayment."""
        return CashFlow(
            self.payment_amount - self.principal_amount / self.length,
            lifespan=self.length,
        )
class Portfolio:
    """Aggregates liquid savings, periodic cash flows, and assets, and
    simulates net worth one period at a time via step().

    Rates are multiplicative per period (e.g. 1.05 == +5% per period).
    """

    def __init__(
        self,
        initial_value=0,
        investment_return_rate=1,
        inflation_rate=1,
        incomes=None,
        spendings=None,
        tax_deductions=None,
        assets=None,
    ):
        """Create a portfolio.

        Fix vs. original: the four collection parameters defaulted to
        mutable ``[]`` literals, which Python shares across every call --
        two Portfolios built with defaults would have aliased the same
        lists. They now default to None and get a fresh list each.
        """
        self.liquid_value = initial_value
        self.investment_return_rate = investment_return_rate
        # Cumulative inflation tracked as a compounding multiplier from 1.
        self.inflation = CashFlow(1, inflation_rate)
        self.incomes = [] if incomes is None else incomes
        self.spendings = [] if spendings is None else spendings
        self.tax_deductions = [] if tax_deductions is None else tax_deductions
        self.assets = [] if assets is None else assets

    def total_income(self):
        """Sum of all income flows for the current period."""
        return self._get_total_value(self.incomes)

    def tax_deduction(self):
        """Sum of all tax-deduction flows for the current period."""
        return self._get_total_value(self.tax_deductions)

    def investment_return(self):
        """Return earned on liquid value this period."""
        return_ = self.liquid_value * (self.investment_return_rate - 1)
        return return_

    def asset_value(self):
        """Combined current value of all assets (loans contribute negatively)."""
        return sum(asset.value for asset in self.assets)

    def asset_growth(self):
        """Combined per-period appreciation/paydown of all assets."""
        return sum(asset.asset_growth() for asset in self.assets)

    def spending(self):
        """Sum of all spending flows for the current period."""
        return self._get_total_value(self.spendings)

    @classmethod
    def _get_total_value(cls, cash_flows):
        """Sum the .value of each flow; 0 for an empty/None collection."""
        value = 0 if not cash_flows else sum(
            flow.value for flow in cash_flows
        )
        return value

    def step(self):
        """Advance the simulation one period.

        NOTE(review): all flows and assets are stepped *before* the
        period's totals are computed, so post-growth values are the ones
        that enter this period's balance -- preserved from the original.
        """
        for income in self.incomes:
            income.step()
        for tax_deduction in self.tax_deductions:
            tax_deduction.step()
        for spending in self.spendings:
            spending.step()
        for asset in self.assets:
            asset.step()
        self.inflation.step()
        self.liquid_value = (
            self.liquid_value
            + self.total_income()
            # Income tax on earned income net of deductions.
            - get_income_tax(self.total_income() - self.tax_deduction())
            + self.investment_return()
            # Flat 20% levy on investment gains -- presumably a
            # capital-gains approximation; confirm intent.
            - self.investment_return() * 0.2
            - self.spending()
        )

    def total_value(self):
        """Liquid value plus net asset value."""
        return self.liquid_value + self.asset_value()

    def deflated_value(self):
        """Total value expressed in starting-period dollars."""
        return self.total_value() / self.inflation.value

    def __str__(self):
        # Relies on the module-level format_worth() helper for alignment.
        string = (
            f'{format_worth("Income", self.total_income())}\n'
            f'{format_worth("Investment return", self.investment_return())}\n'
            f'{format_worth("Asset growth", self.asset_growth())}\n'
            f'{format_worth("Spending", self.spending())}\n'
            f'{format_worth("Net worth", self.total_value())}\n'
            f'{format_worth("After inflation", self.deflated_value())}'
        )
        return string
| true |