blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
124864ab1f97c15eee48e474368a05241ceda50e | Python | sshyran/Galileo-sdk | /galileo_sdk/business/objects/exceptions.py | UTF-8 | 133 | 2.640625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | class JobsException(Exception):
    def __init__(self, job_id, msg=None):
        """Record the id of the job that failed and pass *msg* to Exception.

        :param job_id: identifier of the job this error relates to
        :param msg: optional human-readable error message
        """
        # Keep the job id on the instance so callers can inspect it.
        self.job_id = job_id
        super().__init__(msg)
| true |
cc6370b12ba8da581de4a976da77c8074b074afc | Python | ashemery/psut | /workshops/131214/diy5.py | UTF-8 | 476 | 3.75 | 4 | [] | no_license | #########################
# DIY 5 Answer
import random

# Write 100 random integers in [0, 99] to file2.txt, one per line.
# `with` guarantees the handle is closed (the original left the read
# handle open for the lifetime of the process).
with open("file2.txt", "w") as f:
    for count in range(100):
        rnumber = random.randint(0, 99)
        f.write(str(rnumber) + '\n')

odd = []
even = []
# Read the numbers back and partition them into even and odd lists.
with open('file2.txt', 'r') as f:
    for line in f:
        for token in line.split():
            value = int(token)
            if value % 2 == 0:
                even.append(value)
            else:
                odd.append(value)

print("Even list is: ", even)
print("Odd list is: ", odd)
| true |
ee8f604c0f22c470b4c31034ae3dcde4900fc4b0 | Python | djvita/python-control | /control/freqplot.py | UTF-8 | 15,685 | 2.53125 | 3 | [] | no_license | # freqplot.py - frequency domain plots for control systems
#
# Author: Richard M. Murray
# Date: 24 May 09
#
# This file contains some standard control system plots: Bode plots,
# Nyquist plots and pole-zero diagrams. The code for Nichols charts
# is in nichols.py.
#
# Copyright (c) 2010 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
import matplotlib.pyplot as plt
import scipy as sp
import numpy as np
from warnings import warn
from .ctrlutil import unwrap
from .bdalg import feedback
from .lti import isdtime, timebaseEqual
__all__ = ['bode_plot', 'nyquist_plot', 'gangof4_plot',
'bode', 'nyquist', 'gangof4']
#
# Main plotting functions
#
# This section of the code contains the functions for generating
# frequency domain plots
#
# Bode plot
def bode_plot(syslist, omega=None, dB=None, Hz=None, deg=None,
              Plot=True, *args, **kwargs):
    r"""Bode plot for a system

    Plots a Bode plot for the system over a (optional) frequency range.

    Parameters
    ----------
    syslist : linsys
        List of linear input/output systems (single system is OK)
    omega : freq_range
        Range of frequencies (list or bounds) in rad/sec
    dB : boolean
        If True, plot result in dB
    Hz : boolean
        If True, plot frequency in Hz (omega must be provided in rad/sec)
    deg : boolean
        If True, plot phase in degrees (else radians)
    Plot : boolean
        If True, plot magnitude and phase
    *args, **kwargs:
        Additional options to matplotlib (color, linestyle, etc)

    Returns
    -------
    mag : array (list if len(syslist) > 1)
        magnitude
    phase : array (list if len(syslist) > 1)
        phase in radians
    omega : array (list if len(syslist) > 1)
        frequency in rad/sec

    Notes
    -----
    1. Alternatively, you may use the lower-level method (mag, phase, freq)
    = sys.freqresp(freq) to generate the frequency response for a system,
    but it returns a MIMO response.

    2. If a discrete time model is given, the frequency response is plotted
    along the upper branch of the unit circle, using the mapping z = exp(j
    \omega dt) where omega ranges from 0 to pi/dt and dt is the discrete
    time base. If not timebase is specified (dt = True), dt is set to 1.

    Examples
    --------
    >>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
    >>> mag, phase, omega = bode(sys)
    """
    # Set default values for options from the package-level configuration.
    from . import config
    if dB is None:
        dB = config.bode_dB
    if deg is None:
        deg = config.bode_deg
    if Hz is None:
        Hz = config.bode_Hz
    # If argument was a singleton, turn it into a list
    if not getattr(syslist, '__iter__', False):
        syslist = (syslist,)
    mags, phases, omegas = [], [], []
    for sys in syslist:
        if sys.inputs > 1 or sys.outputs > 1:
            # TODO: Add MIMO bode plots.
            raise NotImplementedError("Bode is currently only implemented for SISO systems.")
        else:
            if omega is None:
                # Select a default range if none is provided
                omega = default_frequency_range(syslist)
            # Get the magnitude and phase of the system
            omega = np.array(omega)
            mag_tmp, phase_tmp, omega_sys = sys.freqresp(omega)
            mag = np.atleast_1d(np.squeeze(mag_tmp))
            phase = np.atleast_1d(np.squeeze(phase_tmp))
            phase = unwrap(phase)
            # Convert the frequency axis to Hz when requested.
            if Hz:
                omega_plot = omega_sys / (2 * np.pi)
            else:
                omega_plot = omega_sys
            mags.append(mag)
            phases.append(phase)
            omegas.append(omega_sys)
            # Get the dimensions of the current axis, which we will divide up
            #! TODO: Not current implemented; just use subplot for now
            if Plot:
                # Magnitude plot
                plt.subplot(211)
                if dB:
                    plt.semilogx(omega_plot, 20 * np.log10(mag), *args, **kwargs)
                else:
                    plt.loglog(omega_plot, mag, *args, **kwargs)
                # NOTE: plt.hold(True) used to be called here.  hold() was
                # deprecated in Matplotlib 2.0 and removed in 3.0 (hold
                # behavior is now always on), so the call is dropped.

                # Add a grid to the plot + labeling
                plt.grid(True)
                plt.grid(True, which='minor')
                plt.ylabel("Magnitude (dB)" if dB else "Magnitude")
                # Phase plot
                plt.subplot(212)
                if deg:
                    phase_plot = phase * 180 / np.pi
                else:
                    phase_plot = phase
                plt.semilogx(omega_plot, phase_plot, *args, **kwargs)
                # Add a grid to the plot + labeling
                plt.grid(True)
                plt.grid(True, which='minor')
                plt.ylabel("Phase (deg)" if deg else "Phase (rad)")
                # Label the frequency axis
                plt.xlabel("Frequency (Hz)" if Hz else "Frequency (rad/sec)")
    # Single-system calls return the bare arrays for convenience.
    if len(syslist) == 1:
        return mags[0], phases[0], omegas[0]
    else:
        return mags, phases, omegas
# Nyquist plot
def nyquist_plot(syslist, omega=None, Plot=True, color='b',
                 labelFreq=0, *args, **kwargs):
    """Nyquist plot for a system

    Plots a Nyquist plot for the system over a (optional) frequency range.

    Parameters
    ----------
    syslist : list of LTI
        List of linear input/output systems (single system is OK)
    omega : freq_range
        Range of frequencies (list or bounds) in rad/sec
    Plot : boolean
        If True, plot magnitude
    labelFreq : int
        Label every nth frequency on the plot
    *args, **kwargs:
        Additional options to matplotlib (color, linestyle, etc)

    Returns
    -------
    real : array
        real part of the frequency response array
    imag : array
        imaginary part of the frequency response array
    freq : array
        frequencies

    Examples
    --------
    >>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
    >>> real, imag, freq = nyquist_plot(sys)
    """
    # If argument was a singleton, turn it into a list
    if not getattr(syslist, '__iter__', False):
        syslist = (syslist,)
    # Select a default range if none is provided
    if omega is None:
        #! TODO: think about doing something smarter for discrete
        omega = default_frequency_range(syslist)
    # Interpolate between wmin and wmax if a tuple or list are provided
    elif isinstance(omega, (list, tuple)):
        # Only accept tuple or list of length 2
        if len(omega) != 2:
            raise ValueError("Supported frequency arguments are (wmin,wmax) tuple or list, or frequency vector. ")
        omega = np.logspace(np.log10(omega[0]), np.log10(omega[1]),
                            num=50, endpoint=True, base=10.0)
    for sys in syslist:
        if sys.inputs > 1 or sys.outputs > 1:
            # TODO: Add MIMO nyquist plots.
            raise NotImplementedError("Nyquist is currently only implemented for SISO systems.")
        else:
            # Get the magnitude and phase of the system
            mag_tmp, phase_tmp, omega = sys.freqresp(omega)
            mag = np.squeeze(mag_tmp)
            phase = np.squeeze(phase_tmp)
            # Compute the primary curve.  The deprecated scipy numpy-aliases
            # (sp.multiply / sp.cos / sp.sin) were removed from scipy's
            # top-level namespace, so plain numpy is used instead.
            x = mag * np.cos(phase)
            y = mag * np.sin(phase)
            if Plot:
                # Plot the primary curve and mirror image
                plt.plot(x, y, '-', color=color, *args, **kwargs)
                plt.plot(x, -y, '--', color=color, *args, **kwargs)
                # Mark the -1 point
                plt.plot([-1], [0], 'r+')
            # Label the frequencies of the points
            if labelFreq:
                ind = slice(None, None, labelFreq)
                for xpt, ypt, omegapt in zip(x[ind], y[ind], omega[ind]):
                    # Convert to Hz
                    f = omegapt / (2 * np.pi)
                    # Factor out multiples of 1000 and limit the
                    # result to the range [-8, 8].
                    pow1000 = max(min(get_pow1000(f), 8), -8)
                    # Get the SI prefix.
                    prefix = gen_prefix(pow1000)
                    # Apply the text. (Use a space before the text to
                    # prevent overlap with the data.)
                    #
                    # np.round() is used because 0.99... appears
                    # instead of 1.0, and this would otherwise be
                    # truncated to 0.
                    plt.text(xpt, ypt,
                             ' ' + str(int(np.round(f / 1000 ** pow1000, 0))) +
                             ' ' + prefix + 'Hz')
    return x, y, omega
# Gang of Four
#! TODO: think about how (and whether) to handle lists of systems
def gangof4_plot(P, C, omega=None):
    """Plot the "Gang of 4" transfer functions for a system

    Generates a 2x2 plot showing the "Gang of 4" sensitivity functions
    [T, PS; CS, S]

    Parameters
    ----------
    P, C : LTI
        Linear input/output systems (process and control)
    omega : array
        Range of frequencies (list or bounds) in rad/sec

    Returns
    -------
    None
    """
    if P.inputs > 1 or P.outputs > 1 or C.inputs > 1 or C.outputs > 1:
        # TODO: Add MIMO go4 plots.
        raise NotImplementedError("Gang of four is currently only implemented for SISO systems.")
    else:
        # Select a default range if none is provided
        #! TODO: This needs to be made more intelligent
        if omega is None:
            omega = default_frequency_range((P, C))
        # Compute the sensitivity functions
        L = P * C
        S = feedback(1, L)
        T = L * S
        # Plot the four sensitivity functions; the original repeated the
        # freqresp/squeeze/loglog sequence four times, once per subplot.
        #! TODO: Need to add in the mag = 1 lines
        for subplot_id, sys in ((221, T), (222, P * S), (223, C * S), (224, S)):
            mag_tmp, phase_tmp, omega_sys = sys.freqresp(omega)
            mag = np.squeeze(mag_tmp)
            plt.subplot(subplot_id)
            plt.loglog(omega_sys, mag)
#
# Utility functions
#
# This section of the code contains some utility functions for
# generating frequency domain plots
#
# Compute reasonable defaults for axes
def default_frequency_range(syslist):
    """Compute a reasonable default frequency range for frequency
    domain plots.

    Finds a reasonable default frequency range by examining the features
    (poles and zeros) of the systems in syslist.

    Parameters
    ----------
    syslist : list of LTI
        List of linear input/output systems (single system is OK)

    Returns
    -------
    omega : array
        Range of frequencies in rad/sec

    Examples
    --------
    >>> from matlab import ss
    >>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
    >>> omega = default_frequency_range(sys)
    """
    # This code looks at the poles and zeros of all of the systems that
    # we are plotting and sets the frequency range to be one decade above
    # and below the min and max feature frequencies, rounded to the nearest
    # integer. It excludes poles and zeros at the origin. If no features
    # are found, it returns logspace(-1, 1)

    # Find the list of all poles and zeros in the systems
    features = np.array(())
    # detect if single sys passed by checking if it is sequence-like
    if not getattr(syslist, '__iter__', False):
        syslist = (syslist,)
    for sys in syslist:
        try:
            # Add new features to the list
            features = np.concatenate((features, np.abs(sys.pole())))
            features = np.concatenate((features, np.abs(sys.zero())))
        except Exception:
            # Best effort: systems that cannot report poles/zeros simply
            # contribute nothing.  (Was a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            pass
    # Get rid of poles and zeros at the origin
    features = features[features != 0]
    # Make sure there is at least one point in the range
    if features.shape[0] == 0:
        features = [1]
    # Take the log of the features
    features = np.log10(features)
    #! TODO: Add a check in discrete case to make sure we don't get aliasing
    # Set the range to be an order of magnitude beyond any features.
    # np.logspace replaces sp.logspace, which was removed from scipy's
    # top-level namespace.
    omega = np.logspace(np.floor(np.min(features)) - 1,
                        np.ceil(np.max(features)) + 1)
    return omega
#
# KLD 5/23/11: Two functions to create nice looking labels
#
def get_pow1000(num):
    '''Determine the exponent for which the significand of a number is within the
    range [1, 1000).
    '''
    # Based on algorithm from http://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg14433.html, accessed 2010/11/7
    # by Jason Heeris 2009/11/18
    from decimal import Decimal
    from math import floor
    # Going through str() gives an exact Decimal for float inputs.
    value = Decimal(str(num))
    if value == 0:
        return 0
    # abs() folds the negative branch of the original into one expression.
    return int(floor(abs(value).log10() / 3))
def gen_prefix(pow1000):
    '''Return the SI prefix for a power of 1000.
    '''
    # Prefixes according to Table 5 of [BIPM 2006] (excluding hecto,
    # deca, deci, and centi).
    if not -8 <= pow1000 <= 8:
        raise ValueError("Value is out of the range covered by the SI prefixes.")
    # Ordered from yocto (10^-24) up to yotta (10^24); index 8 is the
    # empty prefix for 10^0.
    prefixes = ('y', 'z', 'a', 'f', 'p', 'n', r'$\mu$', 'm', '',
                'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    return prefixes[pow1000 + 8]
# Function aliases
# Backwards-compatible short names for the *_plot functions above.
bode = bode_plot
nyquist = nyquist_plot
gangof4 = gangof4_plot
| true |
ae377e303d5e1f96c7b96ee63e849688a87e0c18 | Python | jingxinmingzhi/jingxinmingzhi | /python/pycharm/learn/xml_learn/test/xml_xmltodict.py | UTF-8 | 3,011 | 3.5 | 4 | [] | no_license | import xmltodict
from collections import OrderedDict
with open('sample.xml', 'r+', encoding='utf-8') as fp:
    # Convert the XML file into a dict; by default xmltodict returns an
    # OrderedDict.  Note that fp.read() returns a str.
    root = xmltodict.parse(fp.read(), dict_constructor=dict)
    print(root)
    sample = root['root']
    sample['items']['item'][0]['amount'] = 200
    if not sample['items']['item'][0]['owner']:
        sample['items']['item'][0]['owner'] = 'alice'
    sample['items']['item'][0]['update_at'] = '2019-04-13T14:23:53.193Z'
    sample['items']['item'] = list(filter(lambda x: x, sample['items']['item']))
    sample['items']['item'].append({"name": "pen", "price": 1.2})
    # Write the changes back to the XML file.
    # (Translation of the note below: after open("r+") followed by read(),
    # the file pointer sits at the end of the file, so write() appends
    # instead of overwriting.  To overwrite, first seek(0) and truncate().)
    '''当打开文件后,首先用read()对文件的内容读取,然后再用write()写入,这时发现虽然是用“r+”模式打开,按道理是应该覆盖的,但是却出现了追加的情况。
    这是因为在使用read后,文档的指针已经指向了文本最后,而write写入的时候是以指针为起始,因此就产生了追加的效果。
    如果想要覆盖,需要先seek(0),然后使用truncate()清除后,即可实现重新覆盖写入'''
    # fp.seek(0)  # move the file pointer back before byte 0
    # fp.truncate()  # delete everything from byte 0 onwards
    # fp.write(xmltodict.unparse(root, pretty=True))
# with open('sample.xml', 'r+', encoding='utf-8') as fp:
# #将xml文件转换成OrderedDict
# root = xmltodict.parse(fp.read())
# print(root)
# sample = root['root']
# sample['items']['item'][0]['amount'] = 200
# if not sample['items']['item'][0]['owner']:
# sample['items']['item'][0]['owner'] = 'alice'
# sample['items']['item'][0]['update_at'] = '2019-04-13T14:23:53.193Z'
# sample['items']['item'] = list(filter(lambda x: x, sample['items']['item']))
# sample['items']['item'].append({"name": "pen", "price": 1.2})
# # 将改动写回到xml文件中
# '''当打开文件后,首先用read()对文件的内容读取,然后再用write()写入,这时发现虽然是用“r+”模式打开,按道理是应该覆盖的,但是却出现了追加的情况。
# 这是因为在使用read后,文档的指针已经指向了文本最后,而write写入的时候是以指针为起始,因此就产生了追加的效果。
# 如果想要覆盖,需要先seek(0),然后使用truncate()清除后,即可实现重新覆盖写入'''
# fp.seek(0) #指针定位到第0个字节前
# fp.truncate() # 从第0个字节以后的内容全部删除了
# fp.write(xmltodict.unparse(root, pretty=True))
# #xmltodict包中使用#text来访问节点的text,使用 @属性名 访问节点属性,使用子节点名称(标签)访问子节点
# mydict = {
# 'text': {
# '@color':'red',
# '@stroke':'2',
# '#text':'This is a test'
# }
# }
#pretty=True,加换行
# print(xmltodict.unparse(mydict, pretty=True)) | true |
9fcd2ca1883ad775b33f2686b920a87900f23101 | Python | MrLokans/portfoliosite | /backend/apps/about_me/tests.py | UTF-8 | 2,350 | 2.578125 | 3 | [] | no_license | from django.test import TestCase
from django.urls import reverse
from .models import Project, Technology
class ProjectsAPITestCase(TestCase):
    """Integration tests for the technology and project list endpoints."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Resolve the endpoints once for the whole test case.
        cls.projects_url = reverse("projects-list")
        cls.technology_url = reverse("technology-list")

    @staticmethod
    def _technology_payloads():
        """Return fresh copies of the two technology fixtures."""
        unittest_payload = dict(
            name="unittest",
            general_description="Python unit-testing framework",
            mastery_level=Technology.INTERMEDIATE,
        )
        pytest_payload = dict(
            name="pytest",
            general_description="Python unit-testing framework on steroids",
            mastery_level=Technology.NOVICE,
        )
        return unittest_payload, pytest_payload

    def _get_technologies_count(self):
        return Technology.objects.count()

    def _get_projects_count(self):
        return Project.objects.count()

    def test_technology_list_is_displayed(self):
        first, second = self._technology_payloads()
        Technology.objects.create(**first)
        Technology.objects.create(**second)

        resp = self.client.get(self.technology_url)

        self.assertEqual(len(resp.data["results"]), self._get_technologies_count())
        self.assertEqual(resp.data["results"][0], first)
        self.assertEqual(resp.data["results"][1], second)

    def test_project_list_is_displayed(self):
        first, second = self._technology_payloads()
        tech_one = Technology.objects.create(**first)
        tech_two = Technology.objects.create(**second)
        project = Project.objects.create(title="MySuperProject", description="TBD")
        project.technologies.set([tech_one, tech_two])
        project.save()

        resp = self.client.get(self.projects_url)

        self.assertEqual(len(resp.data["results"]), 1)
        payload = resp.data["results"][0]
        self.assertEqual(payload["title"], "MySuperProject")
        self.assertEqual(payload["description"], "TBD")
        self.assertIn("technologies", payload)
        self.assertEqual(len(payload["technologies"]), 2)
        self.assertEqual(payload["technologies"][0]["name"], "unittest")
        self.assertEqual(payload["technologies"][1]["name"], "pytest")
| true |
e4aa8dd9442b58ee1f9c4dc5a3a5ec4bbe22dc0b | Python | venkatsvpr/Problems_Solved | /LC_Path_Crossing.py | UTF-8 | 1,291 | 3.921875 | 4 | [] | no_license | """
1496. Path Crossing
Given a string path, where path[i] = 'N', 'S', 'E' or 'W', each representing moving one unit north, south, east, or west, respectively. You start at the origin (0, 0) on a 2D plane and walk on the path specified by path.
Return True if the path crosses itself at any point, that is, if at any time you are on a location you've previously visited. Return False otherwise.
Example 1:
Input: path = "NES"
Output: false
Explanation: Notice that the path doesn't cross any point more than once.
Example 2:
Input: path = "NESWW"
Output: true
Explanation: Notice that the path visits the origin twice.
Constraints:
1 <= path.length <= 10^4
path will only consist of characters in {'N', 'S', 'E', 'W}
"""
class Solution(object):
    def isPathCrossing(self, path):
        """
        Walk the path from the origin and report whether any point is
        visited twice.

        :type path: str
        :rtype: bool
        """
        # Unit deltas for each compass direction; unknown characters map
        # to (0, 0), i.e. standing still (which counts as a revisit).
        moves = {"N": (0, 1), "S": (0, -1), "E": (1, 0), "W": (-1, 0)}
        px, py = 0, 0
        seen = {(px, py)}
        for step in path:
            dx, dy = moves.get(step, (0, 0))
            px, py = px + dx, py + dy
            if (px, py) in seen:
                return True
            seen.add((px, py))
        return False
| true |
af3b2ecf40b688c83f20917a5940cb88ccb368c9 | Python | martinvw/e-ink-display | /e-ink-display/screens/screens.py | UTF-8 | 1,443 | 2.6875 | 3 | [] | no_license | import openhab
class Screen:
    """Base screen class: a no-op implementation of the screen protocol.

    Subclasses override refresh() and the button label/handler hooks.
    A label of None means the button is unused on this screen.
    """

    def __init__(self) -> None:
        # The base screen holds no state.
        return

    def refresh(self) -> None:
        # Subclasses re-read their backing data here.
        return

    def button_2_label(self) -> str:
        return None

    def button_2_handler(self) -> None:
        return

    def button_3_label(self) -> str:
        return None

    def button_3_handler(self) -> None:
        return

    def button_4_label(self) -> str:
        return None

    def button_4_handler(self) -> None:
        return
class Heos1Screen(Screen):
    """Screen driving a HEOS 1 speaker through openHAB items.

    Button 2 toggles play/pause, button 4 toggles mute; the labels are
    icon-font code points chosen to match the current state.
    """

    play = None
    mute = None
    artist = None

    def __init__(self, openhab_conn: 'openhab.client.OpenHAB') -> None:
        super().__init__()
        self.openhab = openhab_conn
        # Populate play/mute items immediately so the labels are valid.
        self.refresh()

    def refresh(self) -> None:
        # Re-fetch the openHAB items backing this screen.
        members = self.openhab.get_item("eInkHeos1Screen").members
        self.play = members.get('HEOS1Control')
        self.mute = self.openhab.get_item("HEOS1Mute")

    def button_2_label(self) -> str:
        # Pause icon while playing, play icon otherwise.
        return '\uecaa' if self.play.state == 'PLAY' else '\uec72'

    def button_2_handler(self) -> None:
        if self.play.state == 'PLAY':
            self.play.pause()
        else:
            self.play.play()

    def button_4_label(self) -> str:
        # Muted icon when mute is ON, speaker icon otherwise.
        return '\uecb8' if self.mute.state == 'ON' else '\uecb7'

    def button_4_handler(self) -> None:
        self.mute.toggle()
| true |
6b7370650d4a931697e3c06ad841a4ab809eb69b | Python | done-n-dusted/SpeechEmotionRecognition | /text_test/running_models_boW.py | UTF-8 | 2,622 | 2.734375 | 3 | [] | no_license | # training and testing on various model for BoW features
import os
# Silence TensorFlow's C++ logging before TF is imported ('3' = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # or any {'0', '1', '2'}
import sys
# Make the project-level STFE package importable from this sub-directory.
sys.path.insert(1, '../')
from STFE import Models, DataPreparer
from tensorflow.keras import optimizers
import json
def dump_dict(data, file_name):
    """Serialize a metrics mapping to *file_name* as JSON.

    The first parameter was previously named ``dict``, shadowing the
    builtin; renamed to ``data`` (every call in this script is positional).
    """
    with open(file_name, 'w') as convert_file:
        json.dump(data, convert_file)
# Load the pre-extracted bag-of-words features.
DP = DataPreparer.DataPreparer('bow_features.npy')
class_names = ['anger', 'sadness']
# NOTE(review): this file handle is never closed; only the line count
# (vocabulary size) is used below.
vocab = open('vocab.txt')
vocab_size = len(vocab.readlines())
time_step = 30
sgd = optimizers.SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
# Per-class weights: up-weight class index 1 ('sadness') against imbalance.
cws = [1, 1.8]
class_weights = {}
for i in range(len(class_names)):
    class_weights[i] = cws[i]
print(class_weights)
DP.scale_data()
print("\nOrganized data for training\n")
# Interactive loop: train and evaluate the model the user picks, dump its
# test metrics to result/, then exit.
while(True):
    model_name = input("BCLSTM or NNN or TEXTCNN\n")
    if model_name == 'BCLSTM':
        # BC-LSTM consumes sequences of `time_step` BoW vectors.
        DP.set_timestep(time_step)
        X_train, y_train, X_test, y_test, X_dev, y_dev = DP.get_matrices()
        print('\nBCLSTM MODEL\n')
        bclstm = Models.BC_LSTM(10, 0.3, class_names, (30, vocab_size, ))
        bclstm.model_compile(sgd)
        bclstm.model_fit(class_weights, 150, X_train, y_train, X_dev, y_dev, fig_name = 'bow_BCLSTM')
        bclstm_metrics = bclstm.get_metrics(X_test, y_test)
        dump_dict(bclstm_metrics, 'result/bclstm_bow.json')
        print("METRICS\n")
        print(bclstm)
        break
    elif model_name == 'NNN':
        # Plain feed-forward network over single BoW vectors.
        X_train, y_train, X_test, y_test, X_dev, y_dev = DP.get_matrices()
        print('\nNEURAL NETWORK MODEL\n')
        nnn = Models.NormalNeuralNetwork(0.3, class_names, (vocab_size, ))
        nnn.model_compile(sgd)
        nnn.model_fit(class_weights, 150, X_train, y_train, X_dev, y_dev, fig_name = 'bow_NNN')
        nnn_metrics = nnn.get_metrics(X_test, y_test)
        print(nnn_metrics)
        dump_dict(nnn_metrics, 'result/nnn_bow.json')
        print("METRICS\n")
        print(nnn_metrics)
        break
    elif model_name == 'TEXTCNN':
        # Text-CNN uses short windows of 5 BoW vectors.
        DP.set_timestep(5)
        X_train, y_train, X_test, y_test, X_dev, y_dev = DP.get_matrices()
        print('\nTEXT CONV NEURAL NETWORK\n')
        nnn = Models.TextCNN(class_names, (5, vocab_size, ))
        nnn.model_compile(sgd)
        nnn.model_fit(class_weights, 150, X_train, y_train, X_dev, y_dev, fig_name = 'bow_TCNN')
        nnn_metrics = nnn.get_metrics(X_test, y_test)
        print(nnn_metrics)
        dump_dict(nnn_metrics, 'result/text_bow.json')
        print("METRICS\n")
        print(nnn_metrics)
        break
    else:
        print("Invalid Model name")
| true |
b274965e80fef1cfbff76aa23487615474a73a35 | Python | prasanna1695/python-code | /3-2.py | UTF-8 | 1,143 | 4.46875 | 4 | [] | no_license | # How would you design a stack which, in addition to push and pop, also has a function min which returns the minimum element?
#Push, pop and min should all operate in O(1) time.
#you would simply need to store a variable called min and update it as needed.
class Stack:
    """Stack with O(1) push, pop and min.

    A parallel stack records the minimum of all elements at or below each
    position, so min() never rescans the data.  (The original recomputed
    ``min(self.stack)`` inside pop(), which is O(n) and violated the O(1)
    requirement its own comment acknowledged.)
    """

    def __init__(self):
        self.minimum = None      # current minimum (None when empty)
        self.stack = []          # the stored elements
        self._min_stack = []     # _min_stack[i] == min(stack[:i + 1])

    def push(self, item):
        """Push *item* onto the stack in O(1)."""
        self.stack.append(item)
        if self._min_stack:
            current_min = min(item, self._min_stack[-1])
        else:
            current_min = item
        self._min_stack.append(current_min)
        self.minimum = current_min

    def pop(self):
        """Pop and return the top item in O(1).

        Raises NameError when the stack is empty (kept for compatibility
        with the original implementation's behavior).
        """
        if self.stack == []:
            raise NameError("can't pop an empty stack")
        popped_data = self.stack.pop()
        self._min_stack.pop()
        self.minimum = self._min_stack[-1] if self._min_stack else None
        return popped_data

    def min(self):
        """Return the current minimum (None when empty) in O(1)."""
        return self.minimum
print "Test1: push 3,2,1,5,6; ask min;"
x = Stack()
x.push(3)
x.push(2)
x.push(1)
x.push(5)
x.push(6)
print x.min() == 1
print "Test2: pop 6,5,1; ask min;"
x.pop()
x.pop()
x.pop()
print x.min() == 2
print "Test3: pop 2,3 and error"
x.pop()
x.pop()
try:
x.pop()
except NameError as e:
print 'True'
| true |
91b8a2364a38a607900f8184bf6e400eb239ffcc | Python | MarloDelatorre/leetcode | /1046_Last_Stone_Weight.py | UTF-8 | 815 | 3.5625 | 4 | [] | no_license | from heapq import heapify, heappush, heappop
from unittest import main, TestCase
class Solution():
    @staticmethod
    def lastStoneWeight(stones):
        """Repeatedly smash the two heaviest stones; return the survivor (0 if none)."""
        # Simulate a max-heap by negating every weight, then heapify in
        # one O(n) pass instead of pushing items one at a time.
        max_heap = [-weight for weight in stones]
        heapify(max_heap)
        while len(max_heap) > 1:
            heaviest = heappop(max_heap)
            second = heappop(max_heap)
            if heaviest != second:
                # Both values are negative, so the difference stays negative.
                heappush(max_heap, heaviest - second)
        return -heappop(max_heap) if max_heap else 0
class Test(TestCase):
    """Unit tests for Solution.lastStoneWeight."""

    def test_given_case(self):
        # LeetCode's sample input; successive smashes leave a single 1.
        self.assertEqual(Solution.lastStoneWeight([2, 7, 4, 1, 8, 1]), 1)
    def test_empty_case(self):
        # No stones at all must yield 0 rather than raising.
        self.assertEqual(Solution.lastStoneWeight([]), 0)
    def test_all_same_values(self):
        # Equal pairs annihilate completely.
        self.assertEqual(Solution.lastStoneWeight([2, 2, 2, 2]), 0)
if __name__ == '__main__':
main() | true |
d2192b2289eaaa64eea2216fa118469075afe155 | Python | bgoonz/UsefulResourceRepo2.0 | /_PYTHON/DATA_STRUC_PYTHON_NOTES/python-prac/mini-scripts/python_Join_Two_Lists__extend().txt.py | UTF-8 | 76 | 3.203125 | 3 | [
"MIT"
] | permissive | list1 = ["a", "b", "c"]
list2 = [1, 2, 3]
# extend() appends every element of list2 to list1 in place.
list1.extend(list2)
print(list1)
| true |
9982bc9a93696ba5d351ef4ac62bd2e05effdeb1 | Python | ismael-wael/Hospital-management-system-tkinter-GUI- | /managePatients.py | UTF-8 | 7,525 | 2.609375 | 3 | [] | no_license | from tkinter import *
import tkinter as tk
from tkinter import ttk
from GUI_Functions import *
import xlsxwriter
import xlrd
from helperFunctions import *
# Entry widgets/variables for the patient form, filled in `headings` order.
holdPatientData = []
# Column headings of patientsRecords.xlsx, in on-disk column order (10 cols).
headings = ["patient ID", "Dep. Name", "Doctor", "Name","Age",
            "Gender", "Address", "Room number", "phone number", "diagnose"]
def addPatient(y):
    """Open the "Add patient" window, hiding the caller window *y*."""
    global holdy
    holdy = y
    global addPatientWindow
    addPatientWindow = Toplevel()
    addPatientWindow.geometry("320x300+1000+100")
    addPatientWindow.title("Add patient")
    y.withdraw()
    # Restore the caller window when this one is closed.
    addPatientWindow.protocol("WM_DELETE_WINDOW",retriveAdminFromAdd)
    global headings
    #clear old data in holdPatientData
    holdPatientData.clear()
    # One label + entry row per record column, 20 px apart.
    row = 20
    for heading in headings :
        Create_label(addPatientWindow,heading + " : " ,("Times New Roman", 10) , 20, row)
        entry , z = Create_Entry(addPatientWindow , 25 , 110 ,row)
        holdPatientData.append(entry)
        row += 20
    Create_button(addPatientWindow, 15 , "Submit" , addPatientFunc
                  , ("Times New Roman", 10) , 20 , 240)
def deletePatient(y):
    """Open the "Delete patient" window, hiding the caller window *y*."""
    global holdy
    holdy = y
    global deletePatientWindow
    deletePatientWindow = Toplevel()
    deletePatientWindow.geometry("300x100+1000+100")
    deletePatientWindow.title("Delete patient")
    y.withdraw()
    # Restore the caller window when this one is closed.
    deletePatientWindow.protocol("WM_DELETE_WINDOW",retriveAdminFromDelete)
    Create_label(deletePatientWindow,"Enter patient ID :" ,("Times New Roman", 10) , 10, 20)
    global patientID
    patientID , z = Create_Entry(deletePatientWindow , 25 , 120 ,20)
    Create_button(deletePatientWindow, 15 , "Submit" , deletePatientFunc
                  , ("Times New Roman", 10) , 10 , 60)
def editPatient(y):
    """Open the "Edit patient" window, hiding the caller window *y*."""
    global holdy
    holdy = y
    global editPatientWindow
    editPatientWindow = Toplevel()
    editPatientWindow.geometry("320x350+1000+100")
    editPatientWindow.title("Edit patient")
    y.withdraw()
    # Restore the caller window when this one is closed.
    editPatientWindow.protocol("WM_DELETE_WINDOW",retriveAdminFromEdit)
    Create_label(editPatientWindow,"Enter patient ID :" ,("Times New Roman", 10) , 10, 20)
    global patientID
    patientID , z = Create_Entry(editPatientWindow , 25 , 120 ,20)
    Create_button(editPatientWindow, 15 , "Submit" , editPatientFunc
                  , ("Times New Roman", 10) , 10 , 60)
def displayPatient(y):
    """Open the "Display patient" window, hiding the caller window *y*."""
    global holdy
    holdy = y
    global displayPatientWindow
    displayPatientWindow = Toplevel()
    displayPatientWindow.geometry("300x300+1000+100")
    displayPatientWindow.title("Display patient")
    y.withdraw()
    # Restore the caller window when this one is closed.
    displayPatientWindow.protocol("WM_DELETE_WINDOW",retriveAdminFromDisplay)
    Create_label(displayPatientWindow,"Enter patient ID :" ,("Times New Roman", 10) , 10, 20)
    global patientID
    patientID , z = Create_Entry(displayPatientWindow , 25 , 120 ,20)
    Create_button(displayPatientWindow, 15 , "Submit" , displayPatientFunc
                  , ("Times New Roman", 10) , 10 , 60)
def displayAllPatient(y):
    """Open a window listing every record in patientsRecords.xlsx."""
    global holdy
    holdy = y
    global displayAllPatientWindow
    displayAllPatientWindow = Toplevel()
    displayAllPatientWindow.geometry("300x300+1000+100")
    displayAllPatientWindow.title("All patient")
    y.withdraw()
    # Restore the caller window when this one is closed.
    displayAllPatientWindow.protocol("WM_DELETE_WINDOW",retriveAdminFromDisplayAll)
    text = CreateTextScrollbar(displayAllPatientWindow , 300 , 300)
    # Give the location of the file
    loc = ("patientsRecords.xlsx")
    # To open Workbook
    wb = xlrd.open_workbook(loc)
    sheet = wb.sheet_by_index(0)
    global headings
    # For row 1 and column 0
    row = 1
    col = 0
    # Records are read row by row until the "EOF" sentinel in column 0;
    # each record has 10 columns matching `headings`.
    while sheet.cell_value(row, col) != "EOF" :
        while col < 10 :
            text.insert("end", headings[col] + " : ")
            text.insert("end", sheet.cell_value(row, col))
            text.insert("end", '\n')
            col += 1
        text.insert("end", '******************************\n')
        col = 0
        row += 1
####################################################################################
def addPatientFunc():
    """Persist the patient record currently held in the form entries.

    Rejects the record when the entered patient ID already exists in the
    workbook (IDs must be unique).
    """
    if checkIfExist(holdPatientData[0].get(),"patientsRecords.xlsx",0):#edit existing entry
        # FIX: the error labels were created on `bookAppointmentWindow`,
        # a name not defined in this module; the feedback belongs on the
        # add-patient window that owns these entries.
        Create_label(addPatientWindow,"Aleardy Exist!",("Times New Roman", 10) , 150, 240)
        Create_label(addPatientWindow,"(ID must be unique)",("Times New Roman",10),150,260)
    else :#new entry
        newEntry("patientsRecords.xlsx",holdPatientData,10)
def deletePatientFunc():
    """Delete the record whose ID was typed into the delete window."""
    global patientID
    if checkIfExist(patientID.get(),"patientsRecords.xlsx",0) :
        deleteEntry(patientID.get(),"patientsRecords.xlsx",10)
        #clear last writings here ("Not Found!")
        Create_label(deletePatientWindow,"          " ,
                     ("Times New Roman", 10) , 150, 60)
        #clean writing
        Create_label(deletePatientWindow,"Done!" ,("Times New Roman", 10) , 150, 60)
    else:
        Create_label(deletePatientWindow,"Not Found!" ,("Times New Roman", 10) , 150, 60)
def editPatientFunc():
    """Load an existing record into editable entry fields.

    The matching row is read from the workbook and then deleted; its
    values pre-fill a fresh set of Entry widgets.  Pressing Submit calls
    addPatientFunc(), which re-inserts the (possibly modified) record.
    """
    global patientID
    if checkIfExist(patientID.get(),"patientsRecords.xlsx",0) :
        index = returnRowNum(patientID.get(),"patientsRecords.xlsx")
        oldData = []
        # Give the location of the file
        loc = ("patientsRecords.xlsx")
        # To open Workbook
        wb = xlrd.open_workbook(loc)
        sheet = wb.sheet_by_index(0)
        # Copy all 10 column values of the matched row.
        col = 0
        while col < 10 :
            oldData.append(sheet.cell_value(index, col))
            col += 1
        #delete old data from fileSystem
        deleteEntry(patientID.get(),"patientsRecords.xlsx",10)
        #clear data in holdPatientData dict
        holdPatientData.clear()
        global headings
        row = 90
        i = 0
        # One label + pre-filled entry per column of the record.
        for heading in headings :
            Create_label(editPatientWindow,heading + " : " ,("Times New Roman", 10) , 10, row)
            x , z = Create_Entry(editPatientWindow , 25 , 110 ,row)
            holdPatientData.append(x)
            x.set(oldData[i])
            row += 20
            i += 1
        Create_button(editPatientWindow, 15 , "Submit" , addPatientFunc
                      , ("Times New Roman", 10) , 10 , 300)
    else:
        Create_label(editPatientWindow,"Not Found!" ,("Times New Roman", 10) , 150, 60)
def displayPatientFunc():
    """Render the record for the ID typed into the display window."""
    global patientID
    if checkIfExist(patientID.get(),"patientsRecords.xlsx",0) :
        global headings
        # Delegate rendering of the matched row to the shared helper.
        printRow(displayPatientWindow ,returnRowNum(patientID.get(),"patientsRecords.xlsx")
                 ,"patientsRecords.xlsx",headings,10)
    else:
        Create_label(displayPatientWindow,"Not Found!" ,("Times New Roman", 10) , 150, 60)
###########################################################################
def retriveAdminFromAdd():
    """Close the add-patient window and restore the admin window."""
    global holdy
    global addPatientWindow
    holdy.deiconify()
    addPatientWindow.destroy()
def retriveAdminFromEdit():
    """Close the edit-patient window and restore the admin window."""
    global holdy
    global editPatientWindow
    holdy.deiconify()
    editPatientWindow.destroy()
def retriveAdminFromDelete():
    """Close the delete-patient window and restore the admin window."""
    global holdy
    global deletePatientWindow
    holdy.deiconify()
    deletePatientWindow.destroy()
def retriveAdminFromDisplay():
    """Close the display-patient window and restore the admin window."""
    global holdy
    global displayPatientWindow
    holdy.deiconify()
    displayPatientWindow.destroy()
def retriveAdminFromDisplayAll():
    """Close the all-patients window and restore the admin window."""
    global holdy
    global displayAllPatientWindow
    holdy.deiconify()
    displayAllPatientWindow.destroy()
| true |
4f6652f3a38bf843521c85f34ed599202abc4585 | Python | miroslavpetkovic/python-meme-generator-project | /src/app.py | UTF-8 | 2,617 | 2.890625 | 3 | [] | no_license | import random
import os
import requests
from flask import Flask, render_template, abort, request
from MemeEngine import MemeEngine
from QuoteEngine import Importer
from QuoteEngine import QuoteModel
# Resolve the directory containing this file; Flask serves it as the
# static folder so generated meme images can be returned by path.
dir_path = os.path.dirname(os.path.realpath(__file__))
app = Flask(__name__, static_folder=dir_path)
meme = MemeEngine(dir_path)
def setup():
    """Load quote and image resources from the bundled _data directory.

    Returns a (quotes, imgs) pair: all parsed QuoteModel objects and the
    relative paths of every file in the dog-photo folder.
    """
    quote_sources = [
        './_data/DogQuotes/DogQuotesTXT.txt',
        './_data/DogQuotes/DogQuotesDOCX.docx',
        './_data/DogQuotes/DogQuotesPDF.pdf',
        './_data/DogQuotes/DogQuotesCSV.csv',
    ]
    # Flatten every supported quote file into a single list of quotes.
    quotes = [quote for source in quote_sources
              for quote in Importer.parse(source)]
    images_path = "./_data/photos/dog/"
    # Every directory entry is treated as an image; keep the full path.
    imgs = [images_path + entry for entry in os.listdir(images_path)]
    return quotes, imgs
# Load quote corpus and image paths once at import time.
quotes, imgs = setup()
@app.route('/')
def meme_rand():
    """ Generate a random meme """
    # Use the random python standard library class to:
    # 1. select a random image from imgs array
    # 2. select a random quote from the quotes array
    img = random.choice(imgs)
    quote = random.choice(quotes)
    path = meme.make_meme(img, quote.body, quote.author)
    # make_meme returns the generated file's path -- presumably inside
    # dir_path so Flask can serve it as a static file; confirm MemeEngine.
    print(path)
    return render_template('meme.html', path=path)
@app.route('/create', methods=['GET'])
def meme_form():
    """ User input for meme information """
    # Plain HTML form; its submission is handled by the POST /create route.
    return render_template('meme_form.html')
@app.route('/create', methods=['POST'])
def meme_post():
    """Create a user-defined meme from a posted image URL and quote.

    Downloads the image at ``image_url`` to a uniquely named temp file in
    the static folder, renders the meme with the submitted body/author
    (or a random quote when either field is blank), removes the temporary
    download, and renders the result page.
    """
    # 1. Save the image from the image_url form param to a temp local file.
    image_url = request.form.get('image_url')
    r = requests.get(image_url)
    tmp = dir_path + '/' + str(random.randint(0, 100000000)) + '.png'
    with open(tmp, 'wb') as f:
        f.write(r.content)
    # 2. Use the submitted quote only when both fields were filled in;
    #    otherwise fall back to a random quote from the loaded corpus.
    if request.form.get('body') != "" and request.form.get('author') != "":
        quote = QuoteModel(request.form.get('body'),
                           request.form.get('author'))
    else:
        quote = random.choice(quotes)
    path = meme.make_meme(tmp, quote.body, quote.author)
    # 3. Remove the temporary download now that the meme has been written
    #    (make_meme writes a separate output file). Previously this call
    #    was commented out, leaking one temp file per request.
    os.remove(tmp)
    print(path)
    return render_template('meme.html', path=path)
if __name__ == "__main__":
app.run()
| true |
d10b64e47a1a45c45b52bebb0c862311466b4165 | Python | MithVert/P5 | /model/categorie.py | UTF-8 | 1,288 | 2.75 | 3 | [] | no_license | import mysql.connector
class Categorie():
    """One product category row in the ``Categories`` table.

    Wraps lookup/insert of the category itself and insertion of
    product/category join rows in ``Relations``.
    """

    def __init__(self, sqlmng, idc=None, name=None):
        # sqlmng is expected to expose a mysql.connector connection as
        # ``cnx`` -- TODO confirm against the SQL-manager implementation.
        self.sqlmng = sqlmng
        self.id = idc
        self.name = name
        # Flipped to False when the lookup query itself errors out.
        self.valid = True

    def update(self):
        """Fetch this category's id from the DB, inserting it if missing."""
        # Parameterized query: the previous version interpolated
        # self.name into the SQL text with an f-string, which is open to
        # SQL injection and breaks on names containing quotes.
        query = (
            "SELECT id, Categorie FROM Categories "
            "WHERE Categorie = %s"
        )
        try:
            cur = self.sqlmng.cnx.cursor(dictionary=True)
            cur.execute(query, (self.name,))
            result = [row for row in cur]
        except mysql.connector.errors.ProgrammingError:
            # Preserve the original best-effort behavior: mark the
            # instance invalid and skip both the INSERT and the id read.
            self.valid = False
            result = True
        if not result:
            # No matching row: create it and remember the generated id.
            query = (
                "INSERT INTO Categories (Categorie) VALUES (%s)"
            )
            value = (self.name,)
            cur = self.sqlmng.cnx.cursor()
            cur.execute(query, value)
            self.sqlmng.cnx.commit()
            self.id = cur.lastrowid
        elif self.valid:
            self.id = result[0]["id"]

    def updaterelation(self, product):
        """Insert a (product.id, self.id) link row into ``Relations``."""
        query = (
            "INSERT INTO Relations VALUES (%s, %s)"
        )
        values = (product.id, self.id)
        cur = self.sqlmng.cnx.cursor()
        cur.execute(query, values)
        self.sqlmng.cnx.commit()

    def getproducts(self):
        # Not implemented yet.
        pass
| true |
a401af168065db964e42d4b9b78c07ccd3b31fee | Python | melwinjose1991/LearningMachineLearning | /python/learning - tensor-flow/Basics/linear_regression.py | UTF-8 | 3,529 | 3.625 | 4 | [] | no_license | # from : https://github.com/nlintz/TensorFlow-Tutorials/blob/master/01_linear_regression.py
import tensorflow as tf
import numpy as np
# TF1-style linear-regression demo: fit w in y = w*x to noisy data y ~ 2x
# with per-sample gradient descent, so w should converge toward 2.
# NOTE: uses TensorFlow 1.x APIs (placeholder/Session); under TF 2.x this
# needs tf.compat.v1 with eager execution disabled.
'''
linspace(): Returns 101 evenly spaced samples, calculated over the interval [-1, 1].
'''
trX = np.linspace(-1, 1, 101)
print(trX)
'''
randn(): Return a sample (or samples) from the 'standard normal' distribution
        mean=0 AND std.dev=1
create a y value which is approximately linear but with some random noise
     0.33 = std.dev
    2*trX = mean
    trX.shape = n = 101
'''
trY = 2 * trX + np.random.randn(*trX.shape) * 0.33
X = tf.placeholder("float") # create symbolic variables
Y = tf.placeholder("float")
'''
tf.Vairable(<init-value>, name=<name>, trainable=True)
When you train a model, you use variables to hold and update parameters. Variables
are in-memory buffers containing tensors. They must be explicitly initialized and
can be saved to disk during and after training. You can later restore saved values
to exercise or analyze the model.
'''
# create a shared variable for the weight matrix
w = tf.Variable(0.0, name="weights")
'''
Variable initializers must be run explicitly before other ops in your model
can be run. The easiest way to do that is to add an op that runs all the
variable initializers, and run that op before using the model.
'''
init_op = tf.global_variables_initializer()
# use square error for cost_function function
cost_function = tf.square(Y - tf.multiply(X, w))
'''
The Optimizer base class provides methods to compute gradients for a loss
and apply gradients to variables. A collection of subclasses implement
classic optimization algorithms such as GradientDescent and Adagrad.
You never instantiate the Optimizer class itself, but instead instantiate
one of the subclasses.
minimize(loss, var_list=None)
Calling minimize() takes care of both computing the gradients and applying
them to the variables.
- loss: A Tensor containing the value to minimize.
- var_list: Optional list of Variable objects to update to minimize loss.
Defaults to the list of variables collected in the graph under the key
GraphKeys.TRAINABLE_VARIABLES.
'''
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost_function)
# Launch the graph in a session
with tf.Session() as sess:
    # you need to initialize variables (in this case just variable W)
    sess.run(init_op)
    # 25 epochs of per-sample SGD over the 101 training points.
    for i in range(25):
        for (x, y) in zip(trX, trY):
            sess.run(train_op, feed_dict={X: x, Y: y})
        print("W:",sess.run(w),"after i:",i)
    print(sess.run(w))
'''
Session.run(fetches, feed_dict=None, options=None, run_metadata=None)
Runs operations and evaluates tensors in fetches.
This method runs one "step" of TensorFlow computation, by running the necessary graph
fragment to execute every Operation and evaluate every Tensor in fetches, substituting
the values in feed_dict for the corresponding input values.
The fetches argument may be a single graph element, or an arbitrarily nested list,
tuple, namedtuple, dict, or OrderedDict containing graph elements at its leaves.
A graph element can be one of the following types:
- An tf.Operation: The corresponding fetched value will be None.
- A tf.Tensor: The corresponding fetched value will be a numpy ndarray containing
the value of that tensor.
feed_dict argument allows the caller to override the value of tensors in the graph
'''
c78751529ba42622d1110bd1267e453478c57ac9 | Python | p-jacquot/ISN | /test.py | UTF-8 | 2,633 | 2.578125 | 3 | [] | no_license | # Créé par PJACQUOT, le 21/03/2016 en Python 3.2
import pygame
from jeu import Jeu
from fenetre import Fenetre
from molecule import Molecule
from dialogue import Dialog
from niveau import Niveau
import constantes
from pattern import *
import pickle
import niveau
def testplay():
    """Set up the player molecule and start the level's play loop.

    Mutates the module-level `jeu` game object: places the hydrogen
    player sprite at bottom-centre, gives it 10 HP, sets the scroll
    speed, then hands control to jeu.progressInLevel().
    """
    jeu.moleculeJoueur = Molecule('hydrogene.png', Pattern(0,0))
    jeu.moleculeJoueur.posX = constantes.largeur/2
    jeu.moleculeJoueur.posY = constantes.hauteur-35
    # Move the sprite's rect to match the chosen position.
    jeu.moleculeJoueur.rect = jeu.moleculeJoueur.rect.move(jeu.moleculeJoueur.posX, jeu.moleculeJoueur.posY)
    jeu.moleculeJoueur.hp = 10
    jeu.vitesse = 4.5
    # Disabled enemy-spawning experiments kept for reference:
    """jeu.ennemyList.append(Molecule('azote.png', PatternCercle(150,60,25,4,2)))
    jeu.ennemyList.append(Molecule('oxygene.png', PatternZigZag(20,1)))
    jeu.ennemyList.append(Molecule('carbone.png', PatternPolynome(1,1,1)))"""
    #jeu.ennemyList.append(Molecule('cortizone.png', Pattern(0,0)))
    """for a in jeu.ennemyList:
        a.posX =randint(15,200)
        a.posY = randint(15,200)"""
    #jeu.ennemyList.append(Molecule('hydrogene.png', PatternSinusoidal(5,1)))
    jeu.progressInLevel()
def testDialog():
    """Build a sample two-character dialogue, pickle it, then play it.

    The dialogue is also saved to 'dialogue.pickle' so it can be reloaded
    by testSerializedDialogue(). Punchline entries are [text, speaker_idx].
    """
    #ricken = pygame.image.load("resources/temporaire/Ricken.png").convert_alpha()
    #tharja = pygame.image.load("resources/temporaire/Tharja.png").convert_alpha()
    dialogue = Dialog("resources/temporaire/Tharja.png", "Tharja", (10, 210), "resources/temporaire/Ricken.png", "Ricken", (500, 200))
    dialogue.punchlineList.append(["Il semblerait que mon sort n'ait pas fonctionné...", 0])
    dialogue.punchlineList.append(["Hein ? Tu as dit quelque chose ?", 1])
    dialogue.punchlineList.append(["Non non... Rien... hé hé...", 0])
    with open('dialogue.pickle', 'wb') as file:
        pickle.dump(dialogue, file)
    jeu.dialoguer(dialogue)
def testSerializedDialogue():
    """Load a previously pickled dialogue from resources and play it."""
    with open('resources/temporaire/dialogue.pickle', 'rb') as file:
        dialogue = pickle.load(file)
    jeu.dialoguer(dialogue)
# --- module-level test driver (runs on import) ---
pygame.init()
pygame.mixer.init()
fenetre = Fenetre("test ISN Dialogue", constantes.largeur, constantes.hauteur)
fenetre.fond = pygame.image.load("resources/galaxie.jpg").convert_alpha()
# Load the three scripted dialogues of level 1 from their pickle files.
with open('resources/niveau/1/firstDialog.pickle', 'rb') as file:
    firstDialog = pickle.load(file)
with open('resources/niveau/1/middleDialog.pickle', 'rb') as file:
    middleDialog = pickle.load(file)
with open('resources/niveau/1/lastDialog.pickle', 'rb') as file:
    lastDialog = pickle.load(file)
jeu = Jeu(fenetre,Niveau(1),0,0)
# NOTE(review): this loop is a no-op -- convert_alpha() returns a *new*
# surface and rebinding the loop variable does not update the list, so
# the converted surfaces are discarded. Confirm whether the list should
# be rebuilt in place instead.
for explode in constantes.explodeList:
    explode = explode.convert_alpha()
#testDialog()
#testSerializedDialogue()
testplay()
pygame.quit()
| true |
1876e5b09b664ef0b353670e137950d3e1270558 | Python | wammar/wammar-utils | /convert-conll-format-to-sent-per-line.py | UTF-8 | 1,451 | 3.015625 | 3 | [] | no_license | import io
import argparse
# parse/validate arguments
# Convert a CoNLL-format file (one token per line, blank line between
# sentences) into a sentence-per-line file, keeping only the selected
# columns of each token joined by a delimiter.
argparser = argparse.ArgumentParser()
argparser.add_argument("-i", "--input_filename", required=True)
argparser.add_argument("-o", "--output_filename", required=True)
argparser.add_argument("-d", "--delimiter", default="_")
argparser.add_argument("-c", "--columns", default="2,5", help="comma-delimited list of column numbers (one-based) to be copy for each token to the output file.")
args = argparser.parse_args()

# Convert the one-based column list into zero-based indices.
columns = args.columns.split(',')
columns = [int(column)-1 for column in columns]

with io.open(args.input_filename, encoding='utf8') as input_file, io.open(args.output_filename, encoding='utf8', mode='w') as output_file:
    tokens = []
    for in_line in input_file:
        # A blank line marks the end of a sentence in CoNLL format.
        in_line = in_line.strip()
        if len(in_line) == 0:
            if len(tokens):
                output_file.write(' '.join(tokens) + u'\n')
                tokens = []
            continue
        # Parse one tab-separated CoNLL token line; keep chosen columns.
        fields = in_line.split('\t')
        selected_fields = [fields[column] for column in columns]
        for selected_field in selected_fields:
            if args.delimiter in selected_field:
                # print() works on both Python 2 and 3; the previous bare
                # print statement was a SyntaxError under Python 3.
                print('WARNING: one of the selected fields "' + selected_field + '" already contains the designated delimiter "' + args.delimiter + '"')
        token = args.delimiter.join(selected_fields)
        tokens.append(token)
    # Write the last sentence (file may not end with a blank line).
    if len(tokens):
        output_file.write(' '.join(tokens) + u'\n')
| true |
fcd3e0cde7fec1d734cb945d7041906be2618a09 | Python | Deepaklal123/Python | /Chapter_02/prac_q_04_input_function.py | UTF-8 | 220 | 3.6875 | 4 | [] | no_license | #Author: Deepak Lal
# Sukkur IBA University
a= input(" Enter your name ") # input() always returns a string
print(a)
num1= input(" Enter your age ") # input() always returns a string
num1=int(num1)
print(num1) | true |
998e4c5ab35b65a3e242d0ef51809c2211d9861b | Python | gz5678/CrypticCrosswordSolver | /CrypticSolver.py | UTF-8 | 3,713 | 3.84375 | 4 | [] | no_license | import string
from SolutionFormat import SolutionFormat
from ClueSolver import solve
def CrypticSolver():
    """Interactive loop: read clues plus optional solution hints and
    print ranked answers, until print_end() reports the user quit."""
    print_header()
    run = True
    while run:
        # Get the clue, strip punctuation and change to lower case
        clue_str = input("Insert the clue:\n").translate(str.maketrans('', '', string.punctuation))
        clue = [word.lower() for word in clue_str.split(" ")]
        length_str = input("Insert the number of letters in the solution:\n")
        if length_str == "":
            # No length info supplied: solve without a format constraint.
            solution_format = None
        else:
            try:
                # Get lengths of words (comma-separated, one per word)
                split = length_str.split(",")
                lengths = [int(length) for length in split]
            except ValueError:
                print("Illegal input. Format will be ignored when trying to solve the clue.")
                lengths = []
            if len(lengths) > 0:
                format_str = input("Insert the known letters in the solution.\n")
                try:
                    # Get known letters ('_' marks unknown positions)
                    solution_format = SolutionFormat(len(lengths), lengths, format_str)
                except ValueError:
                    print("Illegal input. Format will be ignored when trying to solve the clue.")
                    solution_format = None
            else:
                solution_format = None
        solutions = solve(clue, solution_format)
        print_solutions(solutions)
        run = print_end()
def print_header():
    """Print the one-time welcome banner and usage guide, then wait for
    the user to press Enter before returning."""
    print("Welcome to our Cryptic Clue Solver 1.0.\n"
          "This program allows you to get help with your cryptic crossword, with others being judgemental of your abilities.\n"
          "----------------------------------------------------------------------------------------------\n"
          "Guide for the program:\n"
          "The first part is simple, just enter your clue and press enter.\n"
          "Then you would need to insert anything you know about the solution. First, insert the number of letters in the solution.\n"
          "If the solution has multiple words, simply enter their lengths by order, separated by a comma (,).\n"
          "If you don't know the number of letters, don't worry, we still have you covered. Just press enter and our program will try to solve the clue anyway.\n"
          "If you have entered the number of letters, not you can enter the letters you already know.\n"
          "Simply write down the entire solution, with an underscore (_) replacing any letter you don't know.\n"
          "Make sure to add spaces between words and to add the right number of letters and underscores.\n"
          "If you don't know any letter, you can simply press enter.\n"
          "Okay, we are done with the explanations. You can now start trying out our program. Press enter to start.")
    # Block until Enter so the user can read the guide.
    input()
    print("----------------------------------------------------------------------------------------------")
def print_end():
    """Ask whether to continue; return False (with a goodbye) on 'quit',
    otherwise print a separator and return True."""
    i = input("To quit insert 'quit'. To continue press enter.\n")
    if i == 'quit':
        print("Thank you for using our Cryptic Clue Solver 1.0!")
        return False
    else:
        print("----------------------------------------------------------------------------------------------")
        return True
def print_solutions(solutions):
    """Report the ranked (word, score) results to the user, best first."""
    if not solutions:
        print("No plausible solutions found.")
        return
    best_word, best_score = solutions[0]
    print("The best possible solution we found was '%s'" % best_word)
    print("Match score: %s" % best_score)
    runners_up = solutions[1:]
    if runners_up:
        print("Other possible solutions include:")
        for word, score in runners_up:
            print(word + " score: %s" % score)
# Entry point: start the interactive solver loop immediately on import.
CrypticSolver()
| true |
4c432c358c6749b558bb294829cc4b3187b4cfdd | Python | ChernenkoSergey/Supervised-and-Unsupervised-Learning-with-Python | /Раздел 5 Создание систем рекомендаций/pipeline_trainer.py | UTF-8 | 3,473 | 2.984375 | 3 | [] | no_license | from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import Pipeline
from sklearn.ensemble import ExtraTreesClassifier
# Generate some labeled sample data for training and testing;
# scikit-learn has a built-in function that handles it.
# Create 150 data points, each being a 25-dimensional feature vector.
# The feature values are produced by the random sample generator.
# Each data point has 6 informative features and no redundant features.
X, y = samples_generator.make_classification(n_samples=150,
        n_features=25, n_classes=3, n_informative=6,
        n_redundant=0, random_state=7)
# The first pipeline block is a feature selector that keeps the best K
# features; set K to 9.
k_best_selector = SelectKBest(f_regression, k=9)
# The next pipeline block is an extremely randomized forest classifier
# with 60 estimators and a maximum depth of 4.
classifier = ExtraTreesClassifier(n_estimators=60, max_depth=4)
# Build the pipeline by chaining the individual blocks created above;
# each block gets a name so it is easier to refer to later.
processor_pipeline = Pipeline([('selector', k_best_selector), ('erf', classifier)])
# Parameters of the individual blocks can still be changed: set K to 7
# and the number of estimators to 30, using the names assigned above to
# scope each parameter.
processor_pipeline.set_params(selector__k=7, erf__n_estimators=30)
# Train the pipeline on the sample data generated earlier.
processor_pipeline.fit(X, y)
# Predict the output for all input values and print it.
output = processor_pipeline.predict(X)
print("\nPredicted output:\n", output)
# Compute the score on the labeled training data.
print("\nScore:", processor_pipeline.score(X, y))
# Extract the features chosen by the selector block (7 of 25 requested):
# get_support() returns a boolean mask over the original features.
status = processor_pipeline.named_steps['selector'].get_support()
# Extract and print the indices of the selected features.
selected = [i for i, x in enumerate(status) if x]
print("\nIndices of selected features:", ', '.join([str(x) for x in selected]))
fb3464cda5378ddbe8a14e0e8718c2f4b948f605 | Python | austinlyons/computer-science | /heap/python/heap.py | UTF-8 | 4,375 | 3.828125 | 4 | [] | no_license | from math import floor
class Heap:
    """Array-backed binary max-heap.

    Elements live in ``self._heap`` using the implicit-tree layout:
    the children of index ``i`` are at ``2*i + 1`` and ``2*i + 2``.
    Fixes over the original: integer floor division (``//``) so the code
    runs on Python 3 (``range(len(arr)/2, ...)`` raised TypeError there),
    an explicit ``is None`` check for ``heap_size`` (0 is a legitimate
    bound), and an immutable default constructor argument.
    """

    def _left(self, i):
        """Index of the left child of node ``i``."""
        return 2 * i + 1

    def _right(self, i):
        """Index of the right child of node ``i``."""
        return 2 * i + 2

    def _parent(self, i):
        """Index of the parent of node ``i`` (-1 for the root)."""
        # Integer floor division matches floor((i - 1) / 2) for i >= 0
        # without the float round-trip of math.floor.
        return (i - 1) // 2

    def _swap(self, A, i, j):
        """Exchange the elements at indices ``i`` and ``j`` of ``A``."""
        A[i], A[j] = A[j], A[i]

    def _valid(self, i):
        """Return True when node ``i`` is >= each of its children.

        Raises an Exception for an out-of-range index.
        """
        if i >= len(self._heap):
            raise Exception("index i is >= heap length")
        key = self._heap[i]
        l = self._left(i)
        # A left child larger than its parent violates the heap property.
        if l < len(self._heap) and self._heap[l] > key:
            return False
        r = self._right(i)
        if r < len(self._heap) and self._heap[r] > key:
            return False
        # Node has no children, or every existing child is <= the node.
        return True

    def _heapify(self, heap, i, heap_size=None):
        """Sift the element at index ``i`` down until the subtree rooted
        there satisfies the max-heap property.

        ``heap_size`` bounds the active portion of ``heap`` (used by
        heap_sort); it defaults to the full length.
        """
        # Compare with None explicitly: a heap_size of 0 is a valid
        # (empty) bound and must not be replaced by len(heap).
        if heap_size is None:
            heap_size = len(heap)
        l = self._left(i)
        r = self._right(i)
        largest = i
        if l < heap_size and heap[l] > heap[i]:
            largest = l
        if r < heap_size and heap[r] > heap[largest]:
            largest = r
        if largest != i:
            self._swap(heap, i, largest)
            self._heapify(heap, largest, heap_size)

    def _increase_key(self, i, key):
        """Raise the value at index ``i`` to ``key`` and float it up
        toward the root to restore the heap property.

        Raises an Exception when ``key`` is smaller than the current value.
        """
        if key < self._heap[i]:
            raise Exception('new key is smaller than current key')
        self._heap[i] = key
        while i > 0 and self._heap[self._parent(i)] < self._heap[i]:
            self._swap(self._heap, i, self._parent(i))
            i = self._parent(i)

    def _build_heap(self, arr):
        """Rearrange ``arr`` in place into max-heap order (O(n))."""
        # // keeps the start index an int (the original / broke on Py3).
        for i in range(len(arr) // 2, -1, -1):
            self._heapify(arr, i)

    def __init__(self, arr=()):
        """Build a max-heap from the items of ``arr``.

        The input is copied, never mutated; the default is an immutable
        empty tuple so no mutable state is shared between instances.
        """
        self._heap = list(arr)
        self._build_heap(self._heap)

    def heap_sort(self):
        """Return the heap's elements in ascending order.

        Operates on a copy, so ``self._heap`` remains a valid heap.
        """
        heap = list(self._heap)
        heap_size = len(heap)
        # Defensive: the copy is already heap-ordered, but rebuilding is
        # cheap and keeps this method correct regardless of prior state.
        self._build_heap(heap)
        for i in range(heap_size - 1, 0, -1):
            # Move the current maximum behind the active region, shrink
            # the region, then repair the root of the smaller heap.
            self._swap(heap, 0, i)
            heap_size -= 1
            self._heapify(heap, 0, heap_size)
        return heap

    def get_heap(self):
        """Return the backing list (heap order, not sorted)."""
        return self._heap

    def get_max(self):
        """Return (without removing) the largest element."""
        return self._heap[0]

    def remove_max(self):
        """Pop and return the largest element, or None when empty."""
        length = len(self._heap)
        if length < 1:
            return None
        maximum = self._heap[0]
        # Overwrite the root with the last leaf, drop the leaf, then sift
        # the new root down to restore the heap property.
        self._heap[0] = self._heap[length - 1]
        del self._heap[length - 1]
        self._heapify(self._heap, 0)
        return maximum

    def insert(self, key):
        """Add ``key`` to the heap, keeping the heap property."""
        # Append a -inf placeholder, then raise it to the real key so the
        # new element floats up to its correct position.
        self._heap.append(float("-inf"))
        self._increase_key(len(self._heap) - 1, key)

    def is_heap(self):
        """Return True when every node satisfies the max-heap property."""
        length = len(self._heap)
        if length == 0 or length == 1:
            return True
        # Sequentially validate each node of the implicit tree.
        return all(self._valid(i) for i in range(length))
# Python 2 demo/self-test (bare print statements): exercises build,
# remove_max, heap_sort and insert against known expected layouts.
if __name__ == '__main__':
    arr = [4, 1, 3, 2, 16, 9, 10, 14, 8, 7]
    print 'before building heap\t%s' % arr
    h = Heap(arr)
    print 'after building heap\t%s' % h.get_heap()
    assert h.is_heap() == True
    assert h.get_heap() == [16, 14, 10, 8, 7, 9, 3, 2, 4, 1]
    print 'heap before removing max\t%s' % h.get_heap()
    m = h.remove_max()
    print 'after removing max\t%s' % h.get_heap()
    print 'max was %s' % m
    assert m == 16
    assert h.is_heap() == True
    assert h.get_heap() == [14, 8, 10, 4, 7, 9, 3, 2, 1]
    print 'heap before sort\t%s' % h.get_heap()
    sorted_arr = h.heap_sort()
    print 'array after heap sort\t%s' % sorted_arr
    assert sorted_arr == [1, 2, 3, 4, 7, 8, 9, 10, 14]
    # heap_sort must not disturb the heap itself.
    assert h.is_heap() == True
    print 'heap before inserting 11\t%s' % h.get_heap()
    assert h.get_heap() == [14, 8, 10, 4, 7, 9, 3, 2, 1]
    h.insert(11)
    print 'heap after inserting 11\t%s' % h.get_heap()
    assert h.is_heap() == True
    assert h.get_heap() == [14, 11, 10, 4, 8, 9, 3, 2, 1, 7]
| true |
c50bf8fcaf38c8f91d3f1743062e2597c1ee27b7 | Python | foersterrobert/Pokemon-TD | /bullet.py | UTF-8 | 958 | 3.21875 | 3 | [] | no_license | from settings import *
import pygame
class Bullet:
    """A projectile drawn either as a sprite image or a plain circle.

    (ex, ey) are stored but never used inside this class -- presumably
    the target/end coordinates consumed by movement code elsewhere;
    confirm with the caller.
    """
    def __init__(self, screen, x, y, ex, ey, bsize, imgB=None):
        self.screen = screen
        self.x = x
        self.y = y
        self.ex = ex
        self.ey = ey
        self.bsize = bsize
        self.imgB = imgB
        self.image = None
        if self.imgB:
            # NOTE(review): an imgB value other than 'Fire'/'Lazer' leaves
            # self.image as None and transform.scale(None, ...) will fail.
            if self.imgB == 'Fire':
                self.image = pygame.image.load("./images/bullets/fire.png")
            elif self.imgB == 'Lazer':
                self.image = pygame.image.load("./images/bullets/Lazer.png")
            # Scale the sprite to twice the bullet radius in each dimension.
            self.image = pygame.transform.scale(self.image, (bsize*2, bsize*2))
            self.rect = self.image.get_rect()
    def draw(self):
        """Blit the sprite centred on (x, y), or draw a pale circle."""
        if self.image:
            self.rect.centerx = self.x
            self.rect.centery = self.y
            self.screen.blit(self.image, self.rect)
        else:
            pygame.draw.circle(self.screen, (245,245,220), (self.x, self.y), self.bsize)
541096039db4bb40bcadf12285b0e936fef5d98d | Python | haoruizh/CS322Project | /chatProject/server/User_dic.py | UTF-8 | 959 | 2.6875 | 3 | [] | no_license | from socket import *
import json
import os
import openpyxl
class User:
    """Simple user-profile store persisted as JSON.

    NOTE(review): `filename` is a machine-specific absolute path, and
    `user_info` is a *class-level* dict shared by every instance;
    profiles are only ever read from memory, never re-loaded from the
    file, so show_profile only works within the run that created them.
    """
    filename = 'C://Users/Jihui/Documents/GitHub/CS322Project/chatProject/server/user.txt'
    user_info = {}
    def __init__(self):
        pass
    def show_profile(self, userName):
        """Print and return the stored profile dict for userName.

        Raises KeyError when the user was never registered in this run.
        """
        print(self.user_info[userName])
        return self.user_info[userName]
    def init_profile(self, userName, sex, birth):
        """Register a new user and persist the whole table to `filename`;
        prints "ID error" when the name is already taken."""
        if userName not in self.user_info:
            self.user_info[userName] = {"sex": sex, "birthday":birth}
            # NOTE(review): the file handle opened here is never closed.
            json.dump(self.user_info, open(self.filename, 'w'))
        else:
            print("ID error")
    def test(self):
        """Seed a few demo profiles (any instance works: the dict is shared)."""
        User().init_profile('Jihui', 'male', '1994')
        User().init_profile('Sheng', 'male', '2020')
        User().init_profile('what', 'male', '2220')
    # def edit_profile(self, ):
    #     pass
    # def get_notify(self):
    #     pass
if __name__ == '__main__':
    # Demo: seed the sample profiles, then display one of them.
    User().test()
    User().show_profile('Jihui')
83dc2ad21ac34878de0b801df102eb7803fa31d3 | Python | anthony-chang/machine-learning-playground | /housingPrices.py | UTF-8 | 655 | 2.953125 | 3 | [] | no_license | # https://www.hackerrank.com/challenges/predicting-house-prices/problem
from sklearn import linear_model
import numpy as np
# HackerRank stdin protocol: first line holds F (feature count) and N
# (number of training rows).
features, N = (int(n) for n in input().split())
x_train = []
y_train = []
x_test = []
x_train = [0 for i in range(N)]
for i in range(N):
    x_train[i] = list(map(float, input().split()))
# Each training row holds F features followed by the target price: split
# the matrix into inputs (first F columns) and targets (last column).
x_train = np.array(x_train)
y_train = x_train[:, features]
x_train = x_train[:, 0:features]
model = linear_model.LinearRegression()
model.fit(x_train, y_train)
# Read T query rows and print one predicted price per line.
T = int(input())
x_test = [0 for i in range(T)]
for i in range(T):
    x_test[i] = list(map(float, input().split()))
for i in range(T):
    print(model.predict([x_test[i]])[0])
| true |
cb788dbfc49bdf215aedd7f3e1dc90fe8a5b7077 | Python | kate-codebook/movie_recommendersys | /itemBased.py | UTF-8 | 1,213 | 3.28125 | 3 | [] | no_license | import pandas as pd
import ast
def create_item_based_rating(movies): # movies type dict
    """Build a table of the 10 most similar movies per liked movie.

    `movies` maps title -> rating (a dict or its str repr); titles rated
    >= 3 are treated as favorites. Reads ratings.csv and movies.csv from
    the working directory. Returns a DataFrame with one column per
    favorite listing its ten most correlated titles.
    """
    # Round-trip through str so both dicts and stringified dicts work;
    # ast.literal_eval below parses the text back safely (no eval).
    movies = str(movies)
    rating_data = pd.read_csv('ratings.csv')
    movie_data = pd.read_csv('movies.csv')
    user_movie_rating = pd.merge(rating_data, movie_data, on='movieId')
    # user x movie matrix of ratings, with 0 standing in for "not rated".
    user_movie_rating_p = user_movie_rating.pivot_table('rating', index='userId', columns='title').fillna(0)
    fav_movie = []
    movies_dict = ast.literal_eval(movies)
    for item in movies_dict:
        if int(movies_dict.get(item)) >= 3:
            fav_movie.append(item)
    print("movies to search similar movies: ", fav_movie)
    final_df = pd.DataFrame()
    for movie in fav_movie:
        result = sim_cal(user_movie_rating_p, movie)
        # [1:11] skips the movie itself (correlation 1.0 at rank 0).
        similar_movie_list = pd.DataFrame(data=result[movie].sort_values(ascending=False)[1:11]).index.tolist() #display top ten similar movies
        df = pd.DataFrame(data = similar_movie_list, columns = [movie])
        final_df = pd.concat([final_df, df], axis = 1)
    return final_df
def sim_cal(df, movie):
    """Pearson correlation matrix over every column of `df`, computed
    only on the rows (users) that actually rated `movie` (non-zero)."""
    rated_mask = df[movie] != 0
    rated_subset = df[rated_mask]
    return rated_subset.corr(method='pearson')
90b1d8b52dbaa41f051a98d21c16bbf64d04a5b0 | Python | ungerw/class-work | /ch6ex5.py | UTF-8 | 97 | 2.59375 | 3 | [] | no_license | str = 'X-DSPAM-Confidence:0.8475'
# NOTE(review): the preceding line rebinds the builtin name `str`;
# restate the header under a descriptive, non-shadowing name and parse
# the confidence value that follows the colon.
header = 'X-DSPAM-Confidence:0.8475'
colon_pos = header.find(':')
confidence = float(header[colon_pos + 1:])
print(confidence)
1210dcf6d176ad6bc7941c1c25bafc38ce022fcf | Python | msetkin/udacity_streaming | /consumers/models/lines.py | UTF-8 | 2,126 | 2.671875 | 3 | [] | no_license | """Contains functionality related to Lines"""
import json
import logging
from models import Line
from ksql import TURNSTILE_SUMMARY_TABLE
logger = logging.getLogger(__name__)
class Lines:
    """Contains all train lines"""
    def __init__(self):
        """Creates the Lines object"""
        self.red_line = Line("red")
        self.green_line = Line("green")
        self.blue_line = Line("blue")
    def process_message(self, message):
        """Route an incoming Kafka message to the matching Line object."""
        # Station updates arrive on the producer topic (payload already
        # deserialized) or the stations table topic (JSON string).
        if "com.streaming.produce.station" in message.topic() or "org.chicago.cta.stations.table.v1" in message.topic():
            value = message.value()
            if message.topic() == "org.chicago.cta.stations.table.v1":
                value = json.loads(value)
            # value["line"] assumes the producer-topic payload is already
            # dict-like (e.g. Avro-deserialized) -- TODO confirm upstream.
            if value["line"] == "green":
                self.green_line.process_message(message)
            elif value["line"] == "red":
                self.red_line.process_message(message)
            elif value["line"] == "blue":
                self.blue_line.process_message(message)
            else:
                logger.debug("discarding unknown line msg %s", value["line"])
        elif message.topic() == TURNSTILE_SUMMARY_TABLE:
            logger.debug(f"message.topic {message.topic()}, message.value {message.value()}")
            # NOTE(review): if json.loads fails, json_data is left unbound
            # and the LINE_COLOR lookup below raises NameError.
            try:
                json_data = json.loads(message.value())
            except ValueError:  # includes simplejson.decoder.JSONDecodeError
                logger.error(f"Decoding JSON has failed")
            # LINE_COLOR is indexed with [0] -- presumably a one-element
            # KSQL array column; verify against the table schema.
            line_color = json_data.get("LINE_COLOR")[0]
            logger.debug(f"line_color: {line_color}")
            if line_color == "red":
                self.red_line.process_message(message)
            elif line_color == "green":
                self.green_line.process_message(message)
            elif line_color == "blue":
                self.blue_line.process_message(message)
            else:
                logger.error(f"unknown color: {line_color}")
        else:
            logger.info("ignoring non-lines message %s", message.topic())
| true |
d397eaf5dd020a8124d1a7f68af30c3339ad6a93 | Python | dingzhaohan/deep_research | /spiders/git/git/spiders/littlegit.py | UTF-8 | 3,366 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
import scrapy
from git.items import GitItem
import pandas as pd
import json
import time
import datetime
# df = pd.read_json("/home/zhaohan/Desktop/research/lastdata_papers_with_code_repo.json")
df = pd.read_json('/Users/zhaohan/Desktop/deep_research/data/links-between-papers-and-code.json')
'''
repo = set()
for i in range(len(df)):
for url in df["repo_url"][i]:
repo.add(url)
'''
class LittlegitSpider(scrapy.Spider):
    """Scrape GitHub API metadata for every paper repo listed in `df`.

    NOTE(security): a GitHub OAuth client_id/client_secret pair is
    hard-coded into every URL below; these credentials are committed to
    the repository and should be rotated and moved to configuration.
    """
    name = 'littlegit'
    allowed_domains = ['github.com']
    start_urls = []
    '''
    for i in repo:
        url = "https://api.github.com/repos" + i[18:] + "?client_id=a26c83afeb1a41304d10&client_secret=ea8586a6b1d16c9f645112fd04b5bf57f5bae88e"
        start_urls.append(url)
    '''
    # Build one API URL per dataframe row; [18:] strips the
    # "https://github.com" prefix from the stored repo URL.
    for i in range(len(df)):
        url = "https://api.github.com/repos" + df["repo_url"][i][18:] + "?client_id=a26c83afeb1a41304d10&client_secret=ea8586a6b1d16c9f645112fd04b5bf57f5bae88e"
        start_urls.append(url)
    def parse(self, response):
        """Turn one repo-API JSON response into a populated GitItem."""
        # The URL's position in start_urls pairs the response back to its
        # dataframe row (and hence to the paper title).
        index = self.start_urls.index(response.url)
        item = GitItem()
        sites = json.loads(response.body_as_unicode())
        item["paper_title"] = df["paper_title"][index]
        item["repo_size"] = sites["size"]
        # item["repo_name"] = sites["name"]
        item["repo_url"] = sites["html_url"]
        item["git_watch"] = sites["subscribers_count"]
        item["git_fork"] = sites["forks_count"]
        item["git_star"] = sites["stargazers_count"]
        # Timestamps are ISO strings; [:10] keeps just the YYYY-MM-DD part.
        item["repo_created_at"] = sites["created_at"][:10]
        item["repo_updated_at"] = sites["updated_at"][:10]
        item["repo_kept_time"] = caltime(item["repo_created_at"], item["repo_updated_at"])
        item["open_issues_count"] = sites["open_issues_count"]
        # Issues-endpoint URL kept for the disabled detail_parse1 chain.
        url1 = response.url.replace('?client_id=a26c83afeb1a41304d10&client_secret=ea8586a6b1d16c9f645112fd04b5bf57f5bae88e','/issues?client_id=a26c83afeb1a41304d10&client_secret=ea8586a6b1d16c9f645112fd04b5bf57f5bae88e')
        #yield scrapy.Request(url1, meta={"item":item}, callback=self.detail_parse1)
        yield item
    def detail_parse1(self, response):
        """Attach latest-issue timestamps to the item (chain disabled)."""
        sites = json.loads(response.body_as_unicode())
        item = response.meta["item"]
        # An empty issue list raises IndexError; either way fall back to None.
        try:
            item["latest_issues_created_at"] = sites[0]["created_at"][:10]
        except:
            item["latest_issues_created_at"] = None
        try:
            item["latest_issues_updated_at"] = sites[0]["updated_at"][:10]
        except:
            item["latest_issues_updated_at"] = None
        print(response.url)
        #url2 = response.url.replace('?client_id=a26c83afeb1a41304d10&client_secret=ea8586a6b1d16c9f645112fd04b5bf57f5bae88e','/contents/README.md?client_id=a26c83afeb1a41304d10&client_secret=ea8586a6b1d16c9f645112fd04b5bf57f5bae88e')
        #yield scrapy.Request(url2, meta={"item": item}, callback=self.detail_parse2)
    def detail_parse2(self, response):
        """Record README size, retrying with a lowercase path on failure.

        NOTE(review): mixes `yield` and `return item` -- Python 3 only;
        confirm the retry/return control flow is intentional.
        """
        sites = json.loads(response.body_as_unicode())
        item = response.meta["item"]
        try:
            item["readme_size"] = sites["size"]
        except:
            url = response.url.replace("README", "readme")
            yield scrapy.Request(url, meta={"item":response.meta["item"]}, callback=self.detail_parse2)
        return item
def caltime(date1, date2):
    """Return the whole number of days between two 'YYYY-MM-DD' strings,
    as a string (e.g. '3').

    Fixes the original, which stripped only the literal " days, 0:00:00"
    from str(timedelta): the one-day case leaked "1 day, 0:00:00" (singular
    "day") and the zero-delta case leaked "0:00:00".
    """
    start = datetime.datetime.strptime(date1, "%Y-%m-%d")
    end = datetime.datetime.strptime(date2, "%Y-%m-%d")
    # timedelta.days is the exact integer day count; no string surgery.
    return str((end - start).days)
| true |
100f0252883b40d7eb2501a04338306c06ed6794 | Python | SimmonsChen/LeetCode | /公司真题/顺丰/不要1.py | UTF-8 | 1,027 | 3.40625 | 3 | [] | no_license | def helper(n):
    # Repunit check: strip digits from the least-significant end; any
    # digit that is not 1 means n is not made solely of 1s.
    while n > 0:
        if n % 10 != 1:
            return False
        n = n // 10
    # Also reached when n <= 0 (loop body never runs), which reports True.
    return True
def isHaveOne(n):
    """Decide whether n "contains" a 1: either some digit of n is 1, or n
    can be greedily reduced via repunit (11, 111, ...) moduli to a repunit.

    NOTE(review): the modulo reduction tries each repunit once in
    descending order, so it is heuristic rather than exhaustive; confirm
    the intended problem semantics before trusting negative answers.
    """
    if n == 1: return True
    if n < 10: return False
    cur = n # keep the original number intact
    tar = []
    # Collect the digits of n; any literal digit 1 is an immediate hit.
    while cur > 0:
        t = cur % 10
        if t == 1: return True
        tar.append(t)
        cur = cur // 10
    print("n的构成:", tar)
    size = len(tar) # number of digits in n
    temp = []
    # Candidate repunit moduli: 11, 111, ... up to `size` digits.
    for i in range(2, size + 1):
        temp.append(int("1" * i))
    temp.sort(reverse=True) # descending order
    print("预备因数:", temp)
    k = 0 # index into the repunit candidates
    while n > 0 and k < len(temp):
        if helper(n): return True
        if n % temp[k] == 0: return True
        if n < temp[k]: # n smaller than this repunit: move to a smaller one
            k += 1
            continue
        n = n % temp[k] # reduce n by this repunit as far as possible
        if n in temp: return True
        k += 1
    return False
# Read numbers from stdin until a blank line; report each verdict.
while 1:
    s = input()
    if not s:
        break
    else:
        print(isHaveOne(int(s)))
| true |
d12747e228c13b95ea4c87b58b391179d65d8220 | Python | iblezya/Python | /Semana 2/Cuarentena/cond8.py | UTF-8 | 859 | 3.78125 | 4 | [] | no_license | Nombre = str(input('Ingrese el nombre del producto: '))
# Purchase calculator (Spanish prompts): keeps re-prompting until price
# and quantity parse, then applies a quantity-tier discount.
while (True):
    try:
        Precio = float(input('Ingrese el precio del producto(S/.): '))
        Cantidad = int(input('Ingrese la cantidad de productos: '))
        Monto = Precio*Cantidad
        # Discount tiers: >=100 -> 40% off, >=25 -> 20%, >=10 -> 10%, else none.
        if Cantidad >= 100:
            MontoFinal = 0.6*Monto
        elif 100 > Cantidad >= 25:
            MontoFinal = 0.8*Monto
        elif 25 > Cantidad >= 10:
            MontoFinal = 0.9*Monto
        elif 10 > Cantidad >= 1:
            MontoFinal = Monto
        else:
            # Quantity below the minimum of 1: reject and re-prompt.
            print('Mínima cantidad permitida: 1. Intente de nuevo.')
            continue
        break
    except ValueError:
        print('Error. Intente de nuevo.')
print('\nUsted acaba de comprar: ',Nombre)
print('Cantidad: ',Cantidad,'productos.')
print('Monto final a pagar: S/.',MontoFinal)
69d0d49788987a148607933c3f188bea27469e90 | Python | muskanmahajan37/python-scic | /sesion_3/resorte.py | UTF-8 | 294 | 2.96875 | 3 | [] | no_license | import math
A = 10
j = 1
k = 3
m = 1
def xf(t):
w = (k / m) ** 0.5
return A * math.sin(w * t + j)
f = open("resorte.csv", "w")
n = 100
t_min = 0
t_max = 4
for i in range(n):
t = t_min + (t_max - t_min) / (n - 1) * i
x = xf(t)
f.write("{}, {}\n".format(t, x))
f.close() | true |
d1e2a7a35b02158767334621fab48c736e364d3d | Python | barry-jin/array-api-tests | /array_api_tests/special_cases/test_atan2.py | UTF-8 | 12,415 | 3.03125 | 3 | [
"MIT"
] | permissive | """
Special cases tests for atan2.
These tests are generated from the special cases listed in the spec.
NOTE: This file is generated automatically by the generate_stubs.py script. Do
not modify it directly.
"""
from ..array_helpers import (NaN, assert_exactly_equal, exactly_equal, greater, infinity, isfinite,
less, logical_and, logical_or, zero, π)
from ..hypothesis_helpers import numeric_arrays
from .._array_module import atan2
from hypothesis import given
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_either(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If either `x1_i` or `x2_i` is `NaN`, the result is `NaN`.
"""
res = atan2(arg1, arg2)
mask = logical_or(exactly_equal(arg1, NaN(arg1.shape, arg1.dtype)), exactly_equal(arg2, NaN(arg1.shape, arg1.dtype)))
assert_exactly_equal(res[mask], (NaN(arg1.shape, arg1.dtype))[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_greater__equal_1(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is greater than `0` and `x2_i` is `+0`, the result is an implementation-dependent approximation to `+π/2`.
"""
res = atan2(arg1, arg2)
mask = logical_and(greater(arg1, zero(arg1.shape, arg1.dtype)), exactly_equal(arg2, zero(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (+π(arg1.shape, arg1.dtype)/2)[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_greater__equal_2(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is greater than `0` and `x2_i` is `-0`, the result is an implementation-dependent approximation to `+π/2`.
"""
res = atan2(arg1, arg2)
mask = logical_and(greater(arg1, zero(arg1.shape, arg1.dtype)), exactly_equal(arg2, -zero(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (+π(arg1.shape, arg1.dtype)/2)[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__greater_1(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `+0` and `x2_i` is greater than `0`, the result is `+0`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, zero(arg1.shape, arg1.dtype)), greater(arg2, zero(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (zero(arg1.shape, arg1.dtype))[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__greater_2(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `-0` and `x2_i` is greater than `0`, the result is `-0`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, -zero(arg1.shape, arg1.dtype)), greater(arg2, zero(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (-zero(arg1.shape, arg1.dtype))[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__equal_1(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `+0` and `x2_i` is `+0`, the result is `+0`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, zero(arg1.shape, arg1.dtype)), exactly_equal(arg2, zero(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (zero(arg1.shape, arg1.dtype))[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__equal_2(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `+0` and `x2_i` is `-0`, the result is an implementation-dependent approximation to `+π`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, zero(arg1.shape, arg1.dtype)), exactly_equal(arg2, -zero(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (+π(arg1.shape, arg1.dtype))[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__equal_3(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `-0` and `x2_i` is `+0`, the result is `-0`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, -zero(arg1.shape, arg1.dtype)), exactly_equal(arg2, zero(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (-zero(arg1.shape, arg1.dtype))[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__equal_4(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `-0` and `x2_i` is `-0`, the result is an implementation-dependent approximation to `-π`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, -zero(arg1.shape, arg1.dtype)), exactly_equal(arg2, -zero(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (-π(arg1.shape, arg1.dtype))[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__equal_5(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `+infinity` and `x2_i` is finite, the result is an implementation-dependent approximation to `+π/2`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, infinity(arg1.shape, arg1.dtype)), isfinite(arg2))
assert_exactly_equal(res[mask], (+π(arg1.shape, arg1.dtype)/2)[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__equal_6(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `-infinity` and `x2_i` is finite, the result is an implementation-dependent approximation to `-π/2`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, -infinity(arg1.shape, arg1.dtype)), isfinite(arg2))
assert_exactly_equal(res[mask], (-π(arg1.shape, arg1.dtype)/2)[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__equal_7(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `+infinity` and `x2_i` is `+infinity`, the result is an implementation-dependent approximation to `+π/4`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, infinity(arg1.shape, arg1.dtype)), exactly_equal(arg2, infinity(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (+π(arg1.shape, arg1.dtype)/4)[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__equal_8(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `+infinity` and `x2_i` is `-infinity`, the result is an implementation-dependent approximation to `+3π/4`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, infinity(arg1.shape, arg1.dtype)), exactly_equal(arg2, -infinity(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (+3*π(arg1.shape, arg1.dtype)/4)[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__equal_9(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `-infinity` and `x2_i` is `+infinity`, the result is an implementation-dependent approximation to `-π/4`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, -infinity(arg1.shape, arg1.dtype)), exactly_equal(arg2, infinity(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (-π(arg1.shape, arg1.dtype)/4)[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__equal_10(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `-infinity` and `x2_i` is `-infinity`, the result is an implementation-dependent approximation to `-3π/4`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, -infinity(arg1.shape, arg1.dtype)), exactly_equal(arg2, -infinity(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (-3*π(arg1.shape, arg1.dtype)/4)[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__less_1(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `+0` and `x2_i` is less than `0`, the result is an implementation-dependent approximation to `+π`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, zero(arg1.shape, arg1.dtype)), less(arg2, zero(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (+π(arg1.shape, arg1.dtype))[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_equal__less_2(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is `-0` and `x2_i` is less than `0`, the result is an implementation-dependent approximation to `-π`.
"""
res = atan2(arg1, arg2)
mask = logical_and(exactly_equal(arg1, -zero(arg1.shape, arg1.dtype)), less(arg2, zero(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (-π(arg1.shape, arg1.dtype))[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_less__equal_1(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is less than `0` and `x2_i` is `+0`, the result is an implementation-dependent approximation to `-π/2`.
"""
res = atan2(arg1, arg2)
mask = logical_and(less(arg1, zero(arg1.shape, arg1.dtype)), exactly_equal(arg2, zero(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (-π(arg1.shape, arg1.dtype)/2)[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_less__equal_2(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is less than `0` and `x2_i` is `-0`, the result is an implementation-dependent approximation to `-π/2`.
"""
res = atan2(arg1, arg2)
mask = logical_and(less(arg1, zero(arg1.shape, arg1.dtype)), exactly_equal(arg2, -zero(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (-π(arg1.shape, arg1.dtype)/2)[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_greater_equal__equal_1(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is greater than `0`, `x1_i` is a finite number, and `x2_i` is `+infinity`, the result is `+0`.
"""
res = atan2(arg1, arg2)
mask = logical_and(logical_and(greater(arg1, zero(arg1.shape, arg1.dtype)), isfinite(arg1)), exactly_equal(arg2, infinity(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (zero(arg1.shape, arg1.dtype))[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_greater_equal__equal_2(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is greater than `0`, `x1_i` is a finite number, and `x2_i` is `-infinity`, the result is an implementation-dependent approximation to `+π`.
"""
res = atan2(arg1, arg2)
mask = logical_and(logical_and(greater(arg1, zero(arg1.shape, arg1.dtype)), isfinite(arg1)), exactly_equal(arg2, -infinity(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (+π(arg1.shape, arg1.dtype))[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_less_equal__equal_1(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is less than `0`, `x1_i` is a finite number, and `x2_i` is `+infinity`, the result is `-0`.
"""
res = atan2(arg1, arg2)
mask = logical_and(logical_and(less(arg1, zero(arg1.shape, arg1.dtype)), isfinite(arg1)), exactly_equal(arg2, infinity(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (-zero(arg1.shape, arg1.dtype))[mask])
@given(numeric_arrays, numeric_arrays)
def test_atan2_special_cases_two_args_less_equal__equal_2(arg1, arg2):
"""
Special case test for `atan2(x1, x2, /)`:
- If `x1_i` is less than `0`, `x1_i` is a finite number, and `x2_i` is `-infinity`, the result is an implementation-dependent approximation to `-π`.
"""
res = atan2(arg1, arg2)
mask = logical_and(logical_and(less(arg1, zero(arg1.shape, arg1.dtype)), isfinite(arg1)), exactly_equal(arg2, -infinity(arg2.shape, arg2.dtype)))
assert_exactly_equal(res[mask], (-π(arg1.shape, arg1.dtype))[mask])
| true |
4a86bb3dfb25dd90f71c488dcc084e913df87edc | Python | Zararthustra/holbertonschool-higher_level_programming | /0x0F-python-object_relational_mapping/9-model_state_filter_a.py | UTF-8 | 789 | 2.59375 | 3 | [] | no_license | #!/usr/bin/python3
"""
lists all State objects that contain the letter a from
the database hbtn_0e_6_usa
"""
import sqlalchemy
import sys
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from model_state import Base, State
if __name__ == "__main__":
    # Credentials and database name come from the command line:
    #   ./9-model_state_filter_a.py <mysql user> <password> <database>
    username = sys.argv[1]
    password = sys.argv[2]
    db_name = sys.argv[3]
    engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'
                           .format(username, password, db_name))
    Session = sessionmaker(bind=engine)
    session = Session()
    Base.metadata.create_all(engine)
    # SQL LIKE '%a%' keeps only states whose name contains the letter a.
    query = session.query(State).order_by(State.id).filter(
        State.name.like('%a%'))
    for record in query:
        print("{}: {}".format(record.id, record.name))
    session.close()
| true |
a7e21e100a132df9a3ed88666c965a0ce6e6807d | Python | delaven007/AI | /2/5-ridge-岭回归2.py | UTF-8 | 1,035 | 3.046875 | 3 | [] | no_license | import numpy as np
import sklearn.linear_model as lm
import matplotlib.pyplot as mp
# Load the data set (two comma-separated columns: x and y)
x, y = np.loadtxt('./data/ml_data/abnormal.txt', delimiter=',', usecols=(0,1), unpack=True)
x = x.reshape(-1, 1)
# Create a linear regression model
model = lm.LinearRegression()
# Train the model
model.fit(x, y)
# Predict outputs for the training inputs
pred_y1 = model.predict(x)
# Create a ridge regression model with regularization strength 150
model = lm.Ridge(150, fit_intercept=True, max_iter=10000)
# Train the model
model.fit(x, y)
# Predict outputs for the training inputs
pred_y2 = model.predict(x)
mp.figure('Linear & Ridge', facecolor='lightgray')
mp.title('Linear & Ridge', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.scatter(x, y, c='dodgerblue', alpha=0.75,
           s=60, label='Sample')
# Sort by x so the curves are drawn left to right
sorted_indices = x.T[0].argsort()
mp.plot(x[sorted_indices], pred_y1[sorted_indices],
        c='orangered', label='Linear')
mp.plot(x[sorted_indices], pred_y2[sorted_indices],
        c='limegreen', label='Ridge')
mp.legend()
mp.show()
a59a298ad1e8b4273f4bcc5d264b84d72eec2688 | Python | decentjik1128/python_code | /ch_1/pythonic_code/list_comprehensions.py | UTF-8 | 793 | 3.828125 | 4 | [] | no_license | #List Comprehension
result = [i for i in range(10)]
print(result)
#조건을 만족할 때만 추가
result = [i for i in range(10) if i%2 == 0]
print(result)
#이중 for문 방식
word_1 = 'Hello'
word_2 = 'World'
#1차원 방
result = [i+j for i in word_1 for j in word_2]
print(result)
case_1 = ['A', 'B', 'C']
case_2 = ['D', 'E', 'A']
#1차원 방식
result = [i+j for i in case_1 for j in case_2]
print(result)
#2차원 방식
result = [[i+j for i in case_1] for j in case_2]
print(result)
#1차원 방식 + 필터 추가
result = [i+j for i in case_1 for j in case_2 if not(i==j)]
print(result)
result.sort()
print(result)
words = 'The quick brown fox jumps over the lazy dogl'.split()
print(words)
stuff=[[w.upper() , w.lower(), len(w)]for w in words]
for i in stuff:
print(i)
| true |
899323fab920fa2c86edc03662a8fdab5cca0ac3 | Python | mwstobo/rent-toronto | /cache.py | UTF-8 | 1,049 | 2.90625 | 3 | [
"MIT"
] | permissive | """Caching for advert ids"""
from typing import List
import redis
import config
REDIS_POOL = redis.ConnectionPool(host=config.REDIS_HOST, decode_responses=True)
ADVERT_IDS_KEY = "adverts"
ADVERT_INFO_KEY = "advert_info"
def contains_id(advert_id: str) -> bool:
    """Check if this advert id is already in the cache."""
    client = redis.StrictRedis(connection_pool=REDIS_POOL)
    # SISMEMBER returns 1 when the id is in the set, 0 otherwise.
    return client.sismember(ADVERT_IDS_KEY, advert_id) == 1
def add_ids(advert_ids: List[str]) -> None:
    """Add the advert ids to the cache.

    No-op for an empty list: Redis SADD requires at least one member,
    so unpacking an empty list into sadd() would raise an error.
    """
    if not advert_ids:
        return
    client = redis.StrictRedis(connection_pool=REDIS_POOL)
    client.sadd(ADVERT_IDS_KEY, *advert_ids)
def contains_info(advert_info: str) -> bool:
    """Check if this advert info hash is in the cache."""
    client = redis.StrictRedis(connection_pool=REDIS_POOL)
    # SISMEMBER returns 1 when the hash is in the set, 0 otherwise.
    return client.sismember(ADVERT_INFO_KEY, advert_info) == 1
def add_info(advert_info: List[str]) -> None:
    """Add the advert info hashes to the cache.

    No-op for an empty list: Redis SADD requires at least one member,
    so unpacking an empty list into sadd() would raise an error.
    """
    if not advert_info:
        return
    client = redis.StrictRedis(connection_pool=REDIS_POOL)
    client.sadd(ADVERT_INFO_KEY, *advert_info)
| true |
f949490feda8260fcbd2bfd44409522012978172 | Python | tessyoncom/lessons | /greet.py | UTF-8 | 98 | 2.71875 | 3 | [] | no_license | tes = 'Hello, World!'
print(tes)
if 5<10:
print("hurry, I know maths!")
print("program ends")
| true |
cb99f15517f56a5d6a8d6374a0274c0b58a7ef12 | Python | DiniH1/python_engineer89_basics | /variables.py | UTF-8 | 1,597 | 4.75 | 5 | [] | no_license | # lets test
print("Hello Dini H")
#print func used to display outcome provided in the string
#Variables
#python variables as a place holder to store data
# it could me a string "anything between these quotations"
# integers/numbers
#Syntax to create a variable name of the variable = value of the variable
# follow your logical naming convention
# First_Name = 'Dini'
# Last_Name = 'Hassan'
# #lets create val to store int val
# Salary = 10.5 #float val contains decimal
# age = 19 #int value
# my_age = '22'
#print(First_Name)
#print(Last_Name)
#print(Salary)
#print(age)
#print(my_age)
#type(age) helps us find the type of variable
#print(type(age))
#print(type(my_age))#will
# input() is used in Python to interact with the user and ask for required data
# user_name = input("Please enter your name ")
# print('Hello ')
#
# print(user_name)
# Activity
# variables: first_name, last_name, age, DOB
# prompt the user to input the values above
# print/display the type of each value received from the user
# then display the data back to the user with a greeting message
# Activity/task
#
# variables first_name, last_name, age, DOB
# prompt user to input above value
# print/display the type of each value received from the user
# then display the data back to user with greeting message
# Collect the user's details interactively.
first_name = input("Please enter your first name ")
last_name = input("Please enter your last name ")
age = input("What is your age? ")
DOB = input("What is your Date of birth? ")
# input() always returns str, so every type printed below is <class 'str'>.
print(type(first_name))
print(type(last_name))
print(type(age))
print(type(DOB))
# Echo the collected data back with a greeting.
print("Hello")
print(first_name)
print(last_name)
print(age)
print(DOB)
| true |
662a4b1ae882a22b1448d866d24e781b22072fa2 | Python | RajeshDas7/webscraping | /tweeter/twitter_fetch_hashtag.py | UTF-8 | 921 | 2.75 | 3 | [] | no_license | import tweepy
# SECURITY(review): real API credentials are hard-coded in the source.
# They should be revoked and loaded from environment variables or an
# untracked config file instead of being committed to version control.
consumer_key = "nvEV4sEBSWM3HjwkcPu9ug6VR"
consumer_secret = "3I6VFDNLbRGGkq7um1RqouLFs7EArViu3KoKMdN72QzN2i7Mwm"
access_token = "1086269917295390720-rwbnIFrN2tjmQNjmr4dh849WH2Aewk"
access_token_secret = "hwweAzNe6ltT9MaFHRaFTk7ZJPd04a6HdFHuDUDEKniyH"
import csv
# import pandas as pd
# Authenticate with the Twitter API; wait_on_rate_limit pauses the
# cursor automatically whenever the rate limit is reached.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth,wait_on_rate_limit=True)
#####United Airlines
# Open/Create a file to append data
csvFile = open('caa.csv', 'w')
#Use csv Writer
csvWriter = csv.writer(csvFile)
# query = 'python'
# max_tweets = 1000
# searched_tweets = [status for status in tweepy.Cursor(api.search, q=query).items(max_tweets)]
# print(searched_tweets)
# Stream every English #caa tweet since the given date into the CSV:
# one row per tweet (timestamp, favourite count, UTF-8 encoded text).
for tweet in tweepy.Cursor(api.search,q="#caa",count=100,lang="en",since="2017-04-03").items():
    # print(tweet)
    # print (tweet.created_at, tweet.text)
    csvWriter.writerow(
        [tweet.created_at, tweet.favorite_count, tweet.text.encode('utf-8')])
3128c86386e2f379053ea5f73dc056f6d5c39370 | Python | coti/adventofcode | /day13/day13part1.py | UTF-8 | 2,163 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python
import sys
import itertools
def parseFile( line ):
    """Parse one happiness rule of the form
    "A would gain/lose H happiness units by sitting next to B."

    Returns (a, b, h) with h negated for "lose", or None when the
    happiness field is not an integer.
    """
    # Strip the trailing newline and the sentence period separately.
    # The original split('.\n') left the period glued to the last name
    # whenever the final line of the file had no trailing newline.
    line = line.rstrip('\n').rstrip('.')
    tab = line.split(' ')
    a = tab[0]
    b = tab[-1]
    try:
        h = int(tab[3])
    except ValueError:
        # %-formatting produces the same text as the original Python 2
        # print statement while also being valid Python 3.
        print("happyness %s error" % tab[3])
        return None
    if tab[2] == "lose":
        h = -h
    return a, b, h
def insertPersonInTheTable(p, tab):
    """Register person p in the happiness table.

    tab[0] is the list of names; each following row starts with a name
    and holds one score slot per known person (offset by one). Adding a
    person widens every existing row and appends a fresh zeroed row.
    Already-known people are left untouched.
    """
    names = tab[0]
    if p in names:
        return
    names.append(p)
    for row in tab[1:]:
        row.append(0)
    new_row = [0] * (len(names) + 1)
    new_row[0] = p
    tab.append(new_row)
    return
def printTable(tab):
    """Dump the happiness table, one row per line (debug helper)."""
    for row in tab:
        print(row)
    return
def findCoupleInTheTable(couple, tab):
    """Ensure both people of the couple exist in the table and return
    their 1-based (row, column) coordinates in the score matrix."""
    person_a, person_b, _ = couple
    insertPersonInTheTable(person_a, tab)
    insertPersonInTheTable(person_b, tab)
    names = tab[0]
    return names.index(person_a) + 1, names.index(person_b) + 1
def insertCouple(couple, tab):
    """Store the couple's happiness value at its (row, col) slot,
    inserting both people into the table first if needed."""
    row, col = findCoupleInTheTable(couple, tab)
    tab[row][col] = couple[2]
    return
def computeArrangements(tab):
    """All candidate seatings = permutations of the header-row names."""
    return list(itertools.permutations(tab[0]))
def getHappinessOfCouple(couple, tab):
    """How much happiness the first person gains (or loses, if
    negative) when seated next to the second person."""
    first, second = couple
    names = tab[0]
    return tab[names.index(first) + 1][names.index(second) + 1]
def getHappinessOfArrangement(arr, tab):
    """Total happiness of a circular seating: each adjacent pair
    contributes in both directions. Index -1 wraps to the last seat,
    closing the circle."""
    total = 0
    for i, person in enumerate(arr):
        neighbour = arr[i - 1]
        total += getHappinessOfCouple((neighbour, person), tab)
        total += getHappinessOfCouple((person, neighbour), tab)
    return total
def getBestArrangement(arr, tab):
    """Return (best_arrangement, best_happiness) over all candidates.

    The original seeded the running maximum at -1, which reported the
    wrong cost whenever every arrangement scored -1 or lower (total
    happiness can be negative), and raised IndexError on an empty
    candidate list. Seeding from the data itself fixes both: an empty
    list now yields (None, None).
    """
    best = None
    best_score = None
    for a in arr:
        n = getHappinessOfArrangement(a, tab)
        if best_score is None or n > best_score:
            best_score = n
            best = a
    return best, best_score
def main( argv ):
    """Entry point: read the rule file named in argv[1], build the
    happiness table and print the best seating arrangement.
    Returns 0 on success, -1 when no input file was given.
    (Python 2 source: note the bare print statements.)"""
    if 1 == len( argv ):
        print "Please enter input file"
        return -1
    nb = 0
    # d[0] holds the list of names; d[1:] hold one score row per person.
    d = [[]]
    fd = open( argv[1], 'r' )
    for line in fd:
        couple = parseFile( line )
        # NOTE(review): parseFile may return None on a malformed line,
        # which would make insertCouple raise -- TODO confirm input is clean.
        insertCouple( couple, d )
    fd.close()
    arr = computeArrangements( d )
    a, c = getBestArrangement( arr, d )
    print c, "cost of", a
    return nb
if __name__ == "__main__":
count = main( sys.argv )
| true |
be3521d923cfa433022aa5f8f4290b6a7d8bae1c | Python | StoneCong/tools | /teaching_kids/001.your_name.py | UTF-8 | 116 | 3.765625 | 4 | [] | no_license | # this will ask for your name and then print it out for you.
name = input("What is your name? ")
print("Hi,", name)
| true |
c9a5faff9139475cc1deb3ca4a09f1d8989460eb | Python | ishine/SpectralCluster | /tests/utils_test.py | UTF-8 | 2,849 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | import unittest
import numpy as np
from spectralcluster import utils
class TestComputeAffinityMatrix(unittest.TestCase):
"""Tests for the compute_affinity_matrix function."""
def test_4by2_matrix(self):
matrix = np.array([[3, 4], [-4, 3], [6, 8], [-3, -4]])
affinity = utils.compute_affinity_matrix(matrix)
expected = np.array([[1, 0.5, 1, 0], [0.5, 1, 0.5, 0.5], [1, 0.5, 1, 0],
[0, 0.5, 0, 1]])
self.assertTrue(np.array_equal(expected, affinity))
class TestComputeSortedEigenvectors(unittest.TestCase):
"""Tests for the compute_sorted_eigenvectors function."""
def test_3by2_matrix(self):
matrix = np.array([[1, 2], [3, 4], [1, 3]])
affinity = utils.compute_affinity_matrix(matrix)
w, v = utils.compute_sorted_eigenvectors(affinity)
self.assertEqual((3,), w.shape)
self.assertEqual((3, 3), v.shape)
self.assertGreater(w[0], w[1])
self.assertGreater(w[1], w[2])
def test_ascend(self):
matrix = np.array([[1, 2], [3, 4], [1, 3]])
affinity = utils.compute_affinity_matrix(matrix)
w, v = utils.compute_sorted_eigenvectors(affinity, descend=False)
self.assertEqual((3,), w.shape)
self.assertEqual((3, 3), v.shape)
self.assertLess(w[0], w[1])
self.assertLess(w[1], w[2])
class TestComputeNumberOfClusters(unittest.TestCase):
"""Tests for the compute_number_of_clusters function."""
def test_5_values(self):
eigenvalues = np.array([1.0, 0.9, 0.8, 0.2, 0.1])
result, max_delta_norm = utils.compute_number_of_clusters(eigenvalues)
self.assertEqual(3, result)
self.assertTrue(np.allclose(4.0, max_delta_norm, atol=0.01))
def test_max_clusters(self):
max_clusters = 2
eigenvalues = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5])
result_1, max_delta_norm_1 = utils.compute_number_of_clusters(eigenvalues)
self.assertEqual(5, result_1)
self.assertTrue(np.allclose(1.2, max_delta_norm_1, atol=0.01))
result_2, max_delta_norm_2 = utils.compute_number_of_clusters(
eigenvalues, max_clusters=max_clusters)
self.assertEqual(max_clusters, result_2)
self.assertTrue(np.allclose(1.125, max_delta_norm_2, atol=0.01))
def test_ascend(self):
eigenvalues = np.array([1.0, 0.9, 0.8, 0.2, 0.1])
result, max_delta_norm = utils.compute_number_of_clusters(
eigenvalues, max_clusters=3, descend=False)
self.assertEqual(2, result)
self.assertTrue(np.allclose(0.88, max_delta_norm, atol=0.01))
class TestEnforceOrderedLabels(unittest.TestCase):
"""Tests for the enforce_ordered_labels function."""
def test_small_array(self):
labels = np.array([2, 2, 1, 0, 3, 3, 1])
expected = np.array([0, 0, 1, 2, 3, 3, 1])
result = utils.enforce_ordered_labels(labels)
self.assertTrue(np.array_equal(expected, result))
if __name__ == "__main__":
unittest.main()
| true |
a9710c0f4a245cd63a4bd92fa919ff228a1766f4 | Python | vectominist/MedNLP | /src/model/qa_model_rulebase_2.py | UTF-8 | 3,969 | 2.546875 | 3 | [
"MIT"
] | permissive | '''
File [ src/model/qa_model_rulebase_2.py ]
Author [ Chun-Wei Ho & Heng-Jui Chang (NTUEE) ]
Synopsis [ New rule-based QA method ]
'''
import numpy as np
import tqdm
import edit_distance
import re
import multiprocessing as mp
inv_chars = '錯|誤|有誤|不|沒|(非(?!常|洲))|(無(?!套))'
def is_inv(sent: str):
return bool(re.search(inv_chars, sent)), re.sub(inv_chars, '', sent)
def invert_sentiment(sent: str):
if bool(re.search(inv_chars, sent)):
# negative
if sent.find('沒有') >= 0:
sent = sent.replace('沒有', '有')
elif sent.find('不是') >= 0:
sent = sent.replace('不是', '是')
elif sent.find('不可能') >= 0:
sent = sent.replace('不可能', '可能')
sent = re.sub(inv_chars, '', sent)
else:
# positive
if sent.find('有') >= 0:
sent = sent.replace('有', '沒有')
elif sent.find('是') >= 0:
sent = sent.replace('是', '不是')
elif sent.find('可能') >= 0:
sent = sent.replace('可能', '不可能')
return sent
def get_sim(sent: str, doc: list):
match_sm = [edit_distance.SequenceMatcher(
i, sent, action_function=edit_distance.highest_match_action) for i in doc]
match_score = np.array([i.matches() for i in match_sm], dtype=np.float32)
match_score -= match_score[match_score <=
np.percentile(match_score, 80)].mean()
match_score[match_score < 0] = 0
_filter = [1, 0.4, 0.4, 0.2]
match_score = np.convolve(match_score, _filter, 'full')[:-len(_filter) + 1]
match_score[-1] += 1e-10
return match_score
def get_sim_with_inv(sent: str, doc: list):
match_sm = [edit_distance.SequenceMatcher(
i, sent, action_function=edit_distance.highest_match_action) for i in doc]
match_score = np.array([i.matches() for i in match_sm], dtype=np.float32)
match_len = []
for sm, s in zip(match_sm, doc):
blocks = [*sm.get_matching_blocks()]
if len(blocks) == 0:
match_len.append(0)
else:
match_len.append(blocks[-1][0] - blocks[0][0] + 1)
match_score = match_score * \
(match_score / (np.array(match_len) + 1e-10)) ** 0.5
match_score -= match_score.mean()
_filter = [1, 0.5, 0.4, 0.1]
match_score = np.convolve(match_score, _filter, 'full')[:-len(_filter) + 1]
sent_inv = is_inv(sent)[0]
inv = [is_inv(i)[0] ^ sent_inv for i in doc]
match_score[inv] *= -1
_filter = [1, 0.6, 0.36]
match_score = np.convolve(match_score, _filter, 'full')[:-len(_filter) + 1]
match_score[-1] += 1e-10
return match_score
class RuleBaseQA2():
def predict(self, dataset):
with tqdm.tqdm(dataset) as prog_bar:
with mp.Pool() as p:
answers = p.map(self._predict_single_question, prog_bar)
scores = [a[0] for a in answers]
is_inv = [a[1] for a in answers]
return np.array(scores), np.array(is_inv, dtype=bool)
def _predict_single_question(self, question):
doc = question['doc']
stem = question['stem']
choices = question['choices']
stem = re.sub("下列|關於|何者|敘述|民眾|請問|正確|的|醫師", '', stem)
inv, stem = is_inv(stem)
choices = [re.sub('|民眾|醫師|的|覺得|這件事|這', '', i)
for i in choices]
if inv:
ref_sim = get_sim(stem, doc)
sim = [get_sim_with_inv(i, doc) for i in choices]
score = np.corrcoef([ref_sim, *sim])[0, 1:]
score2 = ((score + 1) / 2) ** 0.6 * np.max(sim, axis=1)
return np.argmin(score2), True
else:
ref_sim = get_sim(stem, doc)
sim = [get_sim(i, doc) for i in choices]
score = np.cov([ref_sim, *sim])[0, 1:]
return np.argmax(score), False
if __name__ == '__main__':
pass
| true |
9439da95bdf627509cf8fe25d37f12226346b06e | Python | dawidbrzozowski/sentiment_analysis | /text_clsf_lib/preprocessing/vectorization/data_vectorizers.py | UTF-8 | 933 | 3.125 | 3 | [] | no_license | from text_clsf_lib.preprocessing.vectorization.output_vectorizers import OutputVectorizer
from text_clsf_lib.preprocessing.vectorization.text_vectorizers import TextVectorizer
class DataVectorizer:
"""
This class is meant to vectorize X and y (texts and outputs).
To perform that, it uses TextVectorizer and OutputVectorizer.
vectorize(...) method should return X and y vectorized.
"""
def __init__(self, text_vectorizer: TextVectorizer, output_vectorizer: OutputVectorizer):
self.text_vectorizer = text_vectorizer
self.output_vectorizer = output_vectorizer
def fit(self, texts, outputs):
self.text_vectorizer.fit(texts)
self.output_vectorizer.fit(outputs)
def vectorize(self, texts, outputs):
return self.text_vectorizer.vectorize(texts), self.output_vectorizer.vectorize(outputs)
def save(self, save_dir):
self.text_vectorizer.save(save_dir)
| true |
f5f25b3ed4946536b875ae34afa736b28792f7b6 | Python | mrirecon/SSA-FARY | /SupFig4/plot.py | UTF-8 | 2,900 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python3
# Copyright 2020. Uecker Lab, University Medical Center Goettingen.
#
# Author: Sebastian Rosenzweig, 2020
# sebastian.rosenzweig@med.uni-goettingen.de
#
# Script to reproduce SupFig4 of the following manuscript:
#
# Rosenzweig S et al.
# Cardiac and Respiratory Self-Gating in Radial MRI using an
# Adapted Singular Spectrum Analysis (SSA-FARY).
# IEEE Trans Med Imag. 2020
import sys
import os
sys.path.insert(0, os.path.join(os.environ['TOOLBOX_PATH'], 'python'))
from cfl import readcfl
from cfl import writecfl
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
color = ["#348ea9","#ef4846","#52ba9b","#f48b37", "#89c2d4","#ef8e8d","#a0ccc5","#f4b481"]
linestyle = ["-", "--", "-.", ":"]
marker = ["o", "^", "s", "8"]
import matplotlib.font_manager as font_manager
from matplotlib import rcParams
mpl.rcParams.update({'font.size': 22})
path = '../ssa_fary_utils/LinBiolinum_R.otf'
prop = font_manager.FontProperties(fname=path)
mpl.rcParams['font.family'] = prop.get_name()
import pandas as pd
args=["EOF_751", "PCA", "_A", "_B"]
EOF = np.squeeze(readcfl(str(args[0])))
PCA = np.squeeze(readcfl(str(args[1])))
DPI = 200
######
# EOF
######
#%%
rows=1
cols=1
fig, ax = plt.subplots(nrows=rows, ncols=cols, figsize=(1500/DPI, 900/DPI))
ax.grid()
ax.set_ylabel("Amplitude [a.u.]")
ax.set_xlabel("Samples [a.u.]")
norm = np.max(EOF[:,0:2])
ax.plot(np.real(EOF[:,0]/norm), color=color[0], linestyle=linestyle[0], linewidth=2, label="EOF 1")
ax.plot(np.real(EOF[:,1]/norm), color=color[4], linestyle=linestyle[0], linewidth=2, label="EOF 2")
plt.legend(loc=4)
fig.savefig(str(args[-2]) + ".png", dpi=DPI, bbox_inches='tight')
#%%
rows=1
cols=1
fig, ax = plt.subplots(nrows=rows, ncols=cols, figsize=(1500/DPI, 900/DPI))
ax.grid()
ax.set_ylabel("Amplitude [a.u.]")
ax.set_xlabel("Samples [a.u.]")
norm = np.max(EOF[:,2:4])
ax.plot(np.real(EOF[:,2]/norm), color=color[0], linestyle=linestyle[0], linewidth=2, label="EOF 3")
ax.plot(np.real(EOF[:,3]/norm), color=color[4], linestyle=linestyle[0], linewidth=2, label="EOF 4")
plt.legend(loc=4)
fig.savefig(str(args[-1]) + ".png", dpi=DPI, bbox_inches='tight')
######
# PCA
######
#%%
rows=1
cols=1
fig, ax = plt.subplots(nrows=rows, ncols=cols, figsize=(1500/DPI, 900/DPI))
ax.grid()
ax.set_ylabel("Amplitude [a.u.]")
ax.set_xlabel("Samples [a.u.]")
norm = np.max(PCA[:,0])
ax.plot(np.real(PCA[:,0]/norm), color=color[1], linestyle=linestyle[0], linewidth=2,)
fig.savefig(str(args[-2]) + "_PCA.png", dpi=DPI, bbox_inches='tight')
#%%
rows=1
cols=1
fig, ax = plt.subplots(nrows=rows, ncols=cols, figsize=(1500/DPI, 900/DPI))
ax.grid()
ax.set_ylabel("Amplitude [a.u.]")
ax.set_xlabel("Samples [a.u.]")
norm = np.max(PCA[:,1])
ax.plot(np.real(PCA[:,1]/norm), color=color[1], linestyle=linestyle[0], linewidth=2,)
fig.savefig(str(args[-1]) + "_PCA.png", dpi=DPI, bbox_inches='tight')
| true |
8683e4b2fb78ec57c1566e971614ab1878b9433c | Python | VP-0822/miniexcel | /src/excel.py | UTF-8 | 2,200 | 2.875 | 3 | [] | no_license | import JSONDeserializer
import workbook
class WorkbookHandler:
    """Handles workbook opening/closing jobs.

    A class-level registry maps file paths to opened workbook objects,
    so every handler instance can see which workbooks are open.
    """

    # Shared registry of opened workbooks: file path -> workbook object.
    opened_workbooks = {}

    def __init__(self, workbook_name):
        self.workbook_name = workbook_name
        self.workbook = None

    def load_workbook_data(self, data_file_path):
        """Load the workbook from a JSON file and register it as opened."""
        self.workbook_file_path = data_file_path
        # 'with' guarantees the file handle is closed even if reading fails.
        with open(data_file_path, 'r') as workbook_file:
            workbook_json_data = workbook_file.read().replace('\n', '')
        self.workbook = JSONDeserializer.deserialize_workbook(workbook_json_data)
        self.opened_workbooks[self.workbook_file_path] = self.workbook

    def write_workbook_data(self, workbook_file_path=None):
        """Serialize the workbook to JSON and write it to disk.

        Fixes a syntax error in the original ('if workbook_file_path is
        not None' was missing its colon). A given path becomes the
        handler's current file path; otherwise the previous one is reused.
        """
        if workbook_file_path is not None:
            self.workbook_file_path = workbook_file_path
        workbook_json_data = self.workbook.toJSON()
        with open(self.workbook_file_path, 'w') as workbook_file:
            workbook_file.write(workbook_json_data)
        self.opened_workbooks[self.workbook_file_path] = self.workbook

    def close_workbook(self, save_and_close=True):
        """Remove the workbook from the registry, saving first by default."""
        if save_and_close:
            self.write_workbook_data()
        del self.opened_workbooks[self.workbook_file_path]

    def get_all_opened_workbooks(self):
        """Return a view of all currently opened workbook objects."""
        return self.opened_workbooks.values()

    def get_workbook_processor(self, workbook_file_path=None, workbook=None):
        """Look up an opened workbook by object (preferred) or by file
        path. Returns None when neither argument is supplied."""
        if workbook is not None:
            return self.__get_workbook_processor_inner(workbook)
        elif workbook_file_path is not None:
            return self.__get_workbook_processor_for_filepath(workbook_file_path)
        else:
            return None

    def __get_workbook_processor_inner(self, workbook):
        # NOTE(review): assumes the workbook object exposes
        # .workbook_file_path -- TODO confirm against the workbook module.
        return self.opened_workbooks[workbook.workbook_file_path]

    def __get_workbook_processor_for_filepath(self, workbook_file_path):
        return self.opened_workbooks[workbook_file_path]
2d1ec10a765c9ae7deee7b322729adf03793c09b | Python | pcicales/MICCAI_2021_aglom | /utils/eval_utils.py | UTF-8 | 10,030 | 2.78125 | 3 | [] | no_license | import torch
import numpy as np
import matplotlib.pyplot as plt
# from sklearn.utils.multiclass import unique_labels
import os
def get_binary_accuracy(y_true, y_prob):
    """Binary accuracy: threshold probabilities at 0.5 and compare to labels."""
    assert y_true.ndim == 1 and y_true.size() == y_prob.size()
    predicted = y_prob > 0.5
    num_correct = (y_true == predicted).sum().item()
    return num_correct / y_true.size(0)
def compute_accuracy(target, output, classes):
    """
    Calculates the classification accuracy.
    :param target: Tensor of correct labels of size [batch_size]
    :param output: Tensor of model predictions of size [batch_size, num_classes]
    :return: prediction accuracy
    """
    num_samples = target.size(0)
    if classes == 2:
        # Binary case: output holds a single probability column.
        return get_binary_accuracy(target, output.squeeze(1))
    # Multi-class case: compare argmax predictions against the labels.
    predictions = torch.argmax(output, dim=1)
    num_correct = torch.sum(target == predictions)
    return num_correct.float() / num_samples
def mutual_info(mc_prob):
    """
    computes the mutual information
    :param mc_prob: List MC probabilities of length mc_simulations;
                    each of shape of shape [batch_size, num_cls]
    :return: mutual information of shape [batch_size, num_cls]
    """
    eps = 1e-5
    mean_prob = mc_prob.mean(axis=0)
    # Entropy of the mean prediction ...
    entropy_of_mean = -1 * np.sum(mean_prob * np.log(mean_prob + eps), axis=1)
    # ... minus the mean entropy of the individual MC predictions.
    mean_neg_entropy = np.sum(np.mean([p * np.log(p + eps) for p in mc_prob], axis=0), axis=1)
    return entropy_of_mean + mean_neg_entropy
def predictive_entropy(prob):
    """
    Entropy of the probabilities (to measure the epistemic uncertainty)
    :param prob: probabilities of shape [batch_size, C]
    :return: Entropy of shape [batch_size]
    """
    eps = 1e-5
    log_prob = np.log(prob + eps)
    return -1 * np.sum(log_prob * prob, axis=1)
def save_confusion_matrix(y_true, y_pred, classes, dest_path,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Greens):
    # NOTE(review): the entire implementation below is quoted out as a single
    # triple-quoted string, so this function currently does nothing and
    # returns None. It also depends on sklearn's confusion_matrix /
    # unique_labels, whose import is commented out at the top of the file --
    # re-enable that import before reviving this code.
    """
    # This function plots and saves the confusion matrix.
    # Normalization can be applied by setting `normalize=True`.
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    plt.savefig(dest_path)"""
def uncertainty_fraction_removal(y, y_pred, y_var, num_fracs, num_random_reps, save=False, save_dir=''):
    """Accuracy as a function of the fraction of most-certain samples retained.

    Sorts samples by predicted uncertainty (`y_var`) and measures accuracy on
    the lowest-uncertainty fraction, compared against `num_random_reps`
    random removals. Plots both curves and optionally saves the figure.
    Returns (uncertainty-sorted accuracies, per-rep random accuracies).
    """
    fractions = np.linspace(1 / num_fracs, 1, num_fracs)
    num_samples = y.shape[0]
    acc_unc_sort = np.array([])
    acc_pred_sort = np.array([])
    acc_random_frac = np.zeros((0, num_fracs))
    remain_samples = []
    # uncertainty-based removal: keep the `frac` most certain samples.
    inds = y_var.argsort()
    y_sorted = y[inds]
    y_pred_sorted = y_pred[inds]
    for frac in fractions:
        y_temp = y_sorted[:int(num_samples * frac)]
        remain_samples.append(len(y_temp))
        y_pred_temp = y_pred_sorted[:int(num_samples * frac)]
        acc_unc_sort = np.append(acc_unc_sort, np.sum(y_temp == y_pred_temp) / y_temp.shape[0])
    # random removal baseline (uses np.random; not seeded here).
    for rep in range(num_random_reps):
        acc_random_sort = np.array([])
        perm = np.random.permutation(y_var.shape[0])
        y_sorted = y[perm]
        y_pred_sorted = y_pred[perm]
        for frac in fractions:
            y_temp = y_sorted[:int(num_samples * frac)]
            y_pred_temp = y_pred_sorted[:int(num_samples * frac)]
            acc_random_sort = np.append(acc_random_sort, np.sum(y_temp == y_pred_temp) / y_temp.shape[0])
        acc_random_frac = np.concatenate((acc_random_frac, np.reshape(acc_random_sort, [1, -1])), axis=0)
    acc_random_m = np.mean(acc_random_frac, axis=0)
    acc_random_s = np.std(acc_random_frac, axis=0)
    # Plot: uncertainty-based curve vs random mean +/- one std band.
    fig, ax = plt.subplots(nrows=1, ncols=1)
    ax.plot(fractions, acc_unc_sort, 'o-', lw=1.5, label='uncertainty-based', markersize=3, color='royalblue')
    line1, = ax.plot(fractions, acc_random_m, 'o', lw=1, label='Random', markersize=3, color='black')
    ax.fill_between(fractions,
                    acc_random_m - acc_random_s,
                    acc_random_m + acc_random_s,
                    color='black', alpha=0.3)
    line1.set_dashes([1, 1, 1, 1])  # 2pt line, 2pt break, 10pt line, 2pt break
    ax.set_xlabel('Fraction of Retained Data')
    ax.set_ylabel('Prediction Accuracy')
    if save:
        plt.savefig(os.path.join(save_dir, 'uncertainty_fraction_removal.svg'))
    return acc_unc_sort, acc_random_frac
def combo_uncertainty_fraction_removal(y, y_pred, y_var, aug_pred, aug_var, num_fracs, num_random_reps, save=False, save_dir=''):
    """Fraction-removal comparison: baseline vs augmented model vs random.

    Same scheme as uncertainty_fraction_removal, but additionally sorts by
    the augmented model's uncertainty (`aug_var`) and scores its predictions
    (`aug_pred`). Only plots (and optionally saves); returns None.
    """
    fractions = np.linspace(1 / num_fracs, 1, num_fracs)
    num_samples = y.shape[0]
    acc_unc_sort = np.array([])
    acc_pred_sort = np.array([])
    acc_random_frac = np.zeros((0, num_fracs))
    remain_samples = []
    # uncertainty-based removal (baseline)
    inds = y_var.argsort()
    y_sorted = y[inds]
    y_pred_sorted = y_pred[inds]
    for frac in fractions:
        y_temp = y_sorted[:int(num_samples * frac)]
        remain_samples.append(len(y_temp))
        y_pred_temp = y_pred_sorted[:int(num_samples * frac)]
        acc_unc_sort = np.append(acc_unc_sort, np.sum(y_temp == y_pred_temp) / y_temp.shape[0])
    # augmented unc based removal
    acc_unc_sort_aug = np.array([])
    acc_pred_sort_aug = np.array([])
    acc_random_frac_aug = np.zeros((0, num_fracs))
    remain_samples_aug = []
    # uncertainty-based removal (sorted by the augmented model's variance)
    aug_inds = aug_var.argsort()
    y_sorted = y[aug_inds]
    aug_pred_sorted = aug_pred[aug_inds]
    for frac in fractions:
        y_temp = y_sorted[:int(num_samples * frac)]
        remain_samples_aug.append(len(y_temp))
        aug_pred_temp = aug_pred_sorted[:int(num_samples * frac)]
        acc_unc_sort_aug = np.append(acc_unc_sort_aug, np.sum(y_temp == aug_pred_temp) / y_temp.shape[0])
    # random removal baseline (scores the baseline predictions only)
    for rep in range(num_random_reps):
        acc_random_sort = np.array([])
        perm = np.random.permutation(y_var.shape[0])
        y_sorted = y[perm]
        y_pred_sorted = y_pred[perm]
        for frac in fractions:
            y_temp = y_sorted[:int(num_samples * frac)]
            y_pred_temp = y_pred_sorted[:int(num_samples * frac)]
            acc_random_sort = np.append(acc_random_sort, np.sum(y_temp == y_pred_temp) / y_temp.shape[0])
        acc_random_frac = np.concatenate((acc_random_frac, np.reshape(acc_random_sort, [1, -1])), axis=0)
    acc_random_m = np.mean(acc_random_frac, axis=0)
    acc_random_s = np.std(acc_random_frac, axis=0)
    # Plot all three curves; random shown as mean +/- one std band.
    fig, ax = plt.subplots(nrows=1, ncols=1)
    ax.plot(fractions, acc_unc_sort, 'o-', lw=1.5, label='uncertainty-based_base', markersize=3, color='royalblue')
    ax.plot(fractions, acc_unc_sort_aug, '-v', lw=1.5, label='uncertainty-based_aug', markersize=3, color='red')
    line1, = ax.plot(fractions, acc_random_m, '-^', lw=1, label='Random', markersize=3, color='black')
    ax.fill_between(fractions,
                    acc_random_m - acc_random_s,
                    acc_random_m + acc_random_s,
                    color='black', alpha=0.3)
    line1.set_dashes([1, 1, 1, 1])  # 2pt line, 2pt break, 10pt line, 2pt break
    ax.set_xlabel('Fraction of Retained Data')
    ax.set_ylabel('Prediction Accuracy')
    if save:
        plt.savefig(os.path.join(save_dir, 'uncertainty_fraction_removal_combo.svg'))
def normalized_uncertainty_toleration_removal(y, y_pred, y_var, num_points, save=False, save_dir=''):
    """Accuracy vs tolerated (min-max normalized) uncertainty threshold.

    For each threshold t in [0, 1], drops every sample whose normalized
    uncertainty is >= t and measures accuracy on the remainder. Also
    accumulates per-class retention counts/accuracies, plots the curve,
    and returns the accuracy-per-threshold array.
    """
    acc_uncertainty, acc_overall = np.array([]), np.array([])
    num_cls = len(np.unique(y))
    # Min-max normalize the uncertainties into [0, 1].
    y_var = (y_var - y_var.min()) / (y_var.max() - y_var.min())
    per_class_remain_count = np.zeros((num_points, num_cls))
    per_class_acc = np.zeros((num_points, num_cls))
    thresholds = np.linspace(0, 1, num_points)
    remain_samples = []
    for i, t in enumerate(thresholds):
        # Remove every sample at or above the tolerated uncertainty t.
        idx = np.argwhere(y_var >= t)
        y_temp = np.delete(y, idx)
        remain_samples.append(len(y_temp))
        y_pred_temp = np.delete(y_pred, idx)
        acc_uncertainty = np.append(acc_uncertainty, np.sum(y_temp == y_pred_temp) / y_temp.shape[0])
        if len(y_temp):
            per_class_remain_count[i, :] = np.array([len(y_temp[y_temp == c]) for c in range(num_cls)])
            per_class_acc[i, :] = np.array(
                [np.sum(y_temp[y_temp == c] == y_pred_temp[y_temp == c]) / y_temp[y_temp == c].shape[0] for c in
                 range(num_cls)])
    plt.figure()
    plt.plot(thresholds, acc_uncertainty, lw=1.5, color='royalblue', marker='o', markersize=4)
    plt.xlabel('Normalized Tolerated Model Uncertainty')
    plt.ylabel('Prediction Accuracy')
    if save:
        plt.savefig(os.path.join(save_dir, 'uncertainty_toleration_removal.png'))
    return(acc_uncertainty)
d13f8aa0f2fb53bb59ac4258abaa6cefe7dc6ce1 | Python | ssj24/TIL | /03_django/03_django_form/articles/templatetags/make_link.py | UTF-8 | 862 | 2.765625 | 3 | [] | no_license | from django import template
register = template.Library()  # register this module's filters with a template Library
@register.filter
def hashtag_link(word):
    """Render an article's content with each of its hashtags wrapped in a link.

    `word` is an article object exposing `.content` and a `.hashtags` relation;
    hashtags are matched as space-delimited tokens, so a trailing space is
    appended to the content first.
    """
    content = word.content + ' '  # trailing space so the final hashtag still matches 'tag '
    hashtags = word.hashtags.all()
    for hashtag in hashtags:
        # The URL must be hard-coded here (an f-string carries the pk).
        # FIX: the closing tag was the invalid '<a/>'; it must be '</a>'.
        content = content.replace(
            hashtag.content + ' ',
            f'<a href="/articles/{hashtag.pk}/hashtag/">{hashtag.content}</a> ')
        # Both the search text and the replacement keep the trailing space.
    return content
| true |
88e3daf1fd0e0a363f2749b1b434bfd2fb3a426a | Python | offbynull/offbynull.github.io | /docs/data/learn/Bioinformatics/input/ch4_code/src/helpers/HashableCollections.py | UTF-8 | 935 | 2.921875 | 3 | [] | no_license | from collections import Counter
class HashableCounter(Counter):
    """A Counter usable as a dict key / set member; hashed by its sorted items."""

    def __init__(self, v=None):
        # An empty iterable stands in for the no-argument case.
        super().__init__(() if v is None else v)

    def __hash__(self):
        return hash(tuple(sorted(self.items())))
class HashableList(list):
    """A list usable as a dict key / set member; hashed by its element tuple."""

    def __init__(self, v=None):
        # An empty iterable stands in for the no-argument case.
        super().__init__(() if v is None else v)

    def __hash__(self):
        return hash(tuple(self))
class HashableSet(set):
    """A set usable as a dict key / set member.

    FIX: hashes via frozenset so that equal sets always hash equally. The
    original hash(tuple(self)) depended on iteration order, which is not
    guaranteed to match for equal sets built through different histories,
    violating the hash/eq contract.
    """

    def __init__(self, v=None):
        if v is None:
            super().__init__()
        else:
            super().__init__(v)

    def __hash__(self):
        return hash(frozenset(self))
class HashableDict(dict):
    """A dict usable as a dict key / set member; hashed by its sorted items."""

    def __init__(self, v=None):
        # An empty iterable stands in for the no-argument case.
        super().__init__(() if v is None else v)

    def __hash__(self):
        return hash(tuple(sorted(self.items())))
bb378cc47edd1ec722339c192c645b36c7fa5ba6 | Python | chenshanghao/Interview_preparation | /Leetcode_250/Problem_70/my_solution.py | UTF-8 | 501 | 3.453125 | 3 | [] | no_license | class Solution(object):
def climbStairs(self, n):
    """
    :type n: int
    :rtype: int

    Number of distinct ways to climb n stairs taking 1 or 2 steps at a time.
    """
    # ways(i) = ways(i-1) + ways(i-2); only the last two values are needed,
    # so keep a rolling pair instead of the O(n) table the original built.
    # (Python ints are unbounded, so overflow is not a concern.)
    if n < 3:
        # Matches the seed table [0, 1, 2] of the original solution.
        return n
    prev, curr = 1, 2
    for _ in range(3, n + 1):
        prev, curr = curr, prev + curr
    return curr
| true |
d3c4fb21c01d834e1dfabe7ceb04e1cce801fca3 | Python | jianhui-ben/leetcode_python | /2013. Detect Squares.py | UTF-8 | 1,406 | 4.34375 | 4 | [] | no_license | # 2013. Detect Squares
# You are given a stream of points on the X-Y plane. Design an algorithm that:
#
# Adds new points from the stream into a data structure. Duplicate points are allowed and should be treated as different points.
# Given a query point, counts the number of ways to choose three points from the data structure such that the three points and the query point form an axis-aligned square with positive area.
# An axis-aligned square is a square whose edges are all the same length and are either parallel or perpendicular to the x-axis and y-axis.
#
# Implement the DetectSquares class:
#
# DetectSquares() Initializes the object with an empty data structure.
# void add(int[] point) Adds a new point point = [x, y] to the data structure.
# int count(int[] point) Counts the number of ways to form axis-aligned squares with point point = [x, y] as described above.
class DetectSquares:
    """Stream of 2-D points with axis-aligned-square counting (duplicates allowed)."""

    def __init__(self):
        # Multiset of points: (x, y) -> number of times it has been added.
        self.stored = Counter()

    def add(self, point: List[int]) -> None:
        """Record one occurrence of `point` = [x, y]."""
        self.stored[tuple(point)] += 1

    def count(self, point: List[int]) -> int:
        """Ways to pick three stored points forming a square with `point`."""
        qx, qy = point
        total = 0
        # Every stored point diagonal to the query fixes one candidate square;
        # the remaining corners are (px, qy) and (qx, py).
        for (px, py), multiplicity in self.stored.items():
            if px == qx or py == qy or abs(px - qx) != abs(py - qy):
                continue
            total += self.stored[(px, qy)] * self.stored[(qx, py)] * multiplicity
        return total
| true |
5540d0a34c9c5ecb8073e3c270f44d7c05145f7c | Python | kiligsmile/python | /05_高级数据类型/sml_16_字符串判断方法.py | UTF-8 | 374 | 3.921875 | 4 | [] | no_license | # 1.判断空白字符
# Demo: str.isspace() on whitespace-only vs mixed strings.
space_str = " "
print(space_str.isspace())
space_str = "a"
print(space_str.isspace())
space_str = "\t\n"
print(space_str.isspace())
# 1> None of isdecimal/isdigit/isnumeric can recognize decimals
# num_str="1.1"
# 2> a unicode string (superscript two)
num_str = "\u00b2"
# 3> Chinese numerals
num_str = "一千零一"
print(num_str)
# isdecimal < isdigit < isnumeric in terms of what they accept:
# only isnumeric returns True for Chinese numerals.
print(num_str.isdecimal())
print(num_str.isdigit())
print(num_str.isnumeric())
46a745821501963813500cfb57708797a3896abb | Python | thevalzo/dataAnalytics2018 | /focused_crawler/focused_crawler/spiders/GDB_spyder.py | UTF-8 | 3,376 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import scrapy
import unidecode
import MySQLdb
from bs4 import BeautifulSoup
class GDBSpider(scrapy.Spider):
    """Focused crawler for the giornaledibrescia.it portal search.

    Queries the search engine for each keyword/location pair and stores every
    result link (with its section and date) into the MySQL `results` table.
    """
    # Spider name
    name = "GDB"
    db = ""

    def start_requests(self):
        """Yield one search request per keyword, location and result page."""
        # Keywords to search in the search engine of GDB
        # keywords=["brescia"]
        keywords = ["città"]
        # Location variants used for filtering the search results
        locations = ["brescia", "Brescia", "BRESCIA"]
        # Sections for filtering the search results
        sections = ["Brescia e Hinterland"]
        # Base url of the GDB search endpoint
        url = "https://www.giornaledibrescia.it/ricerca-nel-portale?fq=tag_dimension.Location:"
        # Connect to DB
        self.db = MySQLdb.connect(host="127.0.0.1",
                                  user="root",
                                  passwd="root",
                                  db="data_analytics",
                                  charset='utf8')
        # Request result pages 1-499 for every keyword/location pair;
        # meta carries the query parameters for the response's processing.
        for keyword in keywords:
            for location in locations:
                for page in range(1, 500):
                    yield scrapy.Request(
                        url=url + str(location) + "&fq=tag_gdb.categ.root:" + sections[0]
                            + "&q=" + str(keyword) + "&page=" + str(page),
                        callback=self.parse,
                        meta={'dont_merge_cookies': True, 'keyword': keyword,
                              'location': location, 'section': sections[0]})

    def parse(self, response):
        """Extract result links from one search page and insert the new ones."""
        body = response.body
        # Query parameters saved in start_requests.
        actualLocation = response.meta.get('location')
        actualSection = response.meta.get('section')
        actualKeyword = response.meta.get('keyword')
        # Parse the html and strip navigation noise before collecting links.
        soup = BeautifulSoup(body, 'html.parser', from_encoding='ISO-Latin-1')
        soup.prettify()
        [s.extract() for s in soup("div", {"class": "text-center"})]
        dates = soup.findAll("span", {"class": "date"})
        [s.extract() for s in soup("div", {"class": "list-item"})]
        results = soup.find("ul", {"class": "panel-articles-list"})
        # Filter all the links
        results = results.findAll("a")
        for i in range(0, len(results)):
            # Build complete link
            url = "https://www.giornaledibrescia.it" + results[i].get("href")
            # SECURITY FIX: the url/date come from scraped (untrusted) pages,
            # so use DB-API parameterized queries instead of string-built SQL.
            cursor = self.db.cursor()
            cursor.execute(
                "SELECT url, keyword, location FROM results"
                " WHERE url = %s AND keyword = %s AND location = %s;",
                (url, actualKeyword, actualLocation))
            cursor.fetchall()
            if cursor.rowcount == 0:
                # Insert the newly discovered link.
                cursor = self.db.cursor()
                cursor.execute(
                    "INSERT INTO results (url, keyword, location, section, date)"
                    " VALUES (%s, %s, %s, %s, %s);",
                    (url, actualKeyword, actualLocation, actualSection,
                     str(dates[i].get_text())))
                self.db.commit()
| true |
ab12a5d11ddc81bd90c421af7bf8f99426a16345 | Python | antofik/captcha | /statistics.py | UTF-8 | 1,326 | 2.78125 | 3 | [] | no_license | import os
import json
from library import *
# Load the cached answers (image index -> list of character codes); fall
# back to an empty cache when the file is missing or unparsable.
try:
    with open('cache.txt', 'r') as f:
        cache = json.loads(f.read()) or {}
except Exception,e:
    cache = {}

# Ensure the output directory for extracted letter images exists.
if not os.path.exists("letters"):
    os.makedirs("letters")

# Per-letter accumulator: letter -> {'width', 'height', 'count'}.
s = {}
def check(image, index):
    # Accumulate letter width/height sums for one captcha image into `s`.
    # filter_image/find_letters come from `library` (star import above).
    global cache
    global s
    im, t = filter_image(image)
    b = find_letters(t.copy())
    # Only use images where exactly 5 letter boxes were detected.
    if len(b) != 5:
        return
    # Skip images we have no cached answer for.
    if not (index in cache):
        return
    answers = cache[index]
    for i in xrange(len(answers)):
        letter = chr(answers[i])
        if not (letter in s):
            s[letter] = {'width':0, 'height':0, 'count':0}
        # b[i] is the i-th letter's bounding box (x, y, width, height).
        x,y,w,h = b[i]
        s[letter]['width'] += w
        s[letter]['height'] += h
        s[letter]['count'] += 1
check('images/%s.jpg' % i, str(i))
sizes = [(letter, s[letter]['width']/s[letter]['count'], s[letter]['height']/s[letter]['count']) for letter in s]
sizes.sort()
print '\n--- High ---'
for l,w,h in sizes:
if ord(l) in cHigh:
print l,w,h
print '\n--- Wide ---'
for l,w,h in sizes:
if ord(l) in cWide:
print l,w,h
print '\n--- Others ---'
for l,w,h in sizes:
if ord(l) in cHigh:
pass
elif ord(l) in cWide:
pass
else:
print l,w,h
| true |
59cbb3aff9665ad2d7bfdf30db8be4d2329f27ed | Python | JosephLevinthal/Research-projects | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4162/codes/1800_2568.py | UTF-8 | 213 | 2.71875 | 3 | [] | no_license | from numpy import*
# Prints m rows: two runs of '*' (shrinking by one each row) separated by a
# growing band of 'oo' pairs. Reads the size from stdin ("tamanho" = size).
m = int(input("tamanho:"))
f = zeros(m, dtype=int)  # only used to drive the loop length via size(f)
d = "*"
e = "*"
g = ""  # accumulated but never printed -- presumably leftover scaffolding
o = ""
for i in range(size(f)):
    e = "*"
    d = "*"
    g = g + o
    d = "*"*m
    e = "*"*m
    print(d+o+e)
    m = m - 1
    o = o +"oo"
993d210b2086cefc927fefb05c593c920726aa68 | Python | ForceCry/iem | /scripts/coop/compute_climate.py | UTF-8 | 3,858 | 2.546875 | 3 | [] | no_license | # Computes the Climatology and fills out the table!
import mx.DateTime
import iemdb
import psycopg2.extras
import network
import sys
# Network table of all COOP climate-site networks considered by this script.
nt = network.Table(("IACLIMATE", "MNCLIMATE", "NDCLIMATE", "SDCLIMATE",
    "NECLIMATE", "KSCLIMATE", "MOCLIMATE", "ILCLIMATE", "WICLIMATE",
    "MICLIMATE", "INCLIMATE", "OHCLIMATE", "KYCLIMATE"))

# Two cursors on the same connection: one to stream rows, one to write.
COOP = iemdb.connect('coop')
ccursor = COOP.cursor(cursor_factory=psycopg2.extras.DictCursor)
ccursor2 = COOP.cursor()

THISYEAR = mx.DateTime.now().year
# Date windows [sts, ets) for each climatology table variant.
META = {
    'climate51' : {'sts': mx.DateTime.DateTime(1951,1,1),
                   'ets': mx.DateTime.DateTime(THISYEAR,1,1)},
    'climate71' : {'sts': mx.DateTime.DateTime(1971,1,1),
                   'ets': mx.DateTime.DateTime(2001,1,1)},
    'climate' : {'sts': mx.DateTime.DateTime(1893,1,1),
                 'ets': mx.DateTime.DateTime(THISYEAR,1,1)},
    'climate81' : {'sts': mx.DateTime.DateTime(1981,1,1),
                   'ets': mx.DateTime.DateTime(2011,1,1)}
}
def daily_averages(table):
    """
    Compute and Save the simple daily averages

    For each state's alldata table, aggregates per-station, per-calendar-day
    statistics over the `table`'s date window (from META) and rewrites the
    corresponding rows of the climatology table.
    """
    for st in ['nd','sd','ne','ks','mo','ia','mn','wi','il','in','oh','mi','ky']:
        print 'Computing Daily Averages for state:', st
        # Aggregate by station and calendar day (mapped onto year 2000).
        sql = """
        SELECT '2000-'|| to_char(day, 'MM-DD') as d, station,
        avg(high) as avg_high, avg(low) as avg_low,
        max(high) as max_high, min(high) as min_high,
        max(low) as max_low, min(low) as min_low,
        max(precip) as max_precip, avg(precip) as precip,
        avg(snow) as snow, count(*) as years,
        avg( gdd50(high,low) ) as gdd50, avg( sdd86(high,low) ) as sdd86,
        max( high - low) as max_range, min(high - low) as min_range
        from alldata_%s WHERE day >= '%s' and day < '%s'
        GROUP by d, station
        """ % (st, META[table]['sts'].strftime("%Y-%m-%d"),
               META[table]['ets'].strftime("%Y-%m-%d") )
        ccursor.execute(sql)
        for row in ccursor:
            id = row['station']
            # Skip stations not present in the network metadata table.
            if not id.upper() in nt.sts.keys():
                continue
            # Delete-then-insert to replace the climatology row.
            sql = """DELETE from %s WHERE station = '%s' and valid = '%s' """ % (
                table, id, row['d'])
            ccursor2.execute(sql)
            sql = """ INSERT into """+ table +""" (station, valid, high, low, precip, snow,
            max_high, max_low, min_high, min_low, max_precip, years, gdd50, sdd86, max_range,
            min_range) VALUES ('%(station)s', '%(d)s', %(avg_high)s, %(avg_low)s, %(precip)s,
            %(snow)s, %(max_high)s, %(max_low)s, %(min_high)s, %(min_low)s, %(max_precip)s,
            %(years)s, %(gdd50)s, %(sdd86)s, %(max_range)s, %(min_range)s)""" % row
            ccursor2.execute(sql)
    COOP.commit()
def do_date(table, row, col, agg_col):
    # For the extreme already stored in row[agg_col], look up the earliest
    # year that value occurred on this calendar day and record it in the
    # climatology table's <agg_col>_yr column.
    sql = """
    SELECT year from alldata_%s where station = '%s' and %s = %s and sday = '%s'
    and day >= '%s' and day < '%s'
    ORDER by year ASC
    """ % (row['station'][:2].lower(), row['station'], col, row[agg_col],
           row['valid'].strftime("%m%d"),
           META[table]['sts'].strftime("%Y-%m-%d"),
           META[table]['ets'].strftime("%Y-%m-%d"))
    ccursor2.execute(sql)
    row2 = ccursor2.fetchone()
    if row2 is not None:
        sql = """ UPDATE %s SET %s_yr = %s WHERE station = '%s' and valid = '%s' """ % (
            table, agg_col, row2[0], row['station'], row['valid'])
        ccursor2.execute(sql)
def set_daily_extremes(table):
    # Walk every climatology row and backfill the year each stored extreme
    # (max/min high, max/min low, max precip) was set.
    sql = """
    SELECT * from %s
    """ % (table,)
    ccursor.execute(sql)
    for row in ccursor:
        do_date(table, row, 'high', 'max_high')
        do_date(table, row, 'high', 'min_high')
        do_date(table, row, 'low', 'max_low')
        do_date(table, row, 'low', 'min_low')
        do_date(table, row, 'precip', 'max_precip')
    COOP.commit()
# Entry point: the target climatology table name comes from argv[1].
# The averages pass is disabled; only the extremes backfill runs.
#daily_averages(sys.argv[1])
set_daily_extremes(sys.argv[1])
COOP.commit()
ccursor.close()
ccursor2.close()
| true |
6e015350a30b5a7e234623d7f771745ff1278133 | Python | HanifanNahwi/Python-Projects-Protek | /Chapter 8/Project13.py | UTF-8 | 735 | 3.078125 | 3 | [] | no_license | nilai = [{'nim' : 'A01', 'nama' : 'Amir', 'mid' : 50, 'uas' : 80},
{'nim' : 'A02', 'nama' : 'Budi', 'mid' : 40, 'uas' : 90},
{'nim' : 'A03', 'nama' : 'Cici', 'mid' : 50, 'uas' : 50},
{'nim' : 'A04', 'nama' : 'Dedi', 'mid' : 20, 'uas' : 30},
{'nim' : 'A05', 'nama' : 'Fifi', 'mid' : 70, 'uas' : 40}]
def tertinggi(a):
    """Print the student with the highest weighted score (mid + 2*uas)/3."""
    best_score = 0
    best = {}
    for mhs in a:
        uas = mhs.get("uas")
        mid = mhs.get("mid")
        score = (mid + 2 * uas) / 3
        if score > best_score:
            best_score = score
            best = {"nim": mhs.get("nim"), "nama": mhs.get("nama")}
    print("Nilai tertinggi draih oleh mahasiswa bernama ", best["nama"], "dengan NIM", best["nim"])
# Report the top student from the module-level `nilai` records.
tertinggi(nilai)
| true |
76012fa4f7af19a8315927d4e5e62797be029cc9 | Python | LorenzoPratesi/DataSecurity | /Set_1/text_frequency.py | UTF-8 | 5,330 | 3.5 | 4 | [] | no_license | import re
import math
import matplotlib.pyplot as plot
def get_text():
    """Read the sample text file and return it with newlines stripped."""
    # FIX: `with` closes the file handle; the original left it open.
    with open("texts/Moby_Dick_chapter_one.txt", 'r') as f:
        return f.read().replace('\n', '')
def trim_text(text):
    """Uppercase `text` and strip punctuation, dashes and whitespace."""
    upper = text.upper()
    # Collapse punctuation runs (plus any trailing spaces) to a single space.
    upper = re.sub(r"['\",.;:_@#()”“’—?!&$\n]+ *", " ", upper)
    no_dashes = upper.replace("-", " ")
    return no_dashes.replace(" ", "")
def get_letter_count(message, m):
    """Count non-overlapping m-grams of `message`; a partial tail is ignored."""
    counts = {}
    # Step through the message m characters at a time; the range bound
    # excludes any final fragment shorter than m.
    for start in range(0, len(message) - m + 1, m):
        gram = message[start:start + m]
        counts[gram] = counts.get(gram, 0) + 1
    return counts
def get_item_at_index_zero(items):
    """Sort key: the first element of a (key, value) pair."""
    first, *_rest = items
    return first
def get_item_at_index_one(items):
    """Sort key: the second element of a (key, value) pair."""
    _first, second, *_rest = items
    return second
def get_frequency_order(message, m):
    """Return (m-gram, count) pairs sorted by descending count.

    Side effect: renders and shows a bar chart of the leading entries.
    """
    # First, get a dictionary of each letter and its frequency count:
    letter_to_freq = get_letter_count(message, m)
    # convert the letter_to_freq dictionary to a list of
    # tuple pairs (key, value), then sort them:
    freq_pairs = list(letter_to_freq.items())
    freq_pairs.sort(key=get_item_at_index_one, reverse=True)
    xlist, ylist = set_xy_plot(freq_pairs)
    createPlot(xlist, ylist, 'letter', 'frequency', 'LetterFrequency', get_number_x_data(m))
    return freq_pairs
def get_m_grams_distributions(message, m):
    """Return (m-gram, relative frequency) pairs sorted by descending probability.

    Side effect: renders and shows a bar chart of the leading entries.
    """
    letter_to_freq = get_letter_count(message, m)
    total_grams = sum(letter_to_freq.values())
    # Normalize counts into an empirical probability distribution.
    grams_dict = {}
    for k in letter_to_freq.keys():
        grams_dict[k] = letter_to_freq[k] / total_grams
    sorted_grams_dict = list(grams_dict.items())
    sorted_grams_dict.sort(key=get_item_at_index_one, reverse=True)
    xlist, ylist = set_xy_plot(sorted_grams_dict)
    createPlot(xlist, ylist, 'letter', 'probability', 'distribution', get_number_x_data(m))
    return sorted_grams_dict
def get_number_x_data(m):
    """Number of bars to show for m-grams: fewer bars as m grows (default 10)."""
    bars_per_m = {1: 26, 2: 20, 3: 17, 4: 13}
    return bars_per_m.get(m, 10)
def set_xy_plot(dict):
    """Split a list of (x, y) pairs into parallel x/y lists for plotting."""
    xlist = [pair[0] for pair in dict]
    ylist = [pair[1] for pair in dict]
    return xlist, ylist
def index_of_confidence(message, m):
    """Index of coincidence of the m-gram distribution of `message`."""
    counts = get_letter_count(message, m)
    total_grams = sum(counts.values())
    ic = 0.0
    for count in counts.values():
        # Probability that two grams drawn without replacement coincide.
        ic += (count * (count - 1)) / (total_grams * (total_grams - 1))
    return ic
def entropy(message, m):
    """Shannon entropy (in bits) of the m-gram distribution of `message`."""
    counts = get_letter_count(message, m)
    n = math.ceil(len(message) / m)
    acc = 0.0
    for count in counts.values():
        acc += (count / n) * math.log(count / n, 2)
    return -acc
def createPlot(x_data, y_data, x_label, y_label, plot_title, number_x_data=26):
    # Show a bar chart of at most `number_x_data` leading entries;
    # pass number_x_data=None to plot everything.
    if number_x_data is not None:
        x_data = x_data[0:number_x_data]
        y_data = y_data[0:number_x_data]
    plot.bar(x_data, y_data)
    plot.xlabel(x_label)
    plot.ylabel(y_label)
    plot.title(plot_title)
    plot.show()
# Print the main menu and asks user input
def menu():
    """Display the main menu and return a validated choice in 1..4."""
    header = ("\n---- Text Frequencies Analysis ----\n\n"
              "1) Histogram of the frequency of the 26 letters.\n"
              "2) Empirical distribution of m-grams.\n"
              "3) Index of coincidence and entropy of the m-grams distribution.\n"
              "4) Quit.\n")
    while True:
        print(header)
        try:
            choice = int(input("Select a function to run: "))
        except ValueError:
            # Non-numeric input.
            print("\nYou must enter a number from 1 to 4\n")
            continue
        if 1 <= choice <= 4:
            return choice
        # Numeric but out of range.
        print("\nYou must enter a number from 1 to 4\n")
def main():
    """Interactive driver: load the text once, then loop over menu choices."""
    # Read Moby_Dick_chapter_one.txt and sanitize for the analysis
    text = trim_text(get_text())
    # text = trim_text("hello world")
    while True:
        choice = menu()
        if choice == 1:
            m = int(input("\nInsert the parameter m for the m-grams: "))
            letter_order = get_frequency_order(text, m)
            print("\nLetter ordered by frequencies: ", letter_order)
            print("\nHistogram has been plotted...")
            input("\nPress Enter to continue.")
        elif choice == 2:
            m = int(input("\nInsert the parameter m for the m-grams: "))
            distrib = get_m_grams_distributions(text, m)
            print("\nEmpirical distribution of q-grams:\n", distrib)
            input("\nPress Enter to continue.")
        elif choice == 3:
            # m = int(input("\nInsert the parameter m for the m-grams: "))
            # Report IC and entropy for m = 1..4.
            for m in range(1, 5):
                ic = index_of_confidence(text, m)
                print("\nIndex of coincidence of the ", m, "-grams distribution: ", ic)
                print("Entropy of the m-grams distribution: ", entropy(text, m))
            input("\nPress Enter to continue.")
        elif choice == 4:
            break


if __name__ == '__main__':
    main()
| true |
b30ba08b9a017e7baa2c097816b427bff1ce30de | Python | tmibvishal/healTrip | /auth_queries.py | UTF-8 | 1,695 | 2.78125 | 3 | [] | no_license | import db
def new_user(username, email, password):
    # Insert a new account; only the literal username 'admin' gets admin
    # rights. NOTE(review): the password is stored exactly as given --
    # presumably hashed by the caller; verify.
    if(username=='admin'):
        db.commit("insert into users(uname,email,pass,is_admin) values(%s, %s, %s, %s)", (username, email, password, True))
    else:
        db.commit("insert into users(uname,email,pass,is_admin) values(%s, %s, %s, %s)", (username, email, password, False))
def get_user_from_email(email):
    # Return the single user row matching `email`, or None when absent or
    # ambiguous (more than one match).
    users = db.fetch("select * from users where email=%s", (email, ))
    if len(users) != 1:
        return None
    return users[0]


def get_user_from_uname(uname):
    # Return the single user row matching `uname`, or None when absent or
    # ambiguous.
    users = db.fetch("select * from users where uname=%s", (uname, ))
    if len(users) != 1:
        return None
    return users[0]


def get_user_from_userid(userid):
    # Return the single user row matching `userid`, or None when absent.
    users = db.fetch("select * from users where userid=%s", (userid, ))
    if len(users) != 1:
        return None
    return users[0]
def update_user_details(userid, uname, email):
    # Overwrite the user's name and email.
    db.commit("update users set uname=%s, email=%s where userid=%s", (uname, email, userid))


def update_password(userid, password):
    # Overwrite the stored password value for the user.
    db.commit("update users set pass=%s where userid=%s", (password, userid))


def delete_user(userid):
    # Permanently remove the user row.
    db.commit("delete from users where userid=%s", (userid, ))
def get_all_diabled_cities():
    # (sic: 'diabled') Return every row of disabled_cities; callers depend
    # on this name, so it is kept as-is.
    cities = db.fetch("select * from disabled_cities;")
    return cities


def disable_city(city):
    """disable a city if not already disabled"""
    # INSERT ... WHERE NOT EXISTS keeps the operation idempotent.
    query = """
    INSERT INTO disabled_cities (city)
    SELECT * FROM (SELECT %s) AS tmp
    WHERE NOT EXISTS (
        SELECT city FROM disabled_cities WHERE city = %s
    ) LIMIT 1;
    """
    db.commit(query, (city, city, ))
def enable_city(city):
    """Re-enable a city by removing it from disabled_cities."""
    # FIX: stray '| true |' residue at the end of this statement was removed;
    # it made the line a syntax/name error.
    db.commit("delete from disabled_cities where city=%s;", (city, ))
6d8c9be56d6e219218a9b5f19451edefbe551c92 | Python | devin-liu/LTV | /CohortAnalysis.py | UTF-8 | 3,017 | 3.171875 | 3 | [] | no_license | # Import modules
import pandas as pd
import numpy as np
from datetime import datetime, timedelta, date
# Load in data set by reading the CSV
# Presumably contains 'Plan Start Date', 'Plan Cancel Date',
# 'Monthly Payment' and 'Customer ID' columns -- used below; verify.
my_data = pd.read_csv('MRR Company Data Set.csv')
def get_datetime_from_string(date_string):
    """Parse an 'M/D/YY' date string into a datetime."""
    parsed = datetime.strptime(date_string, '%m/%d/%y')
    return parsed
def get_order_period_from_date(date_object):
    """Format a date as its 'YYYY-MM' order/cohort period."""
    period = date_object.strftime('%Y-%m')
    return period
def get_total_revenue_from_row(row):
    """Total revenue for one subscription row.

    Counts 28-day billing cycles between plan start and cancel and
    multiplies by the monthly payment.
    """
    active_days = (row['Plan Cancel'] - row['Plan Start']).days
    cycles = active_days / 28
    return cycles * row['Monthly Payment']
# Derive datetime columns and per-row revenue, then aggregate MRR and
# customer counts by order period.
my_data['Plan Start'] = my_data['Plan Start Date'].apply(get_datetime_from_string).values
my_data['Plan Cancel'] = my_data['Plan Cancel Date'].apply(get_datetime_from_string)
my_data['Order Period'] = my_data['Plan Start'].apply(get_order_period_from_date)
my_data['Total Revenue'] = my_data.apply(get_total_revenue_from_row, axis=1)
# groupby(level=0) groups by the row index, so min() is per-row here.
my_data['Cohort Group'] = my_data.groupby(level=0)['Plan Start'].min().apply(lambda x: x.strftime('%Y-%m'))
groups = my_data.groupby(['Order Period']).agg({
    'Customer ID': 'count',
    'Total Revenue': 'sum'
})
groups.reset_index(inplace=True)
print(groups.head())
# NOTE: '%matplotlib inline' is IPython magic -- this file is a notebook
# export and will not run as a plain Python script while this line remains.
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
def dualAxis2Lines(timeAxis, y, z, title, axis_1_label, axis_2_label):
    # Plot y (left axis, with a filled area) and z (right axis) against the
    # same time axis, then save the figure to pic1.png.
    # `timeAxis` is assumed to be a pandas datetime Series (it uses .dt) --
    # TODO confirm against the caller.
    sns.set_style("darkgrid")
    colors =['xkcd:sky blue','green', 'coral']
    fig, ax = plt.subplots()
    fig.set_size_inches(14,8)
    ax.plot(timeAxis,y, color=colors[0], linewidth=4, label=axis_1_label)
    ax.legend(bbox_to_anchor=(1.05, 1), loc=2)
    ax.fill_between(timeAxis.dt.to_pydatetime(), y, color=colors[1], alpha=0.3) #Create an area chart
    ax.set_ylabel(axis_1_label, fontsize=18, color=colors[0])
    # Secondary y-axis sharing the same x-axis.
    ax2 = ax.twinx()
    ax2.plot(timeAxis,z, color=colors[2], linewidth=4, label=axis_2_label)
    ax2.legend(bbox_to_anchor=(1.05, 1.05), loc=2)
    ax2.set_ylabel(axis_2_label, fontsize=18, color=colors[2])
    fig.autofmt_xdate()
    fig.suptitle(title, fontsize=18)
    fig.savefig('pic1.png')
# Render the MRR-vs-customer-count dual-axis chart for the grouped data.
title = 'MRR and Customers Count'
axis_1_label = 'MRR'
axis_2_label = 'Customers Count'
dualAxis2Lines(groups["Order Period"], groups["Total Revenue"], groups["Customer ID"], title, axis_1_label, axis_2_label)
# grouped = planOne.groupby(['Cohort Group', 'Order Period'])
# cohorts = grouped.agg({'Customer ID': pd.Series.nunique,
# 'Monthly Payment': np.sum})
# cohorts.rename(columns={'Customer ID': 'TotalUsers'}, inplace=True)
# def cohort_period(df):
# """
# Creates a `CohortPeriod` column, which is the Nth period based on the user's first purchase.
# Example
# -------
# Say you want to get the 3rd month for every user:
# df.sort(['UserId', 'OrderTime', inplace=True)
# df = df.groupby('UserId').apply(cohort_period)
# df[df.CohortPeriod == 3]
# """
# df['Cohort Period'] = np.arange(len(df)) + 1
# return df
# cohorts = cohorts.groupby(level=0).apply(cohort_period)
# print(cohorts['TotalUsers'].unstack(0))
| true |
703d36e44d1f053dfadf455aab11a46307603f49 | Python | barrosfabio/result-analysis | /convert_to_one.py | UTF-8 | 1,565 | 2.609375 | 3 | [] | no_license | import pandas as pd
import os
columns = ['none', 'ros', 'smote', 'borderline', 'adasyn', 'smote-enn', 'smote-tomek']


def write_df_csv(path, results_df):
    """Write the first seven columns of ``results_df`` to ``path``.

    Columns are taken positionally (0..6) in the order of the sampler
    names in ``columns`` and relabelled accordingly; the CSV is written
    with ';' as the separator.
    """
    relabelled = pd.DataFrame(columns=columns)
    for position, sampler in enumerate(columns):
        relabelled[sampler] = results_df.iloc[:, position]
    relabelled.to_csv(path, sep=';')
def convert_to_one(results_directory, classifiers):
    """Consolidate per-run experiment results into one CSV per classifier.

    For each classifier, reads every run's global experiment_results.csv
    under ``results_directory``, transposes it, keeps only rows whose
    index contains 'f1_score', and writes the result into the
    ``consolidated`` subfolder via write_df_csv.

    NOTE(review): paths are joined with hard-coded backslashes, so this
    is Windows-only; os.path.join would be portable.
    NOTE(review): DataFrame.append was removed in pandas 2.0 — this loop
    needs pd.concat on modern pandas.
    """
    final_results_directory = results_directory + '\\consolidated\\'
    if not os.path.isdir(final_results_directory):
        os.mkdir(final_results_directory)
    for classifier in classifiers:
        results_folder = results_directory + '\\' +classifier
        results_df = pd.DataFrame(columns=columns)
        final_file_name = final_results_directory + classifier + '.csv'
        dir_list = os.listdir(results_folder)
        # NOTE(review): 'dir' shadows the builtin of the same name.
        for dir in dir_list:
            file_path = results_folder + '\\' + dir + '\\global\\experiment_results.csv'
            print(file_path)
            data_frame = pd.read_csv(file_path, sep=';')
            transposed_data_frame = data_frame.transpose()
            results_df = results_df.append(transposed_data_frame)
        # Keep only the f1_score rows before writing the consolidated file.
        results_df = results_df.filter(like='f1_score', axis=0)
        write_df_csv(final_file_name, results_df)
| true |
ad2867a3ba17b7310c7d9ade5cfcedadcb540e89 | Python | Ran4/py-contract-disallower | /tests/test.py | UTF-8 | 1,332 | 3.015625 | 3 | [] | no_license | import unittest
from disallower import disallow, require, Warn, Ignore
from base import ContractWarning, ContractException
## Predicate functions:
def negative_values(x: int) -> bool:
    """Predicate used by the contract decorators: True for x below zero."""
    if x < 0:
        return True
    return False
def valid_lang(s: str) -> bool:
    """Predicate: True when *s* (case-insensitive) is a supported language code."""
    supported = {"sv", "en"}
    return s.lower() in supported
## Test function definitions:
@disallow(age=negative_values)
@require(lang=valid_lang)
def greet_person(age: int, lang: str):
    """Return a Swedish greeting chosen by *age*; arguments are contract-checked."""
    if age > 80:
        return "Du är gammal!"
    return "Du är ung!"
@disallow(age=negative_values)
def greet_person_raise_on_negative_age(age: int, lang: str):
    """Like greet_person, but only the negative-age contract is applied."""
    if age > 80:
        return "Du är gammal!"
    return "Du är ung!"
@disallow(age=negative_values)
@require(lang=valid_lang, lung=valid_lang, on_missing_policy=Warn)
def greet_person_warn_on_missing_kwarg(age: int, lang: str):
    """Requires an unknown 'lung' kwarg with Warn policy: missing it only warns."""
    if age > 80:
        return "Du är gammal!"
    return "Du är ung!"
## Test cases
class TestGreetPerson(unittest.TestCase):
    """Exercises the disallow/require contract decorators on greet_person*."""
    def test_valid_call(self):
        # A call satisfying both contracts must not raise.
        greet_person(age=30, lang="sv")
    def test_rasies_on_negative_age(self):
        # NOTE(review): method name has a typo ('rasies' -> 'raises'); it
        # is still discovered by unittest thanks to the test_ prefix.
        with self.assertRaises(ContractException):
            greet_person_raise_on_negative_age(age=-30)
    def test_warns(self):
        # Missing 'lung' kwarg under on_missing_policy=Warn must only warn.
        with self.assertWarns(ContractWarning):
            greet_person_warn_on_missing_kwarg(age=30, lang="sv")
# Entry point: run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| true |
1c5f970757b4fe8a79d0220f0dd3dffbf5683dd2 | Python | ntpz/rbm2m | /rbm2m/action/record_importer.py | UTF-8 | 3,718 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
import logging
from record_manager import RecordManager
from scan_manager import ScanManager
import scraper
from rbm2m.util import to_str
logger = logging.getLogger(__name__)
class RecordImporter(object):
    """Imports scraped records for a single scan.

    Wraps one scraper run: fetches a result page for the scan's genre,
    de-duplicates the records, attaches already-known records to the
    scan and creates the new ones. Managers are built lazily from the
    shared DB session.
    """
    def __init__(self, session, scan):
        # session: DB session shared with the managers; scan: current scan row
        self.session = session
        self.scan = scan
        self._record_manager = None
        self._scan_manager = None
        self._next_page = None
        self._has_images = []
    @property
    def record_manager(self):
        # Lazily constructed RecordManager bound to our session.
        if self._record_manager is None:
            self._record_manager = RecordManager(self.session)
        return self._record_manager
    @property
    def scan_manager(self):
        # Lazily constructed ScanManager bound to our session.
        if self._scan_manager is None:
            self._scan_manager = ScanManager(self.session)
        return self._scan_manager
    @property
    def has_images(self):
        """
        List of ids of records with images
        """
        return self._has_images
    @property
    def next_page(self):
        """
        Number of next page in scan or none if no more pages
        """
        return self._next_page
    def run(self, scan, page_no):
        """
        Run scrape and process results

        NOTE(review): the ``scan`` parameter shadows ``self.scan``; the
        genre title comes from the parameter but records are attached to
        ``self.scan`` — confirm callers always pass the same object.
        """
        scrape = scraper.Scrape()
        try:
            scrape.run(scan.genre.title, page_no)
        except scraper.ScrapeError as e:
            # Convert scraper failures into our unrecoverable import error.
            raise RecordImportError(str(e))
        self.update_record_count(page_no, scrape.rec_count)
        self._next_page = scrape.next_page
        self.process_records(scrape.records)
    def process_records(self, records):
        """
        Add existing records to scan and process new ones
        """
        uniquify(records)
        raw_record_ids = [rec['id'] for rec in records]
        # First filter out all records already present in current scan
        record_ids = self.scan_manager.records_not_in_scan(self.scan.id, raw_record_ids)
        records = filter(lambda r: r['id'] in record_ids, records)
        # find records already present in db
        old_records = self.record_manager.find_existing(record_ids)
        old_ids = [rec.id for rec in old_records]
        # Add existing records to scan
        self.scan.records.extend(old_records)
        # Remaining ids are new: create records and attach them to the scan.
        for rec_dict in records:
            if rec_dict['id'] not in old_ids:
                rec_dict['genre_id'] = self.scan.genre_id
                rec = self.new_record(rec_dict)
                self.scan.records.append(rec)
    def new_record(self, rec_dict):
        """
        Create new record and add images to self._has_images
        """
        # 'has_images' is a scrape-only flag, not a record field: pop it.
        has_images = rec_dict.pop('has_images')
        if has_images:
            self._has_images.append(rec_dict['id'])
        rec = self.record_manager.from_dict(rec_dict)
        rec.genre_id = self.scan.genre_id
        msg = to_str("Added record #{}".format(rec.id))
        logger.debug(msg)
        return rec
    def update_record_count(self, page_no, rec_count):
        """
        Update estimated records count every 10 pages
        """
        if page_no is None or page_no % 10 == 0:
            self.scan.est_num_records = rec_count
def uniquify(records):
    """
    Remove records with duplicate ids from list, keeping the first
    occurrence. Modifies list in-place.

    The original implementation popped items while enumerating the same
    list, which skips the element following every removal (adjacent
    duplicates survived). This version builds the kept items separately
    and slice-assigns them back so existing references stay valid.

    :param records: list of records to uniquify
    :return: None
    """
    seen = set()
    kept = []
    for record in records:
        if record['id'] in seen:
            # Logger.warning: .warn is a deprecated alias of .warning.
            logger.warning("Duplicate record #{}, discarding".format(record['id']))
        else:
            seen.add(record['id'])
            kept.append(record)
    records[:] = kept
class RecordImportError(Exception):
    """
    Unrecoverable record import error, raised by RecordImporter.run
    when the underlying scrape fails.
    """
    pass
| true |
8c612752cbc0760323bb904bd4539a881a99bf10 | Python | harris-ippp/hw-6-linapp | /e2.py | UTF-8 | 1,036 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
from bs4 import BeautifulSoup
import requests
# Fetch the search page listing all presidential general elections
# (1924-2016) from the Virginia historical elections site.
url_va = 'http://historical.elections.virginia.gov/elections/search/year_from:1924/year_to:2016/office_id:1/stage:General'
req_va = requests.get(url_va)
html_va = req_va.content #getting the contents of the website
soup = BeautifulSoup(html_va,'html.parser') #turning it into a soup object so you can manipulate in python
tags = soup.find_all('tr','election_item')
# Collect (year, election id) pairs; the id is the last 5 characters of
# the table row's id attribute.
ELECTION_ID=[]
for t in tags:
    year = t.td.text
    year_id = t['id'][-5:]
    i=[year,year_id]
    ELECTION_ID.append(i)
    #print(year, year_id)
Year = [item[0] for item in ELECTION_ID]
ID = [item[1] for item in ELECTION_ID]
k = dict(zip(ID, Year))
k
# Download each election's CSV and save it to a file named by its year.
for t in ID:
    base = 'http://historical.elections.virginia.gov/elections/download/{}/precincts_include:0/'
    replace_url = base.format(t)
    response = requests.get(replace_url).text
    Year_data = "president_general_"+ k[t] +".csv"
    with open(Year_data, 'w') as output:
        output.write(response)
| true |
76446456c548660d046f8658ec3687591e281ce4 | Python | chrispun0518/personal_demo | /leetcode/88. Merge Sorted Array.py | UTF-8 | 874 | 2.859375 | 3 | [] | no_license | class Solution(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: None Do not return anything, modify nums1 in-place instead.
"""
pt1 = m - 1
pt2 = n - 1
pointer = m + n - 1
while pt1>= 0 and pt2 >=0:
if nums2[pt2] >= nums1[pt1]:
nums1[pointer] = nums2[pt2]
pt2 -= 1
pointer -= 1
else:
nums1[pointer] = nums1[pt1]
pt1 -= 1
pointer -= 1
while pt2 >=0:
nums1[pointer] = nums2[pt2]
pt2 -= 1
pointer -= 1
while pt1 >=0:
nums1[pointer] = nums1[pt1]
pt1 -= 1
pointer -= 1
return None
| true |
981b7e93b10f53cbd6223640e3312bc297d3a1d9 | Python | csvoss/onelinerizer | /tests/try_except.py | UTF-8 | 1,212 | 3.484375 | 3 | [
"MIT"
] | permissive | try:
print 'try 0'
except AssertionError:
print 'except 0'
else:
print 'else 0'
try:
print 'try 1'
assert False
except AssertionError:
print 'except 1'
else:
print 'else 1'
try:
try:
print 'try 2'
assert False
except ZeroDivisionError:
print 'wrong except 2'
else:
print 'else 2'
except AssertionError:
print 'right except 2'
else:
print 'else 2'
try:
print 'try 3'
assert False
except ZeroDivisionError:
print 'wrong except 3'
except AssertionError:
print 'right except 3'
else:
print 'else 3'
try:
print 'try 4'
assert False
except:
print 'except 4'
else:
print 'else 4'
def f():
try:
print 'try f'
return 'returned'
except AssertionError:
print 'except f'
else:
print 'else f'
print 'f: ' + f()
def g():
try:
print 'try g'
assert False
except AssertionError:
print 'except g'
return 'returned'
else:
print 'else g'
print 'g: ' + g()
def f():
try:
print 'try h'
except:
print 'except h'
else:
print 'else h'
return 'returned'
print 'h: ' + f()
| true |
49f680989861bf1a247746e373567db6702c89fa | Python | MyungSeKyo/algorithms | /백준/1748.py | UTF-8 | 538 | 3.28125 | 3 | [] | no_license | import sys
# Baekjoon 1748: count the digits written when concatenating 1..N.
# NOTE(review): this file contains two independent solutions and reads a
# number from stdin twice; 'input' is rebound to sys.stdin.readline.
input = sys.stdin.readline
n = input().strip()
digits = len(n) - 1
n = int(n)
ret = 0
# Complete groups: d-digit numbers contribute 9 * 10**(d-1) * d digits.
for i in range(digits):
    ret += 9 * (10 ** i) * (i + 1)
# Partial last group: numbers from 10**digits up to n inclusive.
ret += (n - (10 ** digits - 1)) * (digits + 1)
print(ret)
# --- Second, lookup-table-based solution ---
MAX = '100000000' # 9 digits
sum_lst = [0]
len_all = 0
for i in range(1, len(MAX)+1) :
    len_all += 9*i*10**(i-1)
    sum_lst.append(len_all)
## computed as: original number - smallest number with that digit count + 1
n = input()
# NOTE(review): this second read keeps the trailing newline (no .strip()),
# so len(n) is one too large — confirm the intended input format.
diff = int(n) - 10**(len(n)-1) + 1
diff_len = diff*len(n)
aws = diff_len + sum_lst[len(n)-1]
print(aws)
5eb1cb5f27bc80d8cbcff76719fc6d453ec7d806 | Python | skosarew/EpamPython2019 | /06-advanced-python/hw/task1.py | UTF-8 | 2,123 | 3.625 | 4 | [] | no_license | """
E - dict(<V> : [<V>, <V>, ...])
Ключ - строка, идентифицирующая вершину графа
значение - список вершин, достижимых из данной
Сделать так, чтобы по графу можно было итерироваться(обходом в ширину)
"""
import collections
class GraphIterator(collections.abc.Iterator):
def __init__(self, collection):
self.collection = collection
self.cursor = -1
self.root = next(iter(collection))
self.search_deque = collections.deque(self.root)
self.visited = []
self.cc = {}
self.search()
def search(self):
num_cc = 0
for i in self.collection:
# print(i)
if i not in self.visited:
num_cc += 1
self.visited.append(i)
self.search_deque = collections.deque(i)
while self.search_deque:
vertex = self.search_deque.popleft()
self.cc[vertex] = num_cc
for neighbour in self.collection[vertex]:
if neighbour not in self.visited:
self.visited.append(neighbour)
self.search_deque.append(neighbour)
print()
while self.search_deque:
vertex = self.search_deque.popleft()
for neighbour in self.collection[vertex]:
if neighbour not in self.visited:
self.visited.append(neighbour)
self.search_deque.append(neighbour)
def __next__(self):
if self.cursor + 1 >= len(self.visited):
raise StopIteration
self.cursor += 1
return self.visited[self.cursor]
class Graph:
def __init__(self, E):
self.E = E
def __iter__(self):
return GraphIterator(self.E)
E = {'A': ['B', 'E'],
'B': ['A', 'E'],
'C': ['F', 'G'],
'D': [],
'E': ['A', 'B'],
'F': ['C'],
'G': ['C']}
graph = Graph(E)
for vertex in graph:
print(vertex)
| true |
1efc8f1b8fc85ff891d7835868c1627a7bb65f1c | Python | nicokiritan/sosc-sosw-modder | /ypac_unpack.py | UTF-8 | 792 | 2.859375 | 3 | [] | no_license |
import os
import sys
import exg
# Expect the .dat and .hed archive files as drag&drop (argv) arguments.
if len(sys.argv) < 3:
    print("Drag&drop .dat and .hed")
    input()
    exit()
dat_path = ""
hed_path = ""
drop_files = sys.argv[1:]
# Sort the dropped files by extension.
for drop_file in drop_files:
    if drop_file[-4:] == ".dat":
        dat_path = drop_file
    elif drop_file[-4:] == ".hed":
        hed_path = drop_file
if dat_path == "" or hed_path == "":
    print("Drag&drop .dat and .hed")
    input()
    exit()
# Both files must share the same base name.
if dat_path[0:-4] != hed_path[0:-4]:
    print(".dat and .hed file names are must be the same.")
    input()
    exit()
target_file = dat_path[0:-4]
try:
    exgset = exg.EXGSet(path=target_file)
    print("Start unpacking portrait...")
    exgset.unpack(target_file + ".unpack")
    print("Unpacking successful")
except Exception as err:
    print("Unpacking failed")
    print(err)
print("end")
input() | true |
172fc50d89794ed365517792ae75be9650c0d13b | Python | s0ap/arpmRes | /arpym/estimation/fit_factor_analysis.py | UTF-8 | 1,973 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from arpym.estimation.factor_analysis_paf import factor_analysis_paf
from arpym.estimation.factor_analysis_mlf import factor_analysis_mlf
from arpym.statistics.meancov_sp import meancov_sp
def fit_factor_analysis(x, k_, p=None, method='PrincAxFact'):
    """Fit a k_-factor model to the series x (ARPM toolbox helper).

    Parameters
    ----------
    x : array, shape (t_, n_) if n_>1 or (t_, ) for n_=1
    k_ : scalar
    p : array, shape (t_,), optional
    method : string, optional
    Returns
    -------
    alpha_hat : array, shape (n_,)
    beta_hat : array, shape (n_, k_) if k_>1 or (n_, ) for k_=1
    delta2 : array, shape(n_, n_)
    z_reg : array, shape(t_, n_) if n_>1 or (t_, ) for n_=1
    """
    t_ = x.shape[0]
    # Promote a 1-D series to a (t_, 1) matrix.
    if len(x.shape) == 1:
        x = x.reshape((t_, 1))
    # Default to uniform (flexible) probabilities.
    if p is None:
        p = np.ones(t_) / t_
    # Step 1: Compute HFP mean and covariance of X
    m_x_hat_hfp, s2_x_hat_hfp = meancov_sp(x, p)
    # Step 2: Estimate alpha
    alpha_hat = m_x_hat_hfp
    # Step 3: Decompose covariance matrix
    if method == 'PrincAxFact' or method.lower() == 'paf':
        beta_hat, delta2_hat = factor_analysis_paf(s2_x_hat_hfp, k_)
    else:
        beta_hat, delta2_hat = factor_analysis_mlf(s2_x_hat_hfp, k_)
    if k_ == 1:
        beta_hat = beta_hat.reshape(-1, 1)
    # Step 4: Compute factor analysis covariance matrix
    s2_x_hat_fa = beta_hat@beta_hat.T + np.diagflat(delta2_hat)
    # Step 5: Approximate hidden factor via regression
    # (matrix-inversion-lemma form when all idiosyncratic variances are
    # nonzero; otherwise fall back to inverting the full FA covariance)
    if np.all(delta2_hat != 0):
        omega2 = np.diag(1/delta2_hat)
        z_reg = beta_hat.T @ \
            (omega2-omega2@beta_hat@
             np.linalg.inv(beta_hat.T@omega2@beta_hat + np.eye(k_))@
             beta_hat.T@omega2)@(x-m_x_hat_hfp).T
    else:
        z_reg = beta_hat.T@np.linalg.inv(s2_x_hat_fa)@(x-m_x_hat_hfp).T
return alpha_hat, np.squeeze(beta_hat), delta2_hat, np.squeeze(z_reg.T) | true |
8a875241356049a99d00341a59c5dbca861bba4b | Python | anakka6/algorithms | /algorithms/add_lists_reverse.py | UTF-8 | 2,314 | 3.859375 | 4 | [] | no_license | '''Add 342 and 465 and print 807, The lists are set up as 2->4->3 and 5->6->4. The output should be 7->0->8.'''
class Node():
    """Singly-linked list node holding one decimal digit."""
    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList():
    """Singly linked list whose nodes store the digits of a number in
    reverse order (least-significant digit first)."""

    def __init__(self, head=None):
        self.head = head

    def append(self, element):
        """Append *element* (a Node) at the tail of the list."""
        current = self.head
        if self.head:
            while current.next is not None:
                current = current.next
            current.next = element
        else:
            self.head = element

    def printList(self):
        """Print each node's digit on its own line, head first."""
        current = self.head
        while current is not None:
            print(current.data)
            current = current.next

    def add_two_lists(self, N1, N2):
        """Add two numbers stored as reversed digit lists.

        N1 and N2 are the head Nodes of the addend lists. Returns the
        head Node of a NEW list holding the reversed digits of the sum
        (e.g. 2->4->3 plus 5->6->4 yields 7->0->8).

        Fixes the original implementation, which advanced past the end
        of a single pre-allocated node (crashing or mangling results)
        and mishandled the final carry.
        """
        dummy = Node(0)  # placeholder so the head needs no special case
        tail = dummy
        carry = 0
        while N1 is not None or N2 is not None or carry:
            total = carry
            if N1 is not None:
                total += N1.data
                N1 = N1.next
            if N2 is not None:
                total += N2.data
                N2 = N2.next
            carry, digit = divmod(total, 10)
            tail.next = Node(digit)
            tail = tail.next
        return dummy.next
# Demo: adds 942 + 465 (digits stored least-significant first).
A = LinkedList()
A.append(Node(2))
A.append(Node(4))
A.append(Node(9))
print('Printing A')
A.printList()
B = LinkedList()
B.append(Node(5))
B.append(Node(6))
B.append(Node(4))
# B.append(Node(7))
print('Printing B')
B.printList()
C = LinkedList()
D = C.add_two_lists(A.head, B.head)
print('Printing C')
# C.printList()
| true |
117d0cffb5a9faa7b0918ae98a8f4ecb2e38a041 | Python | GinkgoX/MachineLearning | /KNN/digitsRecognize.py | UTF-8 | 1,435 | 3.203125 | 3 | [] | no_license | import operator
import numpy as np
from os import listdir
from sklearn.neighbors import KNeighborsClassifier as kNN
'''
Function : img2vector(filename)
Description : to covert img(in filename) to vector
Args : filename
Rets : vectorImg
'''
def img2vector(filename):
    """Flatten a 32x32 text-encoded digit image into a (1, 1024) vector.

    Each of the first 32 lines of *filename* must contain at least 32
    characters, each a digit ('0'/'1' in the digits dataset).

    :param filename: path to the text image file
    :return: numpy array of shape (1, 1024)
    """
    vectorImg = np.zeros((1, 1024))
    # 'with' guarantees the file is closed even on error (the original
    # opened the file and never closed it).
    with open(filename) as fr:
        for i in range(32):
            line = fr.readline()
            for j in range(32):
                vectorImg[0, 32*i + j] = int(line[j])
    return vectorImg
'''
Function : train()
Description : use kNN to train and test digits
Args : None
Rets : None
'''
def train():
    """Train a 3-NN classifier on ./digits/trainSet and report the error
    rate on ./digits/testSet (file names look like '<label>_<n>.txt')."""
    labels = []
    trainSet = listdir('./digits/trainSet')
    numTrain = len(trainSet)
    trainMatrix = np.zeros((numTrain, 1024)) #32*32 img size
    for i in range(numTrain):
        filename = trainSet[i]
        # The digit label is encoded before the first underscore.
        label = int(filename.split('_')[0])
        labels.append(label)
        trainMatrix[i, :] = img2vector('./digits/trainSet/%s'%(filename))
    neigh = kNN(n_neighbors = 3, algorithm = 'auto')
    neigh.fit(trainMatrix, labels)
    testSet = listdir('./digits/testSet')
    errorCount = 0.0
    numTest = len(testSet)
    for i in range(numTest):
        filename = testSet[i]
        label = int(filename.split('_')[0])
        vectorImg = img2vector('./digits/testSet/%s'%(filename))
        # NOTE(review): predict returns a 1-element array; predLabel[0]
        # would avoid relying on scalar/array comparison semantics.
        predLabel = neigh.predict(vectorImg)
        print('label: %d vs predLabel: %d'%(label, predLabel))
        if(label != predLabel):
            errorCount += 1.0
    print('Error Rate : %f%%'%(errorCount / numTest * 100))
# Entry point when run as a script.
if __name__ == '__main__':
    train()
| true |
f64b6129e5015f95b71e756b183ea5598b93a179 | Python | khygu0919/codefight | /Intro/allLongestStrings.py | UTF-8 | 307 | 3.53125 | 4 | [] | no_license | '''
Given an array of strings, return another array containing all of its longest strings.
'''
def allLongestStrings(inputArray):
    """Return every string in *inputArray* whose length equals the
    maximum length, preserving the original order.

    Returns an empty list for empty input (matches the original, whose
    c stayed 0 and whose second loop never ran).

    The original recomputed max(b) on every iteration (O(n^2)); this is
    a single max pass plus one filtering comprehension.
    """
    if not inputArray:
        return []
    longest = max(len(s) for s in inputArray)
    return [s for s in inputArray if len(s) == longest]
| true |
07a08414711196f8ea857bc69f1a93a544b8b717 | Python | elezbar/Python_Tetris | /test.py | UTF-8 | 180 | 3 | 3 | [] | no_license | s = [{"name": "A", "parents": []}, {"name": "B", "parents": ["A", "C"]}, {"name": "C", "parents": ["A"]}]
def parr(d,p, i = 1):
for k in d:
if p in k[parents]
| true |
715417861c882a0e110f52f7287b320219dd9b24 | Python | gcastroid/img2mif | /img2mif.py | UTF-8 | 2,024 | 3.359375 | 3 | [
"MIT"
] | permissive | from PIL import Image
import sys
# read the arguments
# Usage: img2mif.py <image file> <output .mif file>
img_file = sys.argv[1]
out_file = sys.argv[2]
# read the image
image = Image.open(img_file)
pixels = image.load()
h_pixels, v_pixels = image.size
# calc the number of address bits and the memory depth
h_bits = (h_pixels - 1).bit_length()
v_bits = (v_pixels - 1).bit_length()
addr_bits = h_bits + v_bits
depth = pow(2, int(addr_bits))
data_bits = 12
# print the image resolution
print("Number of hor pixels:", h_pixels)
print("Number of vert pixels:", v_pixels)
# Create the .mif output file
mif = open(out_file, "w")
# MIF header
# number of data bits
mif.write("width = ")
mif.write(str(data_bits))
mif.write(";\n")
# number of addresses
mif.write("depth = ")
mif.write(str(depth))
mif.write(";\n")
# radix
mif.write("address_radix = hex;\n")
mif.write("data_radix = hex;\n\n")
mif.write("content begin\n\n")
line = 0
# write each address value with the pixels
for i in range(v_pixels): # number of vertical pixels
    for j in range(h_pixels): # number of horizontal pixels
        # read the RGB888 and convert to RGB444
        # (keep the top nibble of each channel, pack as rrrrggggbbbb)
        r,g,b = pixels[j,i]
        r = r & (0xf<<4)
        g = g & (0xf<<4)
        b = b & (0xf<<4)
        r = r << 4
        b = b >> 4
        rgb = r | g | b
        # write the address line and data value
        mif.write(str(hex(line)[2:]))
        mif.write(": ")
        mif.write(str(hex(rgb)[2:]))
        mif.write(";\n")
        # just print the actual value
        print("address:", hex(line))
        print("(r, g, b) = ", pixels[j,i])
        print("rgb444:", hex(rgb))
        print("\n")
        line += 1
# complete the rest of the addresses with 0s
# if they were not filled
mif.write("\n")
if line < depth:
    mif.write("[")
    mif.write(str(hex(line)[2:]))
    mif.write("..")
    mif.write(str(hex(depth-1)[2:]))
    mif.write("]: 000;\n\n")
# end of file
mif.write("end;")
# close file
mif.close()
# output message
print(".mif file generated, bye!\n") | true |
c5c04301b377f99cf2b9420248d2a3ab1c913267 | Python | Melkemann84/ProjectEuler | /projectEuler_04.py | UTF-8 | 820 | 4.1875 | 4 | [] | no_license | import time
# https://projecteuler.net/problem=4
''' Larges palindrome product
A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
Find the largest palindrome made from the product of two 3-digit numbers.
'''
def isPalindrome(num):
    """Return True when the decimal representation of *num* reads the
    same forwards and backwards."""
    digits = str(num)
    return digits == digits[::-1]


def main():
    """Return the largest palindrome made from the product of two
    3-digit numbers (Project Euler problem 4).

    The original searched range(1, 999), which both excluded 999 and
    admitted 1- and 2-digit factors contrary to the problem statement,
    and accumulated every palindrome in a list. This searches the real
    3-digit range 100..999 descending and prunes once products can no
    longer beat the current best. The answer is 906609 (= 913 * 993).
    """
    best = 0
    for m in range(999, 99, -1):
        for n in range(m, 99, -1):
            product = m * n
            if product <= best:
                break  # n only decreases, so products only shrink
            if isPalindrome(product):
                best = product
    return best
# Time the search and report the elapsed seconds and the answer.
start_time = time.time()
answer = main()
print("--- %s seconds ---" % (time.time() - start_time))
print("----------------------------------")
print("Answer: " + str(answer))
print("----------------------------------") | true |
95bc83ce3b68800ca1ee1ac892aff276274c3787 | Python | GyuReeKim/DailyCode | /July/code_0714_1.py | UTF-8 | 2,394 | 4.4375 | 4 | [] | no_license | # if문을 활용한 선택 프로그램 작성
import random
print("게임 이름을 입력하세요.")
game_name = input()
hunter = ["타격감", "솔플", "운영"]
survivor = ["멘탈", "팀워크", "스릴", "뚝배기"]
# Random draw 1
hunt_random1 = random.choice(hunter)
surv_random1 = random.choice(survivor)
# Question 1
print(f"당신에게는 {hunt_random1}과 {surv_random1} 중에 어떤 것이 중요합니까?")
print(f"{hunt_random1}과 {surv_random1} 중에 선택하세요.")
# Read answer 1
first_q = input()
# Recommendation 1 via if statements
if first_q == "멘탈":
    rec_first = "멘탈"
elif first_q == f"{hunt_random1}":
    rec_first = "감시자"
else:
    rec_first = "생존자"
# Find num1, the index of the value drawn first, so it can be removed
# NOTE(review): hunter.remove(hunt_random1) would replace this chain.
if hunt_random1 == "타격감":
    num1 = 0
elif hunt_random1 == "솔플":
    num1 = 1
elif hunt_random1 == "운영":
    num1 = 2
# Remove element num1
del hunter[num1]
# Find num2, the index of the value drawn first, so it can be removed
if surv_random1 == "멘탈":
    num2 = 0
elif surv_random1 == "팀워크":
    num2 = 1
elif surv_random1 == "스릴":
    num2 = 2
elif surv_random1 == "뚝배기":
    num2 = 3
# Remove element num2
del survivor[num2]
# Random draw 2
hunt_random2 = random.choice(hunter)
surv_random2 = random.choice(survivor)
# Question 2
print(f"당신에게는 {hunt_random2}과 {surv_random2} 중에 어떤 것이 중요합니까?")
print(f"{hunt_random2}과 {surv_random2} 중에 선택하세요.")
# Read answer 2
second_q = input()
# Recommendation 2 via if statements
if second_q == "멘탈":
    rec_second = "멘탈"
elif second_q == f"{hunt_random2}":
    rec_second = "감시자"
else:
    rec_second = "생존자"
# Random pick used when the two answers point to different roles
character = ["감시자", "생존자"]
random_choice = random.choice(character)
# Recommend one of the two roles
if rec_first == "멘탈":
    print(f"당신의 {rec_first}로 감시자를 하기에는 무리입니다. 생존자를 추천합니다.")
elif rec_second == "멘탈":
    print(f"당신의 {rec_second}로 감시자를 하기에는 무리입니다. 생존자를 추천합니다.")
elif rec_first == rec_second:
    recommend = rec_first
    print(f"당신에게는 {game_name}의 {recommend}를 추천합니다.")
else:
    recommend = random_choice
print(f"당신에게는 {game_name}의 {recommend}를 추천합니다.") | true |
4b0bdefee6479b70711da69cfccc8739ca61f69f | Python | pchatanan/AllState | /src/AllState.py | UTF-8 | 19,852 | 3.171875 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
# Print all rows and columns. Dont hide any
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
# Disable SettingWithCopyWarning
pd.options.mode.chained_assignment = None # default='warn'
# Use this seed for all random states
seed = 0
# for data preprocessing using normalization
scaler_x = StandardScaler()
scaler_y = StandardScaler()
# In[2]:
# XGBoost bug hot-fix:
# XGBoost cannot predict large test dataset at one go, so we divide test set into small chuck
def getPred(model, X_val, chunk=5000):
    """Predict in fixed-size chunks and concatenate the results.

    Works around an XGBoost issue where predicting a very large test
    set in a single call fails.

    :param model: fitted estimator exposing ``predict``
    :param X_val: sliceable data exposing ``shape[0]`` (DataFrame/array)
    :param chunk: rows per predict call; the default of 5000 preserves
        the originally hard-coded chunk size (now generalized)
    :return: concatenated prediction array
    """
    preds = [model.predict(X_val[i:i + chunk])
             for i in range(0, X_val.shape[0], chunk)]
    return np.concatenate(preds)
# ## Load raw data:
# - In this step, we save `'ID'` column for test set so we can construct `submission.csv` file after prediction.
# In[3]:
# Load the Kaggle train/test CSVs (paths relative to the src/ folder).
raw_train = pd.read_csv("../data/train.csv")
raw_test = pd.read_csv("../data/test.csv")
# Save the id's for submission file
ID = raw_test['id']
# drop 'id' column
raw_train.drop('id',axis=1, inplace=True)
raw_test.drop('id',axis=1, inplace=True)
#Display the first row to get a feel of the data
print(raw_train.head(1))
# ## Split dataset:
# - index 0 - 115 are category
# - index 116(included) onwards are numerical
# In[4]:
#scaler = StandardScaler().fit(raw_train)
# Columns 0..115 are categorical; the remainder are continuous, with the
# final train column being the 'loss' target (hence the :-1 slice).
last_discrete_idx = 116
raw_train_discrete = raw_train.iloc[:,:last_discrete_idx]
raw_train_continuous = raw_train.iloc[:,last_discrete_idx:-1]
raw_trainY = raw_train[['loss']]
raw_test_discrete = raw_test.iloc[:,:last_discrete_idx]
raw_test_continuous = raw_test.iloc[:,last_discrete_idx:]
# ## Data statistics:
# - Shape
# - Description
# - Skew
# In[5]:
# Quick look at shape, summary statistics and per-column skewness.
temp = pd.concat([raw_train_discrete, raw_train_continuous, raw_trainY], axis=1)
print(temp.shape)
# Observe that means are 0s and standard deviations are 1s
print(temp.describe())
print(temp.skew())
# ## Data Transformation:
# - Skew correction
# - We use `log shift` method to improve the skewness of the `'loss'` column
# - We try shift values [0, 1, 10, 100, 500, 1000] and plot the graph of it.
#
# **Result: **
#
# - Best shift is 0
#
# **Take Note: **
#
# - We have to use `np.exp()` later to convert back the `'loss'` after prediction
# In[6]:
# Try log-shift transforms log(loss + shift) and keep the shift giving
# the smallest skewness.
temp = raw_trainY['loss']
original_skew = temp.skew()
print('Skewness without log shift: ' + str(original_skew))
shifts = [0, 1, 10, 100, 500, 1000]
temp_result = []
for shift in shifts:
    shifted = np.log(temp + shift)
    temp_result.append(shifted.skew())
# NOTE(review): this minimizes the SIGNED skew, not |skew| — a strongly
# negative skew would win here; confirm that is intended.
val, idx = min((val, idx) for (idx, val) in enumerate(temp_result))
best_shift = shifts[idx]
print('Best shift: ' + str(shifts[idx]))
print('Skewness with log shift: ' + str(val))
plt.plot(shifts, temp_result)
plt.show()
# Remember: predictions must be mapped back with np.exp() later.
raw_trainY['loss'] = np.log(raw_trainY['loss'] + best_shift)
# ## Data Pre Processing:
# - Normalization (Z-Scoring)
#
# **Take Note: **
#
# - We split data to X (all the features) and Y (loss) and perform normalization separately. This is so that we can use `scaler_y` to inverse transform the prediction of loss later.
# - We only use the train set to fit the normalization.
# In[7]:
# Z-score the continuous features and the target; both scalers are fit
# on the training data only and reused for the test set.
scaler_x.fit(raw_train_continuous)
scaler_y.fit(raw_trainY)
# Save columns name
col_name_X = raw_train_continuous.columns.values
col_name_Y = raw_trainY.columns.values
# transform
clean_train_continuous = scaler_x.transform(raw_train_continuous)
clean_trainY = scaler_y.transform(raw_trainY)
clean_test_continuous = scaler_x.transform(raw_test_continuous)
# Rewrap the numpy results as DataFrames with the original column names.
clean_train_continuous = pd.DataFrame(data=clean_train_continuous, columns=col_name_X)
clean_trainY = pd.DataFrame(data=clean_trainY, columns=col_name_Y)
clean_test_continuous = pd.DataFrame(data=clean_test_continuous, columns=col_name_X)
# ## Data Visualization:
# - Categorical attributes
# - It can be observed that cat1 to 98 have significantly less number of categories than cat99 to 116.
# In[8]:
# Count of each label in each category
# (cached to ../intermediate/count_result to avoid recomputing)
try:
    count_result = pd.read_pickle('../intermediate/count_result')
except FileNotFoundError:
    temp = pd.concat([raw_train_discrete, raw_test_discrete])
    count_result = temp.apply(pd.value_counts)
    count_result.to_pickle('../intermediate/count_result')
#names of all the columns
cols = count_result.columns
# Plot count plot for all attributes in a 29x4 grid
n_cols = 4
n_rows = 29
fig, axes = plt.subplots(n_rows, n_cols, sharey=True, figsize=(12, 100))
for i in range(n_rows):
    for j in range(n_cols):
        col_name = cols[i*n_cols+j]
        temp = count_result[col_name]
        temp = temp.dropna()
        axes[i, j].hist(temp.index.values.tolist(), weights=temp.tolist())
        axes[i, j].set_title(col_name)
plt.savefig('../intermediate/count_plot.png', dpi=100)
# ## Feature Engineering:
# ### Motivation:
# - Using One-Hot encoding to all categorical data may increase the number of features substantially and this requires long computational time
# ### Approach:
# - For features with small number of categories (cat1-98), we use one-hot encoding
# - For features with large number of categories (cat99-116), we use ordinal encoding
# In[9]:
# Encode categoricals: one-hot for the low-cardinality cat1-98, ordinal
# (label) encoding for the high-cardinality cat99-116. Train and test
# are concatenated first so both share one consistent encoding.
n_train = raw_train_discrete.shape[0]
n_test = raw_test_discrete.shape[0]
split = 98
one_hot_train = raw_train_discrete.iloc[:,:split]
one_hot_test = raw_test_discrete.iloc[:,:split]
one_hot_temp = pd.concat([one_hot_train, one_hot_test])
ordinal_train = raw_train_discrete.iloc[:,split:]
ordinal_test = raw_test_discrete.iloc[:,split:]
ordinal_temp = pd.concat([ordinal_train, ordinal_test])
# One-Hot encoding
one_hot_temp = pd.get_dummies(one_hot_temp)
# Ordinal encoding
from sklearn.preprocessing import LabelEncoder
ordinal_temp = ordinal_temp.apply(LabelEncoder().fit_transform)
encoded = pd.concat([one_hot_temp, ordinal_temp], axis=1)
print(encoded.shape)
# Split back into the original train/test partitions.
raw_train_discrete_encoded = encoded.iloc[:n_train,:]
raw_test_discrete_encoded = encoded.iloc[n_train:,:]
# ## Data Preparation:
# - Split into train and validation
# - We use K-Fold method with k = 5
# - We also declare `mean_absolute_error` as a scoring parameter
# In[10]:
# Assemble the final model matrices and set up 5-fold CV with MAE scoring.
XY_train = pd.concat([raw_train_discrete_encoded, clean_train_continuous, clean_trainY], axis=1)
X_test = pd.concat([raw_test_discrete_encoded, clean_test_continuous], axis=1)
print('Number of dataset: ')
print('Train: ' + str(XY_train.shape[0]))
print('Test: ' + str(X_test.shape[0]))
from sklearn.model_selection import KFold
# NOTE(review): newer scikit-learn rejects random_state with shuffle=False.
kf = KFold(n_splits=5, random_state=seed, shuffle=False)
# Scoring parameter
from sklearn.metrics import mean_absolute_error
# ## Artificial Neural Network (ANN):
# - We use keras with Tensorflow backend here
# - The ANN we considered are baseline, small, deeper, custom
# - We use epoch (training round) = 30
# In[11]:
# This list will contain ANN models
# Four Keras architectures (baseline/smaller/deeper/custom) wrapped as
# sklearn-style regressors; skipped gracefully when Keras/TensorFlow is
# not available. c-1 is the number of feature columns (loss excluded).
nn_models = []
try:
    r,c = XY_train.shape
    #Import libraries for deep learning
    from keras.wrappers.scikit_learn import KerasRegressor
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation
    from keras.layers.normalization import BatchNormalization
    from keras.layers.advanced_activations import PReLU
    # define baseline model
    def baseline(v):
        # create model
        model = Sequential()
        model.add(Dense(v*(c-1), input_dim=v*(c-1), kernel_initializer='normal', activation='relu'))
        model.add(Dense(1, kernel_initializer='normal'))
        # Compile model
        model.compile(loss='mean_absolute_error', optimizer='adam')
        return model
    # define smaller model
    def smaller(v):
        # create model
        model = Sequential()
        model.add(Dense(v*(c-1)//2, input_dim=v*(c-1), kernel_initializer='normal', activation='relu'))
        model.add(Dense(1, kernel_initializer='normal', activation='relu'))
        # Compile model
        model.compile(loss='mean_absolute_error', optimizer='adam')
        return model
    # define deeper model
    def deeper(v):
        # create model
        model = Sequential()
        model.add(Dense(v*(c-1), input_dim=v*(c-1), kernel_initializer='normal', activation='relu'))
        model.add(Dense(v*(c-1)//2, kernel_initializer='normal', activation='relu'))
        model.add(Dense(1, kernel_initializer='normal', activation='relu'))
        # Compile model
        model.compile(loss='mean_absolute_error', optimizer='adam')
        return model
    # custom neural net
    def custom(v):
        model = Sequential()
        model.add(Dense(250, input_dim = c-1, kernel_initializer = 'normal'))
        model.add(Dense(100, kernel_initializer = 'normal', activation='relu'))
        model.add(Dense(50, kernel_initializer = 'normal', activation='relu'))
        model.add(Dense(1, kernel_initializer = 'normal', activation='relu'))
        model.compile(loss = 'mean_absolute_error', optimizer = 'adam')
        return(model)
    # Wrap each builder so it can be used like an sklearn estimator.
    est_list = [('MLP',baseline),('smaller',smaller),('deeper',deeper),('custom', custom)]
    for name, est in est_list:
        temp = {}
        temp['name'] = name
        temp['model'] = KerasRegressor(build_fn=est, v=1, nb_epoch=30, verbose=0)
        nn_models.append(temp)
except ModuleNotFoundError:
    print('Tensorflow is not installed with GUP support')
# ## Tuning XGBoost hyperparameters:
#
# During this step, we fist find the best n_estimators for different max_depth for max_depth = [5,6,7,8].
#
# - max_depth
# - n_estimators
# In[12]:
# Grid of XGBoost models: for each max_depth d, try several candidate
# n_estimators values (shallower trees need more estimators).
xgb_dn_models = []
try:
    from xgboost import XGBRegressor
    dn_list = [
        (8,[230,240,250]),
        (7,[280,300,320]),
        (6,[380,400,420]),
        (5,[760,780,800])
    ]
    for d, n_list in dn_list:
        for n in n_list:
            model = XGBRegressor(n_estimators=n, seed=seed, tree_method='gpu_hist', max_depth=d, gamma=3, min_child_weight=3, learning_rate=0.09)
            temp = {}
            temp['name'] = "XGB-d" + str(d) + "-n" + str(n)
            temp['model'] = model
            xgb_dn_models.append(temp)
except ModuleNotFoundError:
    print('XGBoost is not installed with GUP support')
# ## Tuning XGBoost hyperparameters:
#
# For different max_depth, we tune the value of gamma parameter.
#
# - gamma
# In[13]:
xgb_g_models = []
try:
from xgboost import XGBRegressor
gamma_list = np.array([0, 1, 3])
dn_list = [
(8,240),
(7,280),
(6,400),
(5,780)
]
for d,n in dn_list:
for gamma in gamma_list:
model = XGBRegressor(n_estimators=n, seed=seed, tree_method='gpu_hist', max_depth=d, min_child_weight=3, gamma=gamma, learning_rate=0.09)
temp = {}
temp['name'] = "XGB-d" + str(d) + "-n" + str(n) + "-g" + str(gamma)
temp['model'] = model
xgb_g_models.append(temp)
except ModuleNotFoundError:
print('XGBoost is not installed with GUP support')
# ## Tuning XGBoost hyperparameters:
#
# For different max_depth, we tune the value of min_child_weight parameter.
#
# - min_child_weight
# In[14]:
xgb_mcw_models = []
try:
    from xgboost import XGBRegressor
    mcw_list = np.array([2, 3, 4, 5])
    # (max_depth, n_estimators, gamma) triples selected by the previous sweeps.
    dng_list = [
        (8,240,3),
        (7,280,0),
        (6,400,3),
        (5,780,0)
    ]
    for d,n,g in dng_list:
        for mcw in mcw_list:
            model = XGBRegressor(n_estimators=n, seed=seed, tree_method='gpu_hist', max_depth=d, gamma=g, min_child_weight=mcw, learning_rate=0.09)
            temp = {}
            temp['name'] = "XGB-d" + str(d) + "-mcw" + str(mcw)
            temp['model'] = model
            xgb_mcw_models.append(temp)
except ModuleNotFoundError:
    print('XGBoost is not installed with GUP support')
# ## Tuning XGBoost hyperparameters:
#
# For different max_depth, we tune the value of learning_rate parameter.
#
# - learning rate
# In[15]:
xgb_lr_models = []
try:
    from xgboost import XGBRegressor
    lr_list = np.array([0.08, 0.09, 0.1])
    # NOTE(review): despite the name, these are now 4-tuples
    # (max_depth, n_estimators, gamma, min_child_weight).
    dng_list = [
        (8,240,3,5),
        (7,280,0,5),
        (6,400,3,4),
        (5,780,0,5)
    ]
    for d,n,g,mcw in dng_list:
        for lr in lr_list:
            model = XGBRegressor(n_estimators=n, seed=seed, tree_method='gpu_hist', max_depth=d, gamma=g, min_child_weight=mcw, learning_rate=lr)
            temp = {}
            temp['name'] = "XGB-d" + str(d) + "-lr" + str(lr)
            temp['model'] = model
            xgb_lr_models.append(temp)
except ModuleNotFoundError:
    print('XGBoost is not installed with GUP support')
# ## Add one more model for depth = 4:
# In[16]:
xgb_test_models = []
try:
    from xgboost import XGBRegressor
    lr_list = np.array([0.08, 0.09])
    dng_list = [
        (4,2000,3,3)
    ]
    for d,n,g,mcw in dng_list:
        for lr in lr_list:
            #Set the base model
            model = XGBRegressor(n_estimators=n, seed=seed, tree_method='gpu_hist', max_depth=d, gamma=g, min_child_weight=mcw, learning_rate=lr)
            temp = {}
            temp['name'] = "XGB-d" + str(d) + "-lr" + str(lr)
            temp['model'] = model
            xgb_test_models.append(temp)
except ModuleNotFoundError:
    print('XGBoost is not installed with GUP support')
# ## Find best models:
# - Run all models to find the one with smallest MAE for different max_depths.
# In[17]:
# Cross-validate every candidate model and cache (pred, per-fold MAE, mean MAE)
# per model name under ../result/<file_name>; cached results are reloaded on
# later runs instead of being recomputed.
# NOTE(review): `kf`, `XY_train`, `getPred`, `scaler_y` and `best_shift` are
# presumably defined earlier in this script -- confirm before reuse.
import pickle
all_models = [
    ('model_result_nn', nn_models),
    ('model_result_dn', xgb_dn_models),
    ('model_result_g', xgb_g_models),
    ('model_result_mcw', xgb_mcw_models),
    ('model_result_lr', xgb_lr_models),
    ('model_result_d4', xgb_test_models)
]
all_model_results = {}
for file_name, models in all_models:
    try:
        # Fast path: results for this model family were computed previously.
        with open('../result/' + file_name, "rb") as f:
            model_result = pickle.load(f)
        all_model_results.update(model_result)
        for name, model_dict in model_result.items():
            print(name + " %s" % model_dict['avg_mean'])
    except FileNotFoundError:
        # Slow path: fit each model on every CV fold and record out-of-fold
        # predictions plus the MAE on the original (unshifted, unscaled) loss.
        model_result = {}
        for d in models:
            model = d['model']
            name = d['name']
            model_result[name] = {}
            model_result[name]['pred'] = []
            model_result[name]['mean'] = []
            print("executing " + name)
            for i, (train_idx, val_idx) in enumerate(kf.split(XY_train)):
                print(i)
                X_train = XY_train.iloc[train_idx,:-1]
                X_val = XY_train.iloc[val_idx,:-1]
                Y_train = XY_train.iloc[train_idx,-1]
                Y_val = XY_train.iloc[val_idx,-1]
                model.fit(X_train,Y_train)
                pred = getPred(model, X_val)
                model_result[name]['pred'].append(pred)
                result = mean_absolute_error(np.exp(scaler_y.inverse_transform(Y_val)) - best_shift, np.exp(scaler_y.inverse_transform(pred)) - best_shift)
                model_result[name]['mean'].append(result)
            mean = np.mean(model_result[name]['mean'])
            print(name + " %s" % mean)
            model_result[name]['avg_mean'] = mean
        with open('../result/' + file_name, "wb") as f:
            pickle.dump(model_result, f)
        all_model_results.update(model_result)
# ## Perform Stacking:
# - A method to combine predictions of multiple models
# In[18]:
# Stack the out-of-fold predictions of the five selected XGB models into a
# 5-column feature matrix and fit an SVR meta-model on it (cached on disk).
# NOTE(review): `minimum` and `mae` are assigned but never used below, and
# LinearRegression is imported but unused -- likely leftovers.
import pickle
model_used = ['XGB-d8-lr0.08', 'XGB-d7-lr0.08', 'XGB-d6-lr0.08', 'XGB-d5-lr0.08', 'XGB-d4-lr0.08']
np.random.seed(seed)
minimum = 2000
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
preds = np.array([all_model_results[name]['pred'] for name in model_used])
mae = []
X_ensem = None
Y_ensem = None
# Rebuild the out-of-fold predictions in CV-fold order so each row of
# X_ensem (one column per base model) lines up with its target in Y_ensem.
for i, (train_idx, val_idx) in enumerate(kf.split(XY_train)):
    X_train = XY_train.iloc[train_idx,:-1]
    X_val = XY_train.iloc[val_idx,:-1]
    Y_train = XY_train.iloc[train_idx,-1]
    Y_val = XY_train.iloc[val_idx,-1]
    pred = np.array([list(preds[a][i]) for a in range(5)]).T
    if X_ensem is None:
        X_ensem = pred
        Y_ensem = Y_val
    else:
        X_ensem = np.concatenate((X_ensem, pred), axis=0)
        Y_ensem = np.concatenate((Y_ensem, Y_val), axis=0)
try:
    # Reuse a previously fitted meta-model if one was cached.
    with open('../result/' + "ensemble_model", "rb") as f:
        ensemble_model = pickle.load(f)
except FileNotFoundError:
    ensemble_model = SVR(C=1)
    print("fitting")
    print(X_ensem.shape)
    print(Y_ensem.shape)
    ensemble_model.fit(X_ensem, Y_ensem)
    print("fitting done")
    with open('../result/' + "ensemble_model", "wb") as f:
        pickle.dump(ensemble_model, f)
print("predicting")
pred = ensemble_model.predict(X_ensem)
# Evaluate the stack's MAE on the original loss scale (undo scaling/log/shift).
result = mean_absolute_error(np.exp(scaler_y.inverse_transform(Y_ensem)) - best_shift, np.exp(scaler_y.inverse_transform(pred)) - best_shift)
print('result: ' + str(result))
# ## Make Predictions:
# In[19]:
# Refit the five chosen base models on the full training set, predict on the
# test set (cached on disk), feed those predictions through the SVR stack,
# map back to the original loss scale, and write the submission file.
# NOTE(review): `X_test` and `ID` are presumably defined earlier -- confirm.
import pickle
try:
    with open('../result/' + "predictions", "rb") as f:
        predictions = pickle.load(f)
except FileNotFoundError:
    try:
        from xgboost import XGBRegressor
        X = XY_train.iloc[:,:-1]
        Y = XY_train.iloc[:,-1:]
        # One model per tuned max_depth (8..4), hyperparameters from the sweeps.
        m1 = XGBRegressor(n_estimators=240,seed=seed, tree_method='gpu_hist',
                     max_depth=8,
                     gamma=3,
                     min_child_weight=5,
                     learning_rate=0.08)
        m2 = XGBRegressor(n_estimators=280,seed=seed, tree_method='gpu_hist',
                     max_depth=7,
                     gamma=0,
                     min_child_weight=5,
                     learning_rate=0.08)
        m3 = XGBRegressor(n_estimators=400,seed=seed, tree_method='gpu_hist',
                     max_depth=6,
                     gamma=3,
                     min_child_weight=4,
                     learning_rate=0.08)
        m4 = XGBRegressor(n_estimators=780,seed=seed, tree_method='gpu_hist',
                     max_depth=5,
                     gamma=0,
                     min_child_weight=5,
                     learning_rate=0.08)
        m5 = XGBRegressor(n_estimators=2000,seed=seed, tree_method='gpu_hist',
                     max_depth=4,
                     gamma=3,
                     min_child_weight=3,
                     learning_rate=0.08)
        # [:, None] turns each prediction vector into a column for stacking.
        m1.fit(X,Y)
        pred1 = getPred(m1, X_test)[:, None]
        print(pred1.shape)
        print("done fit 1")
        m2.fit(X,Y)
        pred2 = getPred(m2, X_test)[:, None]
        print("done fit 2")
        m3.fit(X,Y)
        pred3 = getPred(m3, X_test)[:, None]
        print("done fit 3")
        m4.fit(X,Y)
        pred4 = getPred(m4, X_test)[:, None]
        print("done fit 4")
        m5.fit(X,Y)
        pred5 = getPred(m5, X_test)[:, None]
        print("done fit 5")
        predictions = np.concatenate((pred1, pred2, pred3, pred4, pred5), axis=1)
        with open('../result/' + "predictions", "wb") as f:
            pickle.dump(predictions, f)
    except ModuleNotFoundError:
        # NOTE(review): if xgboost is missing, `predictions` stays undefined
        # and the lines below raise NameError.
        print('XGBoost is not installed with GUP support')
print(predictions.shape)
predictions = ensemble_model.predict(predictions)
predictions = np.exp(scaler_y.inverse_transform(predictions)) - best_shift
# Write submissions to output file in the correct format
with open("submission.csv", "w") as subfile:
    subfile.write("id,loss\n")
    for i, pred in enumerate(list(predictions)):
        subfile.write("%s,%s\n"%(ID[i],pred))
print("Done")
| true |
b6aaacd39fb2e27ab205d5d724e079ea3ba7a982 | Python | aljeshishe/tickets | /proxies/parse.py | UTF-8 | 508 | 2.59375 | 3 | [] | no_license | import sys
import sys
import json
import re
from collections import defaultdict

# Tally, per domain, how many log lines advertise each protocol variant.
# Expected line shape: "... [proto1, proto2, ...] ... <domain>"
# Raw-string, precompiled pattern (the old non-raw '\[' escape is deprecated).
LINE_RE = re.compile(r'.+\[(.+)\].+ (.+)>')

d = defaultdict(lambda: defaultdict(int))
with open(sys.argv[1]) as f:
    for line in f:
        match = LINE_RE.match(line)
        if match is None:
            # Previously a malformed line crashed with AttributeError on
            # .groups(); skip such lines instead.
            continue
        protos, domen = match.groups()
        protos = protos.split(', ')
        print(protos, domen)
        for proto in protos:
            # Normalize "name: value" entries to "name-value" keys.
            proto = proto.replace(': ', '-')
            d[domen][proto] += 1

# Print one summary line per domain: "domain proto:count proto:count ..."
for domen, protos in d.items():
    print(domen, ' '.join(['%s:%s' % (k, v) for k, v in protos.items()]))
| true |
4ef264f871bfafdb542384612ecff49659b5b2e2 | Python | benpmeredith/Ames_Iowa_Exercise | /lib/__init__.py | UTF-8 | 583 | 2.671875 | 3 | [] | no_license | import pandas as pd
# Notebook convenience bootstrap: importing this package pulls in the usual
# analysis stack, silences warnings, and pins the NumPy RNG seed.
# NOTE(review): all of these are import-time side effects -- every `print`
# below runs whenever this package is imported.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tqdm
import warnings
warnings.filterwarnings('ignore')
np.random.seed(42)
from IPython.display import display
from bs4 import BeautifulSoup
import csv
print('Pandas Initiated')
print('Numpy Initiated')
print('Matplotlib Initiated')
print('Seaborn Initiated')
print('tqdm Initiated')
print('Warnings: Off')
print('Random Seed: 42')
print('IPython Display Initiated')
print('BeautifulSoup Initiated')
print('Import csv Initiated')
print('Please initiate %matplotlib inline')
| true |
212c18ac96bf1804d5ba1172d4b71705400144de | Python | pablo-solis/VARDER | /utilsVAR.py | UTF-8 | 10,075 | 2.609375 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import nltk
import yfinance as yf
# from nltk.sentiment.vader import SentimentIntensityAnalyzer
import io
import base64
import re
import seaborn as sns
# import bls
# Import Statsmodels
from statsmodels.tsa.api import VAR
from random import choice
sns.set_style('darkgrid')
# -------- for plotting -----------
def html_plot():
    """Render the current matplotlib figure to a base64-encoded PNG string.

    Saves the active pyplot figure into an in-memory buffer, closes the
    figure, and returns the PNG bytes base64-encoded (ASCII `str`) so the
    image can be embedded directly in an HTML page.
    """
    img = io.BytesIO()
    plt.savefig(img, format='png')
    img.seek(0)
    # (removed a duplicate base64.b64encode() call whose result was discarded)
    plot_url = base64.b64encode(img.getvalue()).decode()
    plt.close()
    return plot_url
#---------
# generates the main plot for current prediction
def purchase_power_VARDER(df, savings=1000, horizon='3months'):
    """Plot the projected purchasing power of `savings` with and without investing.

    df      -- frame with a monthly DatetimeIndex containing a 'CPI' column
               (including the VARDER forecast rows). Mutated in place: adds
               'infl' and 'purchase_power' columns.
    savings -- dollar amount held as of 2019-08-01.
    horizon -- string embedding the number of months, e.g. '3months'.

    Returns (figure, loss): `figure` is the base64-encoded plot produced by
    html_plot(); `loss` is how many Aug-2019 dollars investing preserves
    relative to holding cash over the horizon.
    """
    # Extract the month count from the horizon string ('3months' -> 3).
    n_months = int(re.search(r'\d+', horizon).group())
    # Inflation relative to the Aug 2019 CPI baseline; idle cash decays by it.
    base = df.loc['2019-08-01', 'CPI']
    df['infl'] = (df['CPI'] / base - 1)
    df['purchase_power'] = savings / (1 + df['infl'])
    # Assumed annual yield: 7.10% for invested balances above $3000, otherwise
    # 2.5%; converted to a monthly growth factor. (The two branches previously
    # duplicated the mpy formula.)
    apy = 7.10 if savings > 3000 else 2.5
    mpy = (1 + apy / 100) ** (1 / 12)
    # Grow the balance month-by-month from the Aug 2019 starting point.
    inv_start = df.index.get_loc('2019-08-01')
    n_inv = len(df) - inv_start
    invested = [savings] * len(df)
    invested[inv_start:] = [savings * mpy ** i for i in range(1, n_inv + 1)]
    # (removed an unused `new_list` that recomputed the same values)
    new_df = pd.DataFrame({'purchase_power': df['purchase_power'].values,
                           'VARDER': invested})
    new_df.index = df.index
    # Deflate the invested balance too, so both series are in Aug-2019 usd.
    new_df['VARDER'] = new_df['VARDER'] / (1 + df['infl'])
    VARDER_return = new_df['VARDER'].values[-1]
    # Purchasing power of idle cash at the end of the user's horizon.
    cash_pp = df['purchase_power'].iloc[inv_start + n_months]
    loss = VARDER_return - cash_pp  # should be positive number
    # Plot the last few months of both series and encode the figure.
    ax = new_df[['purchase_power', 'VARDER']].tail(n_months + 3).plot()
    ax.set_ylabel('Aug 2019 usd')
    plt.title('purchasing power of your savings')
    figure = html_plot()
    return figure, loss
def historical_VWELX():
    """Bundled monthly price history for the VWELX fund."""
    return _load_static_csv('VWELX.csv')
def historical_VIPSX():
    """Bundled monthly price history for the VIPSX fund."""
    return _load_static_csv('VIPSX.csv')
def historical_CPI():
    """Bundled monthly CPI series."""
    return _load_static_csv('CPI.csv')
def _load_static_csv(filename):
    # Shared loader: read a packaged CSV, restoring its saved DatetimeIndex.
    return pd.read_csv('flaskexample/static/' + filename,
                       index_col='Unnamed: 0', parse_dates=True)
def bt_tickr_savings_df(date = '2017-08-01',savings=1000,horizon=3):
    """Back-test the model's buy signal starting at `date` over `horizon` months.

    Returns (frame, delta, tikr): a frame with an inflation-adjusted invested
    column (named after the chosen ticker) and a 'Savings' column, the final
    purchasing-power difference `delta`, and the chosen ticker symbol.
    """
    # this is where the model takes ACTION
    s_year = date[:4]
    s_month = date[5:7]
    tikr = signal_from_infl(year = s_year,month = s_month,n_steps = horizon)
    # tikr = choice(['VIPSX','VWELX'])
    tikr_monthly = historical_VWELX() if tikr == 'VWELX' else historical_VIPSX()
    # get the slice based on dates and horizon
    start = pd.to_datetime(date)
    end = start+pd.offsets.MonthBegin(horizon)
    tikr_monthly_slice = tikr_monthly[start:end]
    # Buy whole shares at the first month's close; keep the cash remainder.
    # NOTE(review): assumes the CSV has a 'Close' column -- confirm.
    first_price = tikr_monthly_slice['Close'].values[0]
    n_shares = int(savings/first_price)
    remainder = savings - n_shares*first_price
    # multiply by number of shares + remainder
    # NOTE(review): assigning to a slice may raise SettingWithCopyWarning.
    tikr_monthly_slice['invested'] = tikr_monthly_slice['Close']*n_shares+remainder
    # get cpi data used to compute inflation
    cpi_df = historical_CPI()
    cpi_trim = cpi_df[start:end]
    new_df = pd.DataFrame({tikr:tikr_monthly_slice['invested'].values,'CPI':cpi_trim['CPI'].values})
    new_df.index = tikr_monthly_slice.index
    # set base to calculate inflation
    base = new_df['CPI'].values[0]
    new_df['infl'] = new_df['CPI']/base -1
    # Deflate both the idle savings and the invested balance to start-date usd.
    new_df['Savings'] = savings/(1+new_df['infl'])
    new_df[tikr] = new_df[tikr]/(1+new_df['infl'])
    delta = new_df[tikr].values[-1] - new_df['Savings'].values[-1]
    return new_df[[tikr,'Savings']],delta,tikr
def interpret_delta(delta):
    """Render the purchasing-power difference as a user-facing sentence."""
    if delta <= 0:
        return f'Investing leads to a loss of ${abs(delta):.2f} in purchasing power.'
    return f'Investing leads to a gain of ${delta:.2f} in purchasing power.'
def bt_purchase_power(df,tikr='VWELX',date = '2017-08-01',savings=1000,horizon='3months'):
    """Incomplete back-test variant of purchase_power_VARDER.

    NOTE(review): this function is unfinished -- it mutates `df` (adds 'infl'
    and 'purchase_power' columns) and then returns None; `tikr` and
    `n_months` are computed but never used.
    """
    #
    # df should have columns CPI and YOY
    # but for now I'll just need CPI
    # make sure to provide date
    # get number of months
    x = re.search(r'\d+',horizon)
    n_months = int(x.group())
    # df should have columns CPI,YOY
    # generate purchase power plot based on model
    base_pp = savings
    base = df.loc[date,'CPI']
    df['infl'] = (df['CPI']/base - 1) # definition of inflation
    df['purchase_power'] = savings/(1+df['infl']) # inflation decay
    # now all I need to do is get actual stock data
def infl_forecast_values(year='2001',month='02',n_steps = 6):
    """Forecast the next `n_steps` first-differenced YOY-inflation values.

    Fits a VAR(4) on the first-differenced training data up to
    `year`-`month` and returns the forecast of the YOY column (column 1).
    The values are still on the *differenced* scale; callers re-integrate
    them (see integrate_prediction / back_test).
    """
    # n_steps is how far into future you look
    # crop the data depending on n_steps and date
    orig_df = load_data()
    date = form_date(year,month)
    train, test=crop_data(orig_df,date,n_steps)
    #take first difference
    first_row, train_1 = take_diff(train)
    first_YOY = first_row['YOY']
    # create VAR model
    model = VAR(train_1,freq = 'MS')
    #for now fit to 4 lags
    results = model.fit(4)
    lag_order = results.k_ar
    prediction_input = train_1.values[-lag_order:]
    # Column 1 of the forecast is the YOY series.
    infl_results = results.forecast(prediction_input, n_steps)[:,1]
    return infl_results
def signal_from_infl(year='2001', month='02', n_steps=6):
    """Turn the VAR inflation forecast into a fund-selection signal.

    Returns 'VWELX' when the summed forecast of the differenced YOY
    inflation series is positive, otherwise 'VIPSX' (the
    inflation-protected fund).

    Bug fix: the forecast was previously requested with hard-coded
    arguments (year='2001', month='02', n_steps=6), silently ignoring
    the caller's date and horizon.
    """
    fc = infl_forecast_values(year=year, month=month, n_steps=n_steps)
    # Net direction of the forecast over the horizon decides the signal.
    signal = sum(fc)
    if signal > 0:
        return 'VWELX'
    else:
        return 'VIPSX'
def back_test(year='2001',month='02',n_steps = 6,today = False):
    """Fit a VAR(4) on data up to a date and attach its YOY forecast.

    When `today` is False: returns the cropped source frame with a 'VARDER'
    column holding the re-integrated YOY series including the forecast.
    When `today` is True: forecasts from 2019-08 and returns a frame with
    forecast 'YOY' values and 'CPI' recomputed from them.
    """
    # n_steps is how far into future you look
    # if today then don't need to compute mean square error
    if today:
        date = form_date('2019','08')
    else:
        date = form_date(year,month)
    # used when today = True
    end_date= pd.to_datetime(date)+pd.offsets.MonthBegin(n_steps)
    orig_df = load_data()
    # crop the data depending on n_steps and date
    train, test=crop_data(orig_df,date,n_steps)
    #take first difference
    first_row, train_1 = take_diff(train)
    first_YOY = first_row['YOY']
    # create VAR model
    model = VAR(train_1,freq = 'MS')
    #for now fit to 4 lags
    results = model.fit(4)
    lag_order = results.k_ar
    prediction_input = train_1.values[-lag_order:]
    # Column 1 of the forecast is the YOY series.
    infl_results = results.forecast(prediction_input, n_steps)[:,1]
    #previous inflation values
    prev_infl = train_1.values[:,1]
    # Re-integrate the differenced history + forecast back to YOY levels.
    infl_with_fc = integrate_prediction(prev_infl,infl_results,first_YOY)
    # just return results for current
    if today:
        # returns mean lower upper
        # fig = results.plot_forecast(10)
        # results.forecast_interval(y = prediction_input,steps = 6)
        # create prediction index
        # could change overlap later
        # want overlap so that I can compute CPI from YOY
        overlap = 24
        idx = pd.date_range(end = end_date,freq = 'MS',periods = n_steps+overlap)
        values = infl_with_fc[-(n_steps+overlap):]
        fc_df = pd.DataFrame({'YOY':values})
        fc_df.index = idx
        # now need to add CPI data based on YOY
        YOY_CPI_fc = orig_df[['YOY','CPI']].reindex(index = fc_df.index)
        # now update the YOY values
        YOY_CPI_fc['YOY'] = fc_df['YOY']
        # CPI follows from YOY: this month's CPI = (YOY/100 + 1) * CPI 12 months ago.
        YOY_CPI_fc['CPI'] = ((YOY_CPI_fc['YOY']/100)+1)*YOY_CPI_fc['CPI'].shift(12)
        m = len(YOY_CPI_fc)
        # NOTE(review): the .tail() result below is discarded -- the intended
        # "return the non-nan values" trim never happens.
        YOY_CPI_fc.tail(m-12)
        return YOY_CPI_fc
    # create dataframe with prediction...
    # should return orig with VARDER column series for YOY fc
    with_fc_df = append_fc(orig_df,date,n_steps,fc=infl_with_fc)
    # now you need to integrate results
    return with_fc_df
# for start of back test
def form_date(year, month):
    """Build a 'YYYY-MM-01' month-start date string from string parts."""
    return f"{year}-{month}-01"
# main data I'm working with
def load_data():
    """Load the bundled sentiment/CPI dataset, keeping the modelling columns."""
    frame = pd.read_csv('flaskexample/static/neg_CPI_YOY.csv',
                        index_col='Unnamed: 0', parse_dates=True)
    return frame[['neg/l', 'YOY', 'CPI']]
# NOTE(review): leftover no-op debug call -- the returned string is discarded.
form_date('1997','02')
# crop data accorind to date
def crop_data(df, date, n):
    """Split `df` into a training slice ending at `date` and the next `n` rows.

    `date` may be a string or pandas datetime present in the index. The first
    12 rows are skipped because the year-over-year series is NaN until a full
    year of history exists.
    """
    split_pos = df.index.get_loc(date) + 1
    train = df.iloc[12:split_pos]
    test = df.iloc[split_pos:split_pos + n]
    return train, test
def append_fc(df, date, n, fc=0):
    """Return rows 12..(date + n rows) of `df` with the forecast as 'VARDER'.

    date -- string or pandas datetime present in the index.
    n    -- number of additional rows to keep past `date`.
    fc   -- scalar or sequence broadcastable to the window length.
    """
    i = df.index.get_loc(date)
    # Rows 0-11 are NaN (the YOY series needs 12 months of history), so the
    # window starts at position 12. .copy() makes the column assignment safe
    # (the original sliced view triggered SettingWithCopyWarning).
    window = df.iloc[12:(i + 1) + n].copy()
    window['VARDER'] = fc
    return window
def append_full_df():
    """Unimplemented stub; intended to return all columns (cf. append_fc)."""
    # return all columns
    pass
def take_diff(df):
    """First-difference `df`, also returning its original first row so the
    series can be re-integrated later."""
    head = df.iloc[0]
    differenced = df.diff().dropna()
    return head, differenced
# just takes list and value
def arr_undo_diff(lst, val):
    """Undo a first difference: running sums of [val] + lst.

    Equivalent to integrating the differenced series `lst` starting from the
    initial value `val`; result[0] == val and len(result) == len(lst) + 1.
    """
    from itertools import accumulate, chain
    # accumulate computes the running sum in O(n), replacing the previous
    # O(n^2) sum-of-prefix-slices loop.
    return list(accumulate(chain([val], lst)))
def integrate_prediction(prev, fc, first):
    """Re-integrate a differenced series with its forecast appended.

    prev  -- previously observed (differenced) values
    fc    -- forecast (differenced) values to append after `prev`
    first -- the undifferenced initial value of the series

    Returns the running sums of [first] + prev + fc as a list, i.e. the
    forecast mapped back onto the original (undifferenced) scale.
    """
    arr = np.concatenate(([first], prev, fc))
    # np.cumsum is the O(n) equivalent of the previous sum-of-prefix loop.
    return list(np.cumsum(arr))
| true |
57aa59e3207780fe30917d50be4f7db06003f95a | Python | JohnCorley/PythonLearning | /FirstExample.py | UTF-8 | 601 | 4.25 | 4 | [] | no_license | Garage = "Tesla", "Lexus", "Bike"
# Interactive tutorial script: tuple iteration, escaped quotes, exponents,
# while/for loops, and an if/elif/else comparison against user input.
for each_car in Garage:
    print(each_car)
print('He said \"Hello There\"')
print(4**4)
count = 0
while count < 10:
    print("The count is ",count)
    count += 1
# For each y in 1, 8, 15, ... 99, ask the user for a number and compare it.
for y in range(1,100,7):
    print ("Y is :",y)
    count=int(input("Enter Count"))
    if count < y:
        print ("Count is less than Y \n")
    elif count > y:
        print ("Count is greater than Y \n")
    else:
        print ("must be equal")
def MyPower(num):
    """Return num raised to the power of itself (num ** num)."""
    return num ** num
print(MyPower(10))
print (MyPower(100))
# Placeholder stubs, not yet implemented.
def task1():
    return()
def task2():
    print ("Hello")
1b230ed871a9c39da46b3884badc0af5651434bd | Python | mercurialjc/cryptopals | /implement_pkcs7_padding.py | UTF-8 | 910 | 3.984375 | 4 | [] | no_license | #!/usr/bin/env python
"""Implement PKCS#7 padding
A block cipher transforms a fixed-sized block (usually 8 or 16 bytes) of plaintext into ciphertext. But we almost never want to transform a single block; we encrypt irregularly-sized messages.
One way we account for irregularly-sized messages is by padding, creating a plaintext that is an even multiple of the blocksize. The most popular padding scheme is called PKCS#7.
So: pad any block to a specific block length, by appending the number of bytes of padding to the end of the block. For instance,
"YELLOW SUBMARINE"
... padded to 20 bytes would be:
"YELLOW SUBMARINE\x04\x04\x04\x04"
"""
def pkcs7_padding(string, length):
    """Pad `string` to exactly `length` bytes using PKCS#7.

    Each padding byte's value equals the number of bytes added, so
    'YELLOW SUBMARINE' padded to 20 gains four 0x04 bytes. If the input
    already has the target length, nothing is appended. Raises ValueError
    when the input is longer than `length` (previously this silently
    returned garbage).
    """
    size = length - len(string)
    if size < 0:
        raise ValueError('input is longer than the target length')
    # chr(size) * size works on both Python 2 and 3; the old
    # str(bytearray(...)) produced the literal "bytearray(b'...')" on Python 3.
    return string + chr(size) * size
def main():
    """Demo: print the cryptopals example padded to 20 bytes."""
    # print() with parentheses works on both Python 2 and 3; the original
    # bare `print ...` statement was Python-2-only syntax.
    print(pkcs7_padding('YELLOW SUBMARINE', 20))

if __name__ == '__main__':
    main()
| true |
652dfa5591681ac300a834e68b0884eeb2351367 | Python | PencilCode/pencilcode | /content/lib/pencilcode.py | UTF-8 | 7,259 | 2.875 | 3 | [
"MIT",
"BSD-3-Clause"
] | permissive | import pencilcode_internal
# The SpriteObject class wraps a jQuery-turtle object so it can be used in Python.
# This includes Turtle, Sprite, Piano, and Pencil objects.
class SpriteObject():
    """Python-side wrapper around one jQuery-turtle object.

    Each method simply forwards to the matching pencilcode_internal function,
    passing the wrapped JS object as the first argument. Used for Turtle,
    Sprite, Piano, and Pencil objects (see the factory functions below).
    """
    def __init__(self, jsSpriteObject):
        # Handle to the underlying JavaScript sprite; passed to every call.
        self.jsSpriteObject = jsSpriteObject
    ###################
    ## Move Commands ##
    ###################
    def fd(self, distance):
        pencilcode_internal.fd(self.jsSpriteObject, distance)
    def bk(self, distance):
        pencilcode_internal.bk(self.jsSpriteObject, distance)
    def rt(self, angle):
        pencilcode_internal.rt(self.jsSpriteObject, angle)
    def lt(self, angle):
        pencilcode_internal.lt(self.jsSpriteObject, angle)
    def ra(self, radius, angle):
        pencilcode_internal.ra(self.jsSpriteObject, radius, angle)
    def la(self, radius, angle):
        pencilcode_internal.la(self.jsSpriteObject, radius, angle)
    def speed(self, value):
        pencilcode_internal.speed(self.jsSpriteObject, value)
    def home(self):
        pencilcode_internal.home(self.jsSpriteObject)
    def turnto(self, value):
        pencilcode_internal.turnto(self.jsSpriteObject, value)
    def moveto(self, x, y):
        pencilcode_internal.moveto(self.jsSpriteObject, x, y)
    def movexy(self, x, y):
        pencilcode_internal.movexy(self.jsSpriteObject, x, y)
    def jumpto(self, x, y):
        pencilcode_internal.jumpto(self.jsSpriteObject, x,y)
    def jumpxy(self, x, y):
        pencilcode_internal.jumpxy(self.jsSpriteObject, x, y)
    def pause(self, value):
        # Note: forwards to the internal `sleep`, not a `pause` primitive.
        pencilcode_internal.sleep(self.jsSpriteObject, value)
    ######################
    ## Control Commands ##
    ######################
    # NOTE(review): the three control methods below are stubbed out; the
    # intended forwarding calls are commented behind `pass`.
    def button(self, buttonClick, callee):
        pass#pencilcode_internal.button(self.jsSpriteObject, buttonClick, callee)
    def keydown(self, key):
        pass#pencilcode_internal.keydown(self.jsSpriteObject, key)
    def click(self, t):
        pass#pencilcode_internal.click(self.jsSpriteObject, t)
    ##################
    ## Art Commands ##
    ##################
    def hide(self):
        pencilcode_internal.hide(self.jsSpriteObject)
    def show(self):
        pencilcode_internal.show(self.jsSpriteObject)
    def cs(self):
        # NOTE(review): stubbed out like the control commands above.
        pass#pencilcode_internal.cs(self.jsSpriteObject)
    def ht(self):
        pencilcode_internal.ht(self.jsSpriteObject)
    def st(self):
        pencilcode_internal.st(self.jsSpriteObject)
    def pu(self):
        pencilcode_internal.pu(self.jsSpriteObject)
    def pd(self):
        pencilcode_internal.pd(self.jsSpriteObject)
    def box(self, a, b):
        pencilcode_internal.box(self.jsSpriteObject,a,b)
    def grow(self, a):
        pencilcode_internal.grow(self.jsSpriteObject,a)
    def pen(self, color, size):
        pencilcode_internal.pen(self.jsSpriteObject,color, size)
    def dot(self, color, size):
        pencilcode_internal.dot(self.jsSpriteObject,color, size)
    def fill(self, color):
        pencilcode_internal.fill(self.jsSpriteObject,color)
    def wear(self, name):
        pencilcode_internal.wear(self.jsSpriteObject,name)
    def drawon(self, path):
        pencilcode_internal.drawon(self.jsSpriteObject,path)
    ####################
    ## Sound Commands ##
    ####################
    def play(self, tone):
        pencilcode_internal.play(self.jsSpriteObject, tone)
    def tone(self, a, b, c = None):
        pencilcode_internal.tone(self.jsSpriteObject, a, b, c)
    def silence(self):
        pencilcode_internal.silence(self.jsSpriteObject)
    def say(self, a):
        pencilcode_internal.say(self.jsSpriteObject, a)
# These commands act on the default turtle object (which is not wrapped.).
# Each forwards to pencilcode_internal with None as the sprite handle,
# which the internal layer treats as the default turtle.
###################
## Move Commands ##
###################
def fd(value):
    pencilcode_internal.fd(None, value)
def bk(value):
    pencilcode_internal.bk(None, value)
def rt(value):
    pencilcode_internal.rt(None, value)
def lt(value):
    pencilcode_internal.lt(None, value)
# Fix the rest, Stevie! ;)# All Done Jem! :)
def ra(radius, angle):
    pencilcode_internal.ra(None, radius, angle)
def la(radius, angle):
    pencilcode_internal.la(None, radius, angle)
def speed(value):
    pencilcode_internal.speed(None, value)
def home():
    pencilcode_internal.home(None)
def turnto(value):
    pencilcode_internal.turnto(None, value)
def moveto(x, y):
    pencilcode_internal.moveto(None, x, y)
def movexy(x, y):
    pencilcode_internal.movexy(None, x, y)
def jumpto(x, y):
    pencilcode_internal.jumpto(None, x,y)
def jumpxy(x, y):
    pencilcode_internal.jumpxy(None, x, y)
def pause(value):
    # Note: forwards to the internal `sleep`, matching SpriteObject.pause.
    pencilcode_internal.sleep(None, value)
##################
## Art Commands ##
##################
# Default-turtle drawing commands; all forward to pencilcode_internal.
def hide():
    pencilcode_internal.hide(None)
def show():
    pencilcode_internal.show(None)
def cs():
    pencilcode_internal.cs(None)
def ht():
    pencilcode_internal.ht(None)
def st():
    pencilcode_internal.st(None)
def pu():
    pencilcode_internal.pu(None)
def pd():
    pencilcode_internal.pd(None)
def box(a, b):
    pencilcode_internal.box(None,a,b)
def grow(a):
    pencilcode_internal.grow(None,a)
def pen(color, size):
    pencilcode_internal.pen(None,color, size)
def dot(color, size):
    pencilcode_internal.dot(None,color, size)
def fill(color):
    pencilcode_internal.fill(None,color)
def wear(name):
    pencilcode_internal.wear(None,name)
def drawon(path):
    pencilcode_internal.drawon(None,path)
###################
## Text Commands ##
###################
# Text commands forward without a sprite handle (they act on the page).
# NOTE(review): `type` and `object` below shadow Python builtins, and
# `def await(...)` is a SyntaxError on Python >= 3.7 where `await` is a
# reserved keyword -- this module cannot be imported there as-is.
def write(message):
    pencilcode_internal.write(message)
def debug(object):
    pencilcode_internal.debug(object)
def type(message):
    pencilcode_internal.type(message)
def typebox(color):
    pencilcode_internal.typebox(color)
def typeline():
    pencilcode_internal.typeline()
def label(name):
    pencilcode_internal.label(name)
def await(lamda_exp):
    # TODO - this might be tricky
    pass
def read(prompt):
    pencilcode_internal.read(prompt)
def readnum(prompt):
    pencilcode_internal.readnum(prompt)
####################
## Sound Commands ##
####################
# Default-turtle sound commands.
def play(tone):
    pencilcode_internal.play(None, tone)
def tone(a, b, c = None):
    pencilcode_internal.tone(None, a, b, c)
def silence():
    pencilcode_internal.silence(None)
def say(a):
    pencilcode_internal.say(None, a)
######################
## Control Commands ##
######################
# Unlike the SpriteObject methods, these module-level versions actually
# forward to pencilcode_internal.
def button(buttonClick, callee):
    pencilcode_internal.button(None, buttonClick, callee)
def keydown(key):
    pencilcode_internal.keydown(None, key)
def click(t):
    pencilcode_internal.click(None, t)
######################
## Sprites ##
######################
# Factory functions: create an internal JS object and wrap it in SpriteObject.
def Turtle(color):
    return SpriteObject(pencilcode_internal.Turtle(color))
def Sprite():
    return SpriteObject(pencilcode_internal.Sprite())
def Piano():
    return SpriteObject(pencilcode_internal.Piano())
def Pencil():
    return SpriteObject(pencilcode_internal.Pencil())
######################
## Operators ##
######################
# NOTE(review): `random` shadows the stdlib module name and `min` shadows
# the builtin within this module's namespace.
def random(a):
    return pencilcode_internal.random(a)
def min(a, b = None):
    return pencilcode_internal.min(a,b)
| true |
36fc8273143ca086da34d4d34cd140e1a32c7765 | Python | Charleo85/SIS-Rebuild | /misc/data/hello.py | UTF-8 | 851 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | from pyspark import SparkContext
# Count how often each page id appears in "user,page" input lines and print
# the totals (a minimal Spark "popular items" job).
sc = SparkContext("spark://spark-master:7077", "PopularItems")
data = sc.textFile("/tmp/data/inputs/sample.in", 2)  # each worker loads a piece of the data file
pairs = data.map(lambda line: line.split(","))  # tell each worker to split each line of it's partition
pages = pairs.map(lambda pair: (pair[1], 1))  # re-layout the data to ignore the user id
count = pages.reduceByKey(lambda x,y: x+y)  # shuffle the data so that each key is only on one worker
                         # and then reduce all the values by adding them together
output = count.collect()  # bring the data back to the master node so we can print it out
# NOTE(review): the loop variable `count` rebinds the RDD name above; harmless
# here because the RDD is no longer needed after collect().
for page_id, count in output:
    print ("page_id %s count %d" % (page_id, count))
print ("Popular items done")
sc.stop()
| true |
f93b3a6286ea77881d77265a76c4b36daac7c99d | Python | crystalee01/read112 | /read112code.py | UTF-8 | 12,719 | 3.46875 | 3 | [] | no_license | from cmu_112_graphics import *
from texttospeech import *
from tkinter import *
import random, math
from PIL import Image
import string
'''
Goal: make educational app for children with dyslexia
Features:
- generate random words with confusing vowels and playback separate phonetic sounds
- highlight; lots of colors
- play audio of words that are hard to spell, and then they enter it, and we spell check
- playback audio with slower pronounciation of a word
- keyPressed for each keyboard key that plays the sound whenever we press it
- words describing tactile things you also display the image (e.g. tree)
'''
class SplashScreenMode(Mode):
    """Main menu: four colored buttons that switch to the activity modes."""
    def redrawAll(self, canvas):
        # Buttons are fixed 300x100 rectangles in the x-band 450..750; the
        # hit-testing in mousePressed below uses the same coordinates.
        self.font = "Arial 26 bold"
        canvas.create_rectangle(450, 200, 750, 300, fill="green")
        canvas.create_rectangle(450, 350, 750, 450, fill="blue")
        canvas.create_rectangle(450, 500, 750, 600, fill="red")
        canvas.create_rectangle(450, 650, 750, 750, fill="orange")
        canvas.create_text(600, 50, text="Welcome to Read112!", font=self.font)
        canvas.create_text(600, 100, text="Click on an activity to get started!", font=self.font)
        canvas.create_text(600, 250, text="Spelling", font=self.font, fill="white")
        canvas.create_text(600, 400, text="Typing", font=self.font, fill="white")
        canvas.create_text(600, 550, text="Images", font=self.font, fill="white")
        canvas.create_text(600, 700, text="Picture Match", font=self.font, fill="white")

    def mousePressed(self, event):
        # Route a click inside one of the button rectangles to its mode.
        if (event.x >= 450) and (event.x <= 750):
            if (event.y >= 200) and (event.y <= 300): #Spelling
                self.app.setActiveMode(self.app.spellingMode)
            elif (event.y >= 350) and (event.y <= 450): #Typing
                self.app.setActiveMode(self.app.typingMode)
            elif (event.y >= 500) and (event.y <= 600): #Images
                self.app.setActiveMode(self.app.imageMode)
            elif (event.y >= 650) and (event.y <= 750): #Picture Match
                self.app.setActiveMode(self.app.pictureMatchMode)
class SpellingMode(Mode):
    """Spelling activity: play a word aloud, let the user type it, then check."""
    def appStarted(self):
        self.tts = TextToSpeech()
        # Word list the user works through in order.
        self.words = ["cat", "start", "stare"]
        urlSpeaker = "https://tinyurl.com/yyv3eflw"
        self.imageSpeaker = self.scaleImage(self.loadImage(urlSpeaker), 1/8)
        self.cacheSpeaker = ImageTk.PhotoImage(self.imageSpeaker)
        self.inputtedString = ''
        self.wordIndex = 0
        self.currentWord = self.words[self.wordIndex]
        # NOTE(review): isDone is set when the list is exhausted but nothing
        # visible here reads it -- confirm it ends the activity elsewhere.
        self.isDone = False
        self.message = ''
    def initWord(self):
        # Advance to the next word, or mark the activity finished.
        self.inputtedString = ''
        self.wordIndex += 1
        if self.wordIndex < len(self.words):
            self.currentWord = self.words[self.wordIndex]
        else:
            self.isDone = True
    def keyPressed(self, event):
        # Enter submits, Delete removes the last character, anything else is
        # appended to the answer being typed.
        if event.key == "Enter":
            self.checkSpelling()
        elif event.key == "Delete":
            self.inputtedString = self.inputtedString[:-1]
        else:
            self.inputtedString = self.inputtedString + event.key
    def checkSpelling(self):
        # Compare the typed answer to the current word and show feedback.
        if self.inputtedString == self.currentWord:
            self.message = "Yay!"
        else:
            self.message = f'The correct spelling is: {self.currentWord}'
        self.initWord()
    def redrawAll(self, canvas):
        canvas.create_rectangle(0, 0, self.width, self.height, fill=self.app.offwhite)
        # Back button (top-left oval) -- hit-tested in mousePressed.
        canvas.create_oval(5, 5, 100, 50, fill=self.app.maroon)
        canvas.create_text(52, 27, text="Back", font=self.app.font, fill=self.app.offwhite)
        canvas.create_text(600, 100, text="Click on the speaker icon and type the word you hear.", font=self.app.font)
        canvas.create_image(600, 200, image=self.cacheSpeaker)
        canvas.create_rectangle(500, 300, 700, 325, fill="gray", width=2)
        canvas.create_text(600, 315, text=self.inputtedString, fill=self.app.offwhite)
        canvas.create_text(600, 500, text=self.message, font=self.app.font)
    def mousePressed(self, event):
        # Back button returns to the splash screen; the speaker icon region
        # replays the current word's pronunciation.
        if (event.x > 5) and (event.x < 100) and (event.y > 5) and (event.y < 50):
            self.app.setActiveMode(self.app.splashMode)
        elif (event.x > 550) and (event.x < 650) and (event.y > 150) and (event.y < 250):
            self.tts.get_pronunciation(self.currentWord)
class TypingMode(Mode):
    """Typing activity: show a word letter-by-letter; each correct keypress
    plays the letter's sound and turns it green."""
    def appStarted(self):
        self.tts = TextToSpeech()
        self.wordDict = dict()
        self.initWordsDict()
        self.initNewWord()
    def initWordsDict(self):
        # Map each practice word to its list of letters.
        self.words = ["tree", "cat", "cut", "dog", "bog", "start", "stare", "glass", "gas"]
        for word in self.words:
            self.wordDict[word] = list(word)
    def initNewWord(self):
        # Pick a random word and reset the per-letter progress flags.
        self.currentWord = self.words[random.randint(0, len(self.words) - 1)]
        self.letterVals = [False] * len(self.currentWord)
        self.index = 0
    def keyPressed(self, event):
        # Only lowercase letters are handled; each press is pronounced and
        # then checked against the expected next letter.
        for letter in string.ascii_lowercase:
            if event.key == letter:
                self.tts.get_pronunciation(letter)
                self.checkLetter(event.key)
    def checkLetter(self, userLetter):
        # Advance only when the typed letter matches the next expected one;
        # finishing the word starts a new random word.
        letters = self.wordDict[self.currentWord]
        if userLetter == letters[self.index]:
            self.letterVals[self.index] = True
            self.index += 1
        if self.index == len(self.currentWord):
            self.initNewWord()
    def drawLetters(self, word, canvas):
        # Completed letters render green, pending letters blue.
        # NOTE(review): the `word` parameter is unused; the method reads
        # self.currentWord instead.
        font = "Arial 120 bold"
        letterMargin = self.width // (len(self.currentWord) + 2)
        for i in range(len(self.currentWord)):
            if self.letterVals[i] == False:
                fill = "blue"
            else: fill = "green"
            canvas.create_text(letterMargin * (i + 1), self.height // 2,
                               text=self.wordDict[self.currentWord][i],
                               fill=fill, font=font)
    def redrawAll(self, canvas):
        canvas.create_rectangle(0, 0, self.width, self.height, fill=self.app.offwhite)
        canvas.create_oval(5, 5, 100, 50, fill=self.app.maroon)
        canvas.create_text(52, 27, text="Back", font=self.app.font, fill=self.app.offwhite)
        canvas.create_text(self.width // 2, 40, font=self.app.font, text='''
        Type each letter you see on the screen and follow along with the pronunciation!
        ''')
        self.drawLetters(self.currentWord, canvas)
    def mousePressed(self, event):
        # Back button (top-left oval) returns to the splash screen.
        if (event.x > 5) and (event.x < 100) and (event.y > 5) and (event.y < 50):
            self.app.setActiveMode(self.app.splashMode)
class ImageMode(Mode):
    """Mode that shows a picture of an animal the user asks about by name."""

    # Map each supported animal name to the URL of its picture.
    ANIMAL_URLS = {
        "tiger": 'https://tinyurl.com/y4d33q9r',
        "cat": 'https://tinyurl.com/y4cjpxxp',
        "chicken": 'https://tinyurl.com/yxzd7qky',
        "dog": 'https://tinyurl.com/y7dghhz3',
        "duck": 'https://tinyurl.com/yxwfjqwp',
        "fish": 'https://tinyurl.com/y4x5lqch',
        "frog": 'https://tinyurl.com/y2atcte5',
        "cow": 'https://tinyurl.com/y2pumuau',
        "horse": 'https://tinyurl.com/y5yndcdz',
        "rabbit": 'https://tinyurl.com/yyq284k8',
        "elephant": 'https://tinyurl.com/y2az3r6j',
        # NOTE(review): the original code had "mouse" and "pig" branches but
        # never supplied URLs for them (it assigned the unbound loadImage
        # method, which would have crashed redrawAll); they are treated as
        # unsupported until URLs are added.
    }

    def appStarted(mode):
        mode.message = 'Click on the mouse to enter a kind of animal'
        mode.image = mode.app.loadImage('https://tinyurl.com/y44ge5bv')

    def mousePressed(mode, event):
        """Prompt for an animal name and show its picture if we have one."""
        # Back button: return to the splash screen without prompting.
        if (event.x > 5) and (event.x < 100) and (event.y > 5) and (event.y < 50):
            mode.app.setActiveMode(mode.app.splashMode)
            return  # bug fix: previously fell through and still opened the prompt
        animal = mode.getUserInput('What is an animal \
you would like to know more about')
        if animal is None:
            mode.message = 'You canceled!'
        elif animal in ImageMode.ANIMAL_URLS:
            # bug fix: the original if-chain attached its final `else` only to
            # the last (elephant) test, so every other animal reported
            # "Sorry, we don't have this animal" even after loading an image.
            mode.message = f'Here is a {animal}!'
            mode.image = mode.app.loadImage(ImageMode.ANIMAL_URLS[animal])
        else:
            mode.message = "Sorry, we don't have this animal"

    def redrawAll(mode, canvas):
        """Draw the back button, the status message and the current image."""
        canvas.create_rectangle(0, 0, mode.width, mode.height, fill=mode.app.offwhite)
        canvas.create_oval(5, 5, 100, 50, fill=mode.app.maroon)
        canvas.create_text(52, 27, text="Back", font=mode.app.font, fill=mode.app.offwhite)
        font = 'Arial 26 bold'
        canvas.create_text(mode.width/2, 20, text=mode.message, font=font)
        canvas.create_image(400, 400, image=ImageTk.PhotoImage(mode.image))
def rgb(red, green, blue):
    """Return the Tk-style hex colour string '#rrggbb' for 0-255 channels."""
    return "#{:02x}{:02x}{:02x}".format(red, green, blue)
class Letter(object):
    """A draggable letter positioned at canvas coordinates (x, y)."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
class PictureMatchMode(Mode):
    """Mode where the user drags the letters 'g' and 'n' into the blanks of
    the word 's i _ _' shown under a picture of someone singing."""
    def appStarted(mode):
        url1 = 'http://www.pngmart.com/files/3/Singing-PNG-HD.png'
        sing = mode.loadImage(url1)
        mode.sing = mode.scaleImage(sing, .35)
        # The two unfilled blanks of the word "sing".
        mode.blank1 = '_'
        mode.blank2 = '_'
        mode.startG()
        mode.startN()
        mode.isDraggingG = False
        mode.isDraggingN = False
    def startG(mode):
        """Place (or reset) the draggable 'g' at its starting position."""
        mode.G = Letter(.4*mode.width, .8*mode.height)
    def startN(mode):
        """Place (or reset) the draggable 'n' at its starting position."""
        mode.N = Letter(.6*mode.width, .8*mode.height)
    def mousePressed(mode, event):
        """Begin dragging a letter clicked within 20px of it; the top-left
        oval returns to the splash screen."""
        if ((mode.G.x-20) <= event.x <= (mode.G.x+20)) and ((mode.G.y-20) <= event.y <= (mode.G.y+20)):
            mode.isDraggingG = True
        if ((mode.N.x-20) <= event.x <= (mode.N.x+20)) and ((mode.N.y-20) <= event.y <= (mode.N.y+20)):
            mode.isDraggingN = True
        if (event.x > 5) and (event.x < 100) and (event.y > 5) and (event.y < 50):
            mode.app.setActiveMode(mode.app.splashMode)
    def mouseDragged(mode, event):
        """Move whichever letter is currently being dragged with the mouse."""
        if mode.isDraggingG == True:
            mode.G.x = event.x
            mode.G.y = event.y
        if mode.isDraggingN == True:
            mode.N.x = event.x
            mode.N.y = event.y
    def mouseReleased(mode, event):
        """Snap a letter into its blank when dropped inside its target zone;
        otherwise return it to its start.  A letter already placed (parked
        off-screen at width+100/height+100) stays placed."""
        if (690 <= event.x == mode.G.x <= 770) and ((.5*mode.height-50) <= event.y == mode.G.y <= (.5*mode.height+80)) \
        or (mode.G.x == mode.width + 100 or mode.G.y == mode.height + 100):
            mode.isDraggingG = False
            # Park the letter off-screen and reveal it inside the word.
            mode.G.x = mode.width + 100
            mode.G.y = mode.height + 100
            mode.blank2 = 'g'
        else:
            mode.startG()
        if (600 <= event.x == mode.N.x <= 660) and ((.5*mode.height-30) <= event.y == mode.N.y <= (.5*mode.height+50)) \
        or (mode.N.x == mode.width + 100 or mode.N.y == mode.height + 100):
            mode.isDraggingN = False
            mode.N.x = mode.width + 100
            mode.N.y = mode.height + 100
            mode.blank1 = 'n'
        else:
            mode.startN()
    def redrawAll(mode, canvas):
        """Draw the picture, the partially filled word and the two letters."""
        canvas.create_rectangle(0, 0, mode.width, mode.height, fill=mode.app.offwhite)
        canvas.create_rectangle(0,0,mode.width,mode.height,fill=rgb(230,230,250))
        canvas.create_oval(5, 5, 100, 50, fill=mode.app.maroon)
        canvas.create_text(52, 27, text="Back", font=mode.app.font, fill=mode.app.offwhite)
        canvas.create_image(mode.width/2, mode.height/5, image=ImageTk.PhotoImage(mode.sing))
        canvas.create_text(mode.width/2, .5*mode.height, text=f's i {mode.blank1} {mode.blank2}', \
            font='Arial 80 bold', fill=rgb(139,0,139))
        canvas.create_text(mode.G.x, mode.G.y, text='g', font='Arial 80 bold', fill=rgb(139,0,139))
        canvas.create_text(mode.N.x, mode.N.y, text='n', font='Arial 80 bold', fill=rgb(139,0,139))
class MyModalApp(ModalApp):
    """Top-level app: builds every mode, shows the splash screen first, and
    holds the shared font/colour constants used by all modes."""
    def appStarted(self):
        self.splashMode = SplashScreenMode()
        self.spellingMode = SpellingMode()
        self.typingMode = TypingMode()
        self.imageMode = ImageMode()
        self.pictureMatchMode = PictureMatchMode()
        self.setActiveMode(self.splashMode)
        self.styleInit()
    def styleInit(self):
        """Shared style constants (font and the app colour palette)."""
        self.font = "Arial 26 bold"
        self.offwhite = "#%02x%02x%02x" % (255, 250, 241)
        self.maroon = "#%02x%02x%02x" % (176, 48, 96)
# Launch the 1200x800 application window.
MyModalApp(width=1200, height=800)
| true |
8ba6f3b56c4603d64614b137859efbcdd275c35c | Python | felipesteodoro/tdc2020sp | /template_simple_ga_feature_selection.py | UTF-8 | 4,269 | 2.53125 | 3 | [] | no_license |
import random
import numpy as np
#pip install deap
from deap import base
from deap import creator
from deap import algorithms
from deap import tools
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import plot_confusion_matrix
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
import multiprocessing
# Load the training data; the "class" column is the target label and the
# remaining columns are the candidate features for GA selection.
dataset = pd.read_csv("yourdataset.csv")
y = dataset["class"].values
dataset.drop(["class"], inplace = True, axis = 1)
X = dataset.values
# Standardise the full feature matrix to zero mean / unit variance.
scalar = StandardScaler().fit(X)
X = scalar.transform(X)
# Fixed-hyperparameter SVM used as the fitness evaluator throughout.
classifier = SVC(C = 100, gamma = 0.0001, kernel = 'rbf')
# Adapted from https://github.com/kaushalshetty/FeatureSelectionGA
def calculate_fitness(individual):
    """DEAP fitness of a feature mask: 5-fold cross-validated accuracy.

    `individual` is a 0/1 sequence with one gene per column of the global
    feature matrix `X`; a 1 selects that column.  Returns a 1-tuple because
    DEAP expects fitness values to be iterable.
    """
    mask = np.asarray(individual)
    if np.sum(mask) == 0:
        # An empty feature set cannot be scored; give it the worst fitness.
        return (0.0,)
    feature_idx = np.where(mask == 1)[0]
    x_temp = X[:, feature_idx]
    # Out-of-fold predictions for every sample, filled in fold by fold.
    cv_set = np.repeat(-1., x_temp.shape[0])
    skf = StratifiedKFold(n_splits=5)
    for train_index, test_index in skf.split(x_temp, y):
        X_train, X_test = x_temp[train_index], x_temp[test_index]
        y_train, y_test = y[train_index], y[test_index]
        if X_train.shape[0] != y_train.shape[0]:
            # bug fix: was a bare `raise Exception()` with no message.
            raise ValueError("training fold features and labels are misaligned")
        classifier.fit(X_train, y_train)
        cv_set[test_index] = classifier.predict(X_test)
    return (accuracy_score(y, cv_set),)
# DEAP setup: maximise a single fitness value (cross-validated accuracy).
toolbox = base.Toolbox()
creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
# An individual is a 0/1 mask with one gene per feature column of X.
creator.create("Individual", list, fitness=creator.FitnessMax)
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=X.shape[1])
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", calculate_fitness)
# Uniform crossover / bit-flip mutation over the feature mask.
toolbox.register("mate", tools.cxUniform, indpb=0.3)
toolbox.register("mutate", tools.mutFlipBit, indpb = 0.05)
toolbox.register("select", tools.selBest)
if __name__ == "__main__":
    random.seed(25)
    # (mu, lambda) evolution-strategy sizes: 200 parents, 400 offspring.
    MU, LAMBDA = 200, 400
    populacao = toolbox.population(n = MU)  # populacao = population
    probabilidade_crossover = 0.8  # crossover probability
    probabilidade_mutacao = 0.2  # mutation probability
    numero_geracoes = 200  # number of generations
    # Evaluate individuals in parallel across all CPU cores.
    pool = multiprocessing.Pool()
    toolbox.register("map", pool.map)
    # Per-generation fitness statistics (max/min/mean/std).
    estatisticas = tools.Statistics(key=lambda individuo: individuo.fitness.values)
    estatisticas.register("max", np.max)
    estatisticas.register("min", np.min)
    estatisticas.register("med", np.mean)
    estatisticas.register("std", np.std)
    populacao, info = algorithms.eaMuPlusLambda(populacao, toolbox,MU,LAMBDA, probabilidade_crossover, probabilidade_mutacao, numero_geracoes, estatisticas, verbose = True)
    # Best individual found and the evolution curve of the max fitness.
    melhores = tools.selBest(populacao, 1)
    valores_grafico = info.select("max")
    plt.figure("Evolução")
    plt.plot(valores_grafico)
    plt.title("Acompanhamento dos valores")
    plt.show()
    # Keep only the columns selected by the best mask and save them to disk.
    feat_selected = pd.DataFrame(list(melhores[0]), columns = ["Selected"])
    feat_selected = feat_selected["Selected"] == 1
    dtselected = dataset[dataset.columns[feat_selected]]
    X = dtselected.values
    dtselected["class"] = y
    dtselected.to_csv('features_selected_dataset.csv')
    # Retrain and evaluate the SVM on the reduced feature set.
    scalar = StandardScaler().fit(X)
    X = scalar.transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .2, random_state=25)
    classifier.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)
    plt.figure("Matriz de Confusão")
    plot_confusion_matrix(classifier, X_test, y_test, normalize = 'true')
    print(classification_report(y_test, y_pred))
    print(metrics.accuracy_score(y_test, y_pred))
    pool.close()
| true |
aa0435b4dd54a4d902dd9318f965c2f04582b32b | Python | chahinMalek/automata | /main.py | UTF-8 | 546 | 2.78125 | 3 | [] | no_license | from automata import Alphabet
from automata import Nfa
# Build an NFA over {a, b} with 3 states; state 0 is both the start state and
# the accepting state (per the Nfa(num_states, alphabet, start, accept) call).
al = Alphabet({'a', 'b'})
n = Nfa(3, al, 0, 0)
n.add_transition(0, 1, 'b')
n.add_transition(0, 2, None)  # None presumably denotes an epsilon transition
n.add_transition(1, 1, 'a')
n.add_transition(1, 2, 'a')
n.add_transition(1, 2, 'b')
n.add_transition(2, 0, 'a')
# n: Nfa = Nfa(2, al, 0, 0)
# n.add_transition(0, 1, None)
# n.add_transition(1, 0, '0')
# n.add_transition(1, 0, '1')
# Determinise, prune unreachable states and compare acceptance of 'aa'
# between the DFA and the original NFA (they should agree).
d = n.convert_to_dfa()
d.remove_redundant_states()
print(d.accepts('aa'))
print(n.accepts('aa'))
# NOTE(review): this recomputes the same DFA a second time and the result is
# never used -- the two lines below look redundant.
d = n.convert_to_dfa()
d.remove_redundant_states()
| true |
32851ce2d3bc79cd24acf298faf62738df4c9376 | Python | comojin1994/Algorithm_Study | /Uijeong/Python/SM/test4.py | UTF-8 | 832 | 3.21875 | 3 | [] | no_license | import sys
# Rebind input() to sys.stdin.readline for fast bulk reading (note: unlike
# builtin input(), the returned lines keep their trailing newline).
input = sys.stdin.readline
def binary_search(arr, key):
    """Return the left-most index at which `key` could be inserted into the
    sorted list `arr` while keeping it sorted (same contract as
    bisect.bisect_left)."""
    lo, hi = 0, len(arr)
    while lo < hi:
        middle = (lo + hi) // 2
        if arr[middle] < key:
            lo = middle + 1
        else:
            hi = middle
    return lo
if __name__ == "__main__":
    # Read N pairs of integers, one pair per line.
    N = int(input())
    skills = {}
    for i in range(N):
        skills[i] = list(map(int, input().strip().split()))
    # Sort entries by (first value, second value) of each pair.
    skills_sort = sorted(skills.items(), key=lambda x: [x[1][0], x[1][1]])
    # e_sort is kept sorted; seeded with the second value of the first entry.
    e_sort = [skills_sort[0][1][1]]
    for key, value in skills_sort[1:]:
        # Count how many stored second-values are below this entry's first
        # value, then insert this entry's second value keeping e_sort sorted.
        idx = binary_search(e_sort, value[0])
        skills[key] = idx
        idx = binary_search(e_sort, value[1])
        e_sort = e_sort[:idx] + [value[1]] + e_sort[idx:]
    # NOTE(review): sorted(skills.keys()) just yields 0..N-1 and discards the
    # indices computed above -- confirm whether sorting by the stored values
    # was intended here instead.
    skills = sorted(skills.keys())
    for sk in skills:
        print(sk)
| true |
a3904dcb5bcc3be09aa51b4b6f1afa577abd8117 | Python | akaped/pygments-styles | /themes/vividchalk.py | UTF-8 | 1,176 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Vividchalk Colorscheme
~~~~~~~~~~~~~~~~~~~~~~
Converted by Vim Colorscheme Converter
"""
from pygments.style import Style
from pygments.token import Token, Comment, Name, Keyword, Generic, Number, Operator, String
class VividchalkStyle(Style):
    """Pygments port of the Vim 'vividchalk' colorscheme: a black background
    with high-saturation token colours."""
    background_color = '#000000'
    # Token type -> style string ("<fg> [bg:<colour>] [bold|italic|underline]").
    styles = {
        Token: '#EEEEEE bg:#000000',
        Name.Constant: '#339999 underline',
        Generic.Deleted: '#8a2be2 bg:#008080 bold',
        Name.Variable: '#FFCC00 underline',
        String: '#66FF00',
        Keyword.Type: '#AAAA77 underline',
        Name.Tag: '#FF6600 bold',
        Keyword: '#FF6600 bold',
        Comment.Preproc: '#AAFFFF underline',
        Name.Entity: '#33AA00 bold',
        Generic.Error: '#ffffff bg:#ff0000',
        Comment: '#9933CC bold italic',
        Generic.Inserted: 'bg:#00008b bold',
        Generic.Traceback: 'bg:#ff0000',
        Generic.Subheading: '#ff00ff bold',
        Generic.Heading: '#ff00ff bold',
        Generic.Output: '#404040 bold',
        Generic.Emph: 'underline',
    }
| true |
8c1b0c7373b4c24b7907c9e7a4bc7251c3b9605e | Python | MarkCBell/bigger | /bigger/draw.py | UTF-8 | 17,958 | 2.8125 | 3 | [
"MIT"
] | permissive | """ A module for making images of laminations. """
from __future__ import annotations
import os
from copy import deepcopy
from math import sin, cos, pi, ceil
from typing import Any, Generic, Optional, TypeVar
from PIL import Image, ImageDraw, ImageFont # type: ignore
import bigger
from bigger.types import Edge, Coord, FlatTriangle
from .triangulation import Triangle
# Vectors to offset a label by to produce backing.
OFFSETS = [(1.5 * cos(2 * pi * i / 12), 1.5 * sin(2 * pi * i / 12)) for i in range(12)]
# Colours of things.
DEFAULT_EDGE_LABEL_COLOUR = "black"
DEFAULT_EDGE_LABEL_BG_COLOUR = "white"
MAX_DRAWABLE = 100  # Maximum weight of a multicurve to draw fully.
ZOOM_FRACTION = 0.8  # Controls how much of its grid cell a drawn component fills.
VERTEX_BUFFER = 0.2  # Fraction of an edge kept clear around each vertex.
LAMINATION_COLOUR = "#555555"
VERTEX_COLOUR = "#404040"
TRIANGLE_COLOURS = {"bw": ["#b5b5b5", "#c0c0c0", "#c7c7c7", "#cfcfcf"], "rainbow": [f"hsl({i}, 50%, 50%)" for i in range(0, 360, 10)]}
# Type variable for the generic helpers below.
T = TypeVar("T")
def deduplicate(items: list[T]) -> list[T]:
    """Return the same list but without duplicates."""
    # dict preserves insertion order, so this keeps the first occurrence of
    # each item and drops all later repeats.
    return list(dict.fromkeys(items))
def add(A: Coord, B: Coord, s: float = 1.0, t: float = 1.0) -> Coord:
    """Return the point sA + tB."""
    ax, ay = A
    bx, by = B
    return (s * ax + t * bx, s * ay + t * by)
def interpolate(A: Coord, B: Coord, r: float = 0.5) -> Coord:
    """Return the point that is r% from B to A."""
    # Convex combination r*A + (1 - r)*B, written out componentwise.
    w = 1 - r
    return (r * A[0] + w * B[0], r * A[1] + w * B[1])
def support(triangulation: bigger.Triangulation[Edge], edge: Edge) -> tuple[Triangle[Edge], Triangle[Edge]]:
    """Return the two triangles that support an edge (one for each side)."""
    side = bigger.Side(edge)
    return triangulation.triangle(side), triangulation.triangle(~side)
def connected_components(triangulation: bigger.Triangulation[Edge], edges: list[Edge]) -> tuple[list[list[Triangle[Edge]]], set[Edge]]:
    """Return a list of list of triangles that support these edges, grouped by connectedness, and a set of edges that in the interior."""
    # Remember the input order of the edges so triangles can be sorted by it.
    position_index = dict((edge, index) for index, edge in enumerate(edges))
    interior = set()
    # Kruskal's algorithm: an edge is "interior" exactly when it joins two
    # previously separate components of the dual graph.
    components = bigger.UnionFind(deduplicate([triangle for edge in edges for triangle in support(triangulation, edge)]))
    for edge in edges:
        t1, t2 = support(triangulation, edge)
        if components(t1) != components(t2):  # Don't merge if it would create a loop in the dual graph.
            interior.add(edge)
            components.union2(t1, t2)
    # Order the triangles of each component by their position_index.
    ordered_components = [sorted(list(component), key=lambda triangle: tuple(position_index.get(side.edge, len(position_index)) for side in triangle)) for component in components]
    return ordered_components, interior
def default_layout_triangulation(triangulation: bigger.Triangulation[Edge], component: list[Triangle[Edge]], interior: set[Edge]) -> dict[Triangle[Edge], FlatTriangle]:
    """Return a dictionary mapping the triangles that meet the given edges to coordinates in the plane.
    Triangle T is mapped to ((x1, y1), (x2, y2), (x3, y3)) where (xi, yi) is at the tail of side i of T when oriented anti-clockwise.
    Coordinates are within the w x h rectangle."""
    # NOTE(review): coordinates produced here lie on a radius-1000 circle;
    # the caller rescales them into the image rectangle later.
    r = 1000.0
    # Create the vertices.
    num_outside = sum(1 for triangle in component for side in triangle if side.edge not in interior)
    vertices = [(r * sin(2 * pi * (i - 0.5) / num_outside), r * cos(2 * pi * (i - 0.5) / num_outside)) for i in range(num_outside)]
    # Determine how many boundary edges occur between each edge's endpoints.
    # We really should do this in a sensible order so that it only takes a single pass.
    num_descendants = dict((side, 1) for triangle in component for side in triangle if side.edge not in interior)
    stack = [side for triangle in component for side in triangle if side.edge in interior]
    while stack:
        current = stack.pop()
        if current in num_descendants:
            continue
        # So current is in interior.
        other = ~current
        other_sides = [other_side for other_side in triangulation.triangle(other) if other_side != other]
        try:
            num_descendants[current] = sum(num_descendants[other_side] for other_side in other_sides)
        except KeyError:  # We need to evaluate one of the other sides first.
            stack.append(current)  # Re-evaluate when we get back here.
            stack.extend(other_sides)
    # Work out which vertex (number) each side of each triangle starts at.
    start = component[0]
    triangle_vertex_number = {start[0]: 0, start[1]: num_descendants[start[0]], start[2]: num_descendants[start[0]] + num_descendants[start[1]]}
    to_extend = [side for side in start if side.edge in interior]
    while to_extend:
        current = to_extend.pop()
        # Transfer vertex numbers across the shared (interior) edge.
        A = triangulation.corner(current)
        B = triangulation.corner(~current)
        triangle_vertex_number[B[0]] = triangle_vertex_number[A[1]]
        triangle_vertex_number[B[1]] = triangle_vertex_number[A[0]]
        triangle_vertex_number[B[2]] = triangle_vertex_number[B[1]] + num_descendants[B[1]]
        for i in [1, 2]:
            if B[i].edge in interior:
                to_extend.append(B[i])
    layout = dict()
    for triangle in component:
        layout[triangle] = (vertices[triangle_vertex_number[triangle[0]]], vertices[triangle_vertex_number[triangle[1]]], vertices[triangle_vertex_number[triangle[2]]])
    return layout
def draw_block_triangle(canvas: ImageDraw, vertices: FlatTriangle, weights: list[int], master: int) -> None:
    """Draw a flat triangle with (blocks of) lines inside it.
    `master` is the largest absolute weight in the picture; widths are scaled
    by it so that blocks are comparable across triangles."""
    weights_0 = [max(weight, 0) for weight in weights]
    sum_weights_0 = sum(weights_0)
    correction = min(min(sum_weights_0 - 2 * e for e in weights_0), 0)
    dual_weights = [bigger.half(sum_weights_0 - 2 * e + correction) for e in weights_0]
    # parallel_weights[i] comes from a negative input weight on side i.
    parallel_weights = [max(-weight, 0) for weight in weights]
    for i in range(3):
        # Dual arcs.
        if dual_weights[i] > 0:
            # We first do the edge to the left of the vertex.
            # Correction factor to take into account the weight on this edge.
            s_a = (1 - 2 * VERTEX_BUFFER) * weights_0[i - 2] / master
            # The fractions of the distance of the two points on this edge.
            scale_a = (1 - s_a) / 2
            scale_a2 = scale_a + s_a * dual_weights[i] / weights_0[i - 2]
            # Now repeat for the other edge of the triangle.
            s_b = (1 - 2 * VERTEX_BUFFER) * weights_0[i - 1] / master
            scale_b = (1 - s_b) / 2
            scale_b2 = scale_b + s_b * dual_weights[i] / weights_0[i - 1]
            S1 = interpolate(vertices[i - 2], vertices[i - 1], scale_a)
            E1 = interpolate(vertices[i - 0], vertices[i - 1], scale_b)
            S2 = interpolate(vertices[i - 2], vertices[i - 1], scale_a2)
            E2 = interpolate(vertices[i - 0], vertices[i - 1], scale_b2)
            canvas.polygon([S1, E1, E2, S2], fill=LAMINATION_COLOUR)
        elif dual_weights[i] < 0:  # Terminal arc.
            s_0 = (1 - 2 * VERTEX_BUFFER) * weights_0[i] / master
            scale_a = (1 - s_0) / 2 + s_0 * dual_weights[i - 1] / weights_0[i]
            scale_a2 = scale_a + s_0 * (-dual_weights[i]) / weights_0[i]
            S1 = interpolate(vertices[i - 0], vertices[i - 2], scale_a)
            E1 = vertices[i - 1]
            S2 = interpolate(vertices[i - 0], vertices[i - 2], scale_a2)
            E2 = vertices[i - 1]
            canvas.polygon([S1, E1, E2, S2], fill=LAMINATION_COLOUR)
        else:  # dual_weights[i] == 0: # Nothing to draw.
            pass
        # Parallel arcs.
        if parallel_weights[i]:
            S, O, E = vertices[i - 2], vertices[i - 1], vertices[i]
            SS = interpolate(O, S, VERTEX_BUFFER)
            EE = interpolate(O, E, VERTEX_BUFFER)
            M = interpolate(S, E)
            MM = interpolate(SS, EE)
            s = parallel_weights[i] / master
            P = interpolate(MM, M, s)
            canvas.polygon([S, P, E], fill=LAMINATION_COLOUR)
def draw_line_triangle(canvas: ImageDraw, vertices: FlatTriangle, weights: list[int], master: int) -> None:
    """Draw a flat triangle with (individual) lines inside it.
    Like draw_block_triangle but draws each strand as its own 2px line; the
    caller only uses this for small integral weights (see MAX_DRAWABLE)."""
    weights_0 = [max(weight, 0) for weight in weights]
    sum_weights_0 = sum(weights_0)
    correction = min(min(sum_weights_0 - 2 * e for e in weights_0), 0)
    dual_weights = [bigger.half(sum_weights_0 - 2 * e + correction) for e in weights_0]
    # parallel_weights[i] comes from a negative input weight on side i.
    parallel_weights = [max(-weight, 0) for weight in weights]
    for i in range(3):  # Dual arcs:
        if dual_weights[i] > 0:
            s_a = 1 - 2 * VERTEX_BUFFER
            s_b = 1 - 2 * VERTEX_BUFFER
            for j in range(dual_weights[i]):
                scale_a = 0.5 if weights_0[i - 2] == 1 else (1 - s_a) / 2 + s_a * j / (weights_0[i - 2] - 1)
                scale_b = 0.5 if weights_0[i - 1] == 1 else (1 - s_b) / 2 + s_b * j / (weights_0[i - 1] - 1)
                S1 = interpolate(vertices[i - 2], vertices[i - 1], scale_a)
                E1 = interpolate(vertices[i - 0], vertices[i - 1], scale_b)
                canvas.line([S1, E1], fill=LAMINATION_COLOUR, width=2)
        elif dual_weights[i] < 0:  # Terminal arc.
            s_0 = 1 - 2 * VERTEX_BUFFER
            for j in range(-dual_weights[i]):
                scale_a = 0.5 if weights_0[i] == 1 else (1 - s_0) / 2 + s_0 * dual_weights[i - 1] / (weights_0[i] - 1) + s_0 * j / (weights_0[i] - 1)
                S1 = interpolate(vertices[i - 0], vertices[i - 2], scale_a)
                E1 = vertices[i - 1]
                canvas.line([S1, E1], fill=LAMINATION_COLOUR, width=2)
        else:  # dual_weights[i] == 0: # Nothing to draw.
            pass
        # Parallel arcs:
        if parallel_weights[i]:
            S, O, E = vertices[i - 2], vertices[i - 1], vertices[i]
            SS = interpolate(O, S, VERTEX_BUFFER)
            EE = interpolate(O, E, VERTEX_BUFFER)
            M = interpolate(S, E)
            MM = interpolate(SS, EE)
            for j in range(parallel_weights[i] // 2):
                s = float(j + 1) / master
                P = interpolate(MM, M, s)
                canvas.line([S, P, E], fill=LAMINATION_COLOUR, width=2)
            if parallel_weights[i] % 2 == 1:
                canvas.line([S, E], fill=LAMINATION_COLOUR, width=2)
class DrawStructure(Generic[Edge]):  # pylint: disable=too-many-instance-attributes
    """A class to record intermediate draw commands."""
    def __init__(self, **options: Any):
        # Drawing options; any of these may be overridden via keyword
        # arguments here or at call time.
        self.edges: Optional[list[Edge]] = None  # edges to draw (must be set before drawing)
        self.w = 400  # image width in pixels
        self.h = 400  # image height in pixels
        self.label = "none"  # edge labelling mode: "none", "edge" or "weight"
        self.layout = None  # optional object with a .layout(triangle) method
        self.colour = "bw"  # key into TRIANGLE_COLOURS
        self.outline = False  # whether to outline triangles in white
        self.textsize = 12  # label font size
        self.set_options(**options)
    def set_options(self, **options: Any) -> None:
        """Set the options passed in."""
        for key, value in options.items():
            setattr(self, key, value)
    def __call__(self, *objs: bigger.Lamination[Edge] | bigger.MCG[Edge] | bigger.Triangulation[Edge], **options: Any) -> DrawStructure | Image:
        """Return a copy of self with the extra options applied and, when any
        objects are given, the rendered image of those objects."""
        draw_structure = deepcopy(self)
        draw_structure.set_options(**options)
        if not objs:
            return draw_structure
        elif not draw_structure.edges:
            raise TypeError("draw() missing 1 required positional argument: 'edges'")
        for obj in objs:
            if not isinstance(obj, (bigger.Triangulation, bigger.Lamination, bigger.MCG)):
                raise TypeError(f"Unable to draw objects of type: {type(obj)}")
        return draw_structure.draw_objs(*objs)
    def draw_objs(self, *objs: bigger.Triangulation[Edge] | bigger.Lamination[Edge] | bigger.MCG[Edge]) -> Image:  # pylint: disable=too-many-statements, too-many-branches
        """Return an image of these objects.
        This method assumes that:
        - at least one object is given,
        - that all objects exist on the first triangulation, and
        - self.edges has been set."""
        image = Image.new("RGB", (self.w, self.h), color="White")
        font = ImageFont.truetype(os.path.join(os.path.dirname(__file__), "fonts", "FreeMonoBold.ttf"), self.textsize)
        canvas = ImageDraw.Draw(image)
        assert self.edges is not None
        # All objects live on the same triangulation; take it from the first.
        if isinstance(objs[0], bigger.Triangulation):
            triangulation = objs[0]
        elif isinstance(objs[0], bigger.Lamination):
            triangulation = objs[0].triangulation
        elif isinstance(objs[0], bigger.MCG):
            triangulation = objs[0].triangulation
        else:
            raise TypeError(f"Unable to draw objects of type: {type(objs[0])}")
        # Draw these triangles.
        components, interior = connected_components(triangulation, self.edges)
        if self.layout is None:
            layout2 = dict(item for component in components for item in default_layout_triangulation(triangulation, component, interior).items())
        else:
            layout2 = dict((triangle, self.layout.layout(triangle)) for component in components for triangle in component)
        # We will layout the components in a p x q grid.
        # Aim to maximise r := min(w / p, h / q) subject to pq >= num_components.
        # There is probably a closed formula for the optimal value of p (and so q).
        num_components = len(components)
        p = max(range(1, num_components + 1), key=lambda p: min(self.w / p, self.h / ceil(float(num_components) / p)))
        q = int(ceil(float(num_components) / p))
        ww = self.w / p * (1 + ZOOM_FRACTION) / 4
        hh = self.h / q * (1 + ZOOM_FRACTION) / 4
        dx = self.w / p
        dy = self.h / q
        # Scale & translate to fit into the [-r, r] x [-r, r] box.
        layout3 = dict()
        for component in components:
            # Bounding box of the component's raw layout coordinates.
            bb_w = min(vertex[0] for triangle in component for vertex in layout2[triangle])
            bb_e = max(vertex[0] for triangle in component for vertex in layout2[triangle])
            bb_n = min(vertex[1] for triangle in component for vertex in layout2[triangle])
            bb_s = max(vertex[1] for triangle in component for vertex in layout2[triangle])
            for triangle in component:
                a, b, c = layout2[triangle]
                layout3[triangle] = (
                    ((a[0] - bb_w) * 2 * ww / (bb_e - bb_w) - ww, (a[1] - bb_n) * 2 * hh / (bb_s - bb_n) - hh),
                    ((b[0] - bb_w) * 2 * ww / (bb_e - bb_w) - ww, (b[1] - bb_n) * 2 * hh / (bb_s - bb_n) - hh),
                    ((c[0] - bb_w) * 2 * ww / (bb_e - bb_w) - ww, (c[1] - bb_n) * 2 * hh / (bb_s - bb_n) - hh),
                )
        # Move to correct region within the image.
        layout4 = dict()
        for index, component in enumerate(components):
            for triangle in component:
                centre = (dx * (index % p) + dx / 2, dy * int(index / p) + dy / 2)
                a, b, c = layout3[triangle]
                layout4[triangle] = (add(a, centre), add(b, centre), add(c, centre))
        # Draw triangles.
        triangle_colours = TRIANGLE_COLOURS[self.colour]
        for index, (triangle, vertices) in enumerate(layout4.items()):
            canvas.polygon(vertices, fill=triangle_colours[index % len(triangle_colours)], outline="white" if self.outline else None)
        laminations = [obj for obj in objs if isinstance(obj, bigger.Lamination)]
        for lamination in laminations:
            weights = dict((edge, lamination(edge)) for edge in set(side.edge for triangle in layout4 for side in triangle))
            master = max(abs(weights[side.edge]) for triangle in layout4 for side in triangle)
            shown_is_integral = all(isinstance(weights[edge], int) for edge in weights)
            # Draw lamination.
            for index, (triangle, vertices) in enumerate(layout4.items()):
                if master < MAX_DRAWABLE and shown_is_integral:
                    draw_line_triangle(canvas, vertices, [weights[side.edge] for side in triangle], master)
                else:  # Draw everything. Caution, this is VERY slow (O(n) not O(log(n))) so we only do it when the weight is low.
                    draw_block_triangle(canvas, vertices, [weights[side.edge] for side in triangle], master)
        # Draw labels.
        for triangle, vertices in layout4.items():
            for index, side in enumerate(triangle):
                if self.label == "edge":
                    text = str(side.edge)
                elif self.label == "weight" and len(laminations) == 1:  # Only draw weights if there is a single lamination.
                    text = str(weights[side.edge])
                else:
                    text = ""
                point = interpolate(vertices[index - 0], vertices[index - 2])
                # For some reason anchor="mm" does not work. So we will have to manually center the text ourselves.
                w, h = canvas.textsize(text, font=font)
                point = (point[0] - w / 2, point[1] - h / 2)
                # Draw a white "halo" behind the label for contrast.
                for offset in OFFSETS:
                    canvas.text(add(point, offset), text, fill="White", font=font)
                canvas.text(point, text, fill="Black", font=font)
        # Draw vertices.
        for vertices in layout4.values():
            for vertex in vertices:
                canvas.ellipse([vertex[0] - 2, vertex[1] - 2, vertex[0] + 2, vertex[1] + 2], fill=VERTEX_COLOUR)
        return image
def draw(*objs: bigger.Lamination[Edge] | bigger.MCG[Edge] | bigger.Triangulation[Edge], edges: Optional[list[Edge]] = None, **options: Any) -> DrawStructure | Image:
    """Draw the given object with the provided options.
    With no objects this returns a reusable DrawStructure holding the
    options; with objects it returns the rendered image."""
    # This is only really here so we can provide "edges" as a keyword argument to users.
    return DrawStructure[Edge](edges=edges, **options)(*objs)
| true |
050fec0ac2fd0eb74e694485b79c7b6da7369525 | Python | ScottLiao920/Arduino_Hourglass | /gy521/calibration.py | UTF-8 | 1,555 | 3.234375 | 3 | [
"Apache-2.0"
] | permissive | import serial
import io
from sympy import *
def getparas():
    """Read 5 (AcX, AcY, AcZ) accelerometer triples from the module-level
    serial stream `sio` and return the per-axis averages.

    Assumes the Arduino writes one numeric value per line in the repeating
    order AcX, AcY, AcZ -- TODO confirm against the sketch on the device.
    """
    x = 0
    y = 0
    z = 0
    for i in range(5):
        x += float(sio.readline())
        y += float(sio.readline())
        z += float(sio.readline())
    # These first prints show the raw per-axis sums, not averages.
    print("AcX AcY AcZ")
    print(x,y,z)
    # Average the 5 samples per axis.
    x = x/5.00
    y = y/5.00
    z = z/5.00
    print("AcX AcY AcZ(averaged)")
    print(x,y,z)
    return x,y,z
# Open the serial link to the Arduino on COM3 and wrap it in a text stream
# so readline() yields decoded strings.
arduino = serial.Serial("COM3",timeout=1, baudrate=9600)
print("hey")
sio = io.TextIOWrapper(io.BufferedRWPair(arduino, arduino))
print(sio.readline())
data = ''
print("Another Approach")
# Collect 10 averaged (x, y, z) accelerometer readings via getparas().
x = [None]*10
y = [None]*10
z = [None]*10
for i in range(10):
    print("Another iteration")
    x[i], y[i], z[i] = getparas();
print(x,y,z)
# Fit an axis-aligned ellipsoid with centre (A, B, C) and semi-axes
# (a, b, c) through the samples.
# NOTE(review): 10 equations for 6 unknowns is over-determined, and each
# expression is set equal to 0 rather than 1 -- sympy.solve will only find
# an exact solution if one exists; confirm this matches the intent.
a,b,c,A,B,C = symbols('a b c A B C')
print(solve([
    ((x[0]-A)**2)/(a**2) + ((y[0]-B)**2)/(b**2) + ((z[0]-C)**2)/(c**2),
    ((x[1]-A)**2)/(a**2) + ((y[1]-B)**2)/(b**2) + ((z[1]-C)**2)/(c**2),
    ((x[2]-A)**2)/(a**2) + ((y[2]-B)**2)/(b**2) + ((z[2]-C)**2)/(c**2),
    ((x[3]-A)**2)/(a**2) + ((y[3]-B)**2)/(b**2) + ((z[3]-C)**2)/(c**2),
    ((x[4]-A)**2)/(a**2) + ((y[4]-B)**2)/(b**2) + ((z[4]-C)**2)/(c**2),
    ((x[5]-A)**2)/(a**2) + ((y[5]-B)**2)/(b**2) + ((z[5]-C)**2)/(c**2),
    ((x[6]-A)**2)/(a**2) + ((y[6]-B)**2)/(b**2) + ((z[6]-C)**2)/(c**2),
    ((x[7]-A)**2)/(a**2) + ((y[7]-B)**2)/(b**2) + ((z[7]-C)**2)/(c**2),
    ((x[8]-A)**2)/(a**2) + ((y[8]-B)**2)/(b**2) + ((z[8]-C)**2)/(c**2),
    ((x[9]-A)**2)/(a**2) + ((y[9]-B)**2)/(b**2) + ((z[9]-C)**2)/(c**2),
    ],[a,b,c,A,B,C]))
| true |
56014991cfe57f34749b7f8b2c5897c8a5b1ee4c | Python | nanakwame667/Wine-Quality-Prediction | /PROJECT_FILES/utils.py | UTF-8 | 2,089 | 2.71875 | 3 | [] | no_license | import time
import pandas as pd
# models
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
def load_csv_dataset(csv_path=None, delimiter=','):
    """Load a CSV file into a pandas DataFrame.

    Args:
        csv_path: path to the CSV file.  Required; kept keyword-optional only
            for backward compatibility with existing callers.
        delimiter: field separator used in the file.

    Returns:
        pandas.DataFrame with the file's contents.

    Raises:
        ValueError: if no path is supplied (previously this surfaced as a
            confusing pandas error about a None filepath).
    """
    if csv_path is None:
        raise ValueError("csv_path is required")
    return pd.read_csv(csv_path, delimiter=delimiter)
def pre_process_dataset(samples, labels, test_ratio=0.3, random_state=0, scaler=None):
    """Split samples/labels into train/test sets and standardise the features.

    The scaler is fit on the training split only and then applied to the test
    split, so no information leaks from the test set.

    Args:
        samples: feature matrix.
        labels: target vector aligned with `samples`.
        test_ratio: fraction of the data held out for testing.
        random_state: seed for a reproducible split.
        scaler: optional sklearn-style scaler instance; a fresh
            StandardScaler is created when omitted.  (bug fix: the original
            default `scaler=StandardScaler()` was evaluated once at import
            time and the same fitted instance was silently shared and refit
            across every call.)

    Returns:
        (x_train, x_test, y_train, y_test) with scaled feature matrices.
    """
    if scaler is None:
        scaler = StandardScaler()
    x_train, x_test, y_train, y_test = train_test_split(samples, labels,
                                                        test_size=test_ratio,
                                                        random_state=random_state)
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)
    return x_train, x_test, y_train, y_test
# Registry of supported model names; each entry builds a fresh estimator.
_MODEL_FACTORIES = {
    'LinearRegression': LinearRegression,
    'LogisticRegression': LogisticRegression,
    'RandomForest': RandomForestClassifier,
    'DecisionTree': DecisionTreeClassifier,
    'SVM': SVC,
    'SVC': SVC,
}

def train_model(model, x_train, y_train, shuffle_data=True, **kwargs):
    """Build (if needed) and fit a model on the training data.

    Args:
        model: either a model name from _MODEL_FACTORIES or an already
            constructed sklearn-style estimator.
        x_train, y_train: training features and labels.
        shuffle_data: shuffle the training data before fitting.
        **kwargs: passed through to the estimator's fit().

    Returns:
        The fitted estimator.

    Raises:
        ValueError: if `model` is an unrecognised name (bug fix: previously
            an unknown string fell through and crashed later with a cryptic
            AttributeError on `str.fit`).
    """
    if isinstance(model, str):
        try:
            model = _MODEL_FACTORIES[model]()
        except KeyError:
            raise ValueError(f"Unknown model name: {model!r}; "
                             f"expected one of {sorted(_MODEL_FACTORIES)}") from None
    if shuffle_data:
        x_train, y_train = shuffle(x_train, y_train)
    return model.fit(x_train, y_train, **kwargs)
def test_model(model, x_test, y_test, process_result=None):
    """Run a fitted model on x_test and, when labels are given, score it.

    Returns:
        (predictions, confusion_matrix, accuracy) when y_test is not None,
        otherwise just the predictions.  Note the asymmetric return shape.
    """
    result = model.predict(x_test)
    if process_result is not None:
        # Optional post-processing hook (e.g. rounding regression output).
        result = process_result(result)
    if y_test is not None:
        cm = confusion_matrix(y_test, result)
        asc = accuracy_score(y_test, result)
        return result, cm, asc
    return result
def timed_func(func):
    """Call *func* with no arguments and measure how long it takes.

    :param func: zero-argument callable
    :return: tuple ``(result, elapsed_seconds)``

    Bug fixed: timing now uses ``time.perf_counter`` -- a monotonic
    high-resolution clock -- instead of ``time.time``, whose wall-clock value
    can jump (NTP adjustments, DST) and yield negative or wrong durations.
    """
    start_time = time.perf_counter()
    result = func()
    time_used = time.perf_counter() - start_time
    return result, time_used
| true |
7cb7af696f740899d577af67074e035905dcbf3c | Python | lrdmic/Pycharm-Projects | /26_listas.py | UTF-8 | 1,765 | 4.46875 | 4 | [] | no_license | # LISTAS
# A list is an ordered, mutable collection of elements.
numeros = [5, 2, 23, 55, 1, 9, 6]
frutas = ["Manzanas", "Peras", "Uvas", "Naranjas", "Mandarinas", "Bananas", "Kiwi"]
print("LISTA ORIGINAL DE FRUTAS:")
print(frutas)
print()
# LIST METHODS
# https://docs.python.org/3/tutorial/datastructures.html
# list, len, append, extend, insert, remove, clear, pop, index, count, sort, reverse, copy
# len -> number of elements in the list
print(len(frutas))
# append -> add a single element at the end
frutas.append("Piña")
print(frutas)
# extend -> append every element of another iterable
# (fixed: the script used to extend `numeros` with `frutas`, mixing ints and
# strings, which made the later `numeros.sort()` raise TypeError in Python 3;
# the demo now extends a copy so `numeros` stays numeric)
lista2 = numeros + frutas
print(lista2)
numeros_extendidos = numeros.copy()
numeros_extendidos.extend(frutas)
print(numeros_extendidos)
# insert -> insert at an index, shifting the following elements to the right
frutas.insert(2, "Aguacate")
print(frutas)
# remove -> delete the first occurrence of a value
frutas.remove("Kiwi")
print(frutas)
# pop -> remove and return the last element
# (moved before clear -- popping from an already-cleared list raised IndexError)
frutas.pop()
print(frutas)
# index -> position of the first occurrence of a value
# (fixed: the list contains "Manzanas", not "Manzana"; the old lookup raised ValueError)
print(frutas.index("Manzanas"))
# count -> how many times a value appears (0 here: "Kiwi" was removed above)
print(frutas.count("Kiwi"))
# sort -> order the list in place
frutas.sort()
print(frutas)
numeros.sort()
print(numeros)
# reverse -> reverse the list in place
numeros.reverse()
print(numeros)
# copy -> shallow copy of the list
frutas2 = frutas.copy()
print(frutas2)
# clear -> remove every element (demonstrated last so the other examples have data)
frutas.clear()
print(frutas)
| true |
d6cdb9b5554288077e4fa1a58d6e8b7578966da7 | Python | robintema/django-likeable | /likeable/models.py | UTF-8 | 2,766 | 2.8125 | 3 | [
"Apache-2.0"
] | permissive | #
# django-likeable
#
# See LICENSE for licensing details.
#
from django.db import models
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext as _
class Like(models.Model):
    """
    A single "like" for a likeable object.

    Aims to be scaling-friendly by avoiding class inheritance: one flat
    table, with the liked object referenced through a generic foreign key
    (content_type + object_id).
    """
    # Who liked the object; reverse accessor on the user model is ``likes``.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='likes',
        help_text=_("The user who liked the particular object."),
    )
    # Set once, automatically, when the row is first created.
    timestamp = models.DateTimeField(
        auto_now_add=True,
        help_text=_("The date/time when this user liked this object."),
    )
    # Generic foreign key target, part 1: the liked object's model.
    content_type = models.ForeignKey(
        ContentType,
        help_text=_("The content type of the liked object."),
    )
    # Generic foreign key target, part 2: the liked object's primary key.
    # Stored as CharField, presumably to support non-integer primary keys.
    object_id = models.CharField(
        help_text=_("The primary key of the liked object."),
        max_length=250
    )
    # Convenience accessor resolving (content_type, object_id) to the object.
    liked = generic.GenericForeignKey(
        'content_type',
        'object_id',
    )
    class Meta:
        # make sure we can't have a user liking an object more than once
        unique_together = (('user', 'content_type', 'object_id'),)
    def __unicode__(self):
        # Python 2 string representation, e.g. "Like of X by Y at 2015-01-01 12:00:00".
        return _("Like of %(obj)s by %(user)s at %(timestamp)s") % {
            'obj': self.liked,
            'user': self.user,
            'timestamp': self.timestamp.strftime("%Y-%m-%d %H:%M:%S"),
        }
class Likeable(models.Model):
    """
    Abstract base class on which a "likeable" model can be based.

    Adds a generic ``likes`` relation to derived models plus helpers to
    create, delete and query likes for an instance.
    """
    likes = generic.GenericRelation(
        Like,
    )
    class Meta:
        abstract = True
    def like(self, user):
        """
        Create and return a like for this object by the given user.
        """
        return Like.objects.create(user=user, liked=self)
    def unlike(self, user):
        """
        Delete the like for this object by the given user.

        Raises Like.DoesNotExist when the user has not liked this object.
        (The previous ``try/except Like.DoesNotExist: raise`` around the
        lookup only re-raised the same exception and was removed.)
        """
        content_type = ContentType.objects.get_for_model(self)
        like = Like.objects.get(user=user, content_type=content_type, object_id=self.pk)
        return like.delete()
    def liked(self, user):
        """
        Return True if the given user liked this object.
        """
        content_type = ContentType.objects.get_for_model(self)
        # exists() avoids fetching the row and using exceptions for control flow.
        return Like.objects.filter(user=user, content_type=content_type, object_id=self.pk).exists()
| true |
a68baa0e0cfc18f29974563c6b276f1bf7dd753d | Python | ashwinpn/Computer-Vision | /mesh/src/nerf/tree.py | UTF-8 | 13,843 | 2.875 | 3 | [
"MIT"
] | permissive | import torch
class Node:
    """Axis-aligned voxel node of the subdivision tree.

    ``bounds`` is a (min_corner, max_corner) pair of 3-element tensors;
    ``depth`` 0 is the root, which uses the outer subdivision count while
    every deeper node uses the inner one.
    """

    def __init__(self, config, bounds, depth):
        self.config = config
        self.bounds = bounds
        self.depth = depth
        self.max_depth = config.tree.max_depth
        # Root splits with the outer factor, all descendants with the inner one.
        self.count = (config.tree.subdivision_outer_count
                      if depth == 0
                      else config.tree.subdivision_inner_count)
        self.weight = 0.
        self.sparse = True
        self.children = []

    def subdivide(self):
        """Append a count^3 regular grid of child nodes (no-op at max depth)."""
        if self.depth >= self.max_depth:
            return
        lo = self.bounds[0]
        extent = self.bounds[1] - lo
        n = self.count
        for ix in range(n):
            for iy in range(n):
                for iz in range(n):
                    # Fractional corner offsets of this grid cell, scaled to the extent.
                    cell_lo = torch.tensor([ix, iy, iz], dtype = torch.float) / n * extent
                    cell_hi = torch.tensor([ix + 1, iy + 1, iz + 1], dtype = torch.float) / n * extent
                    child_bounds = lo + cell_lo, lo + cell_hi
                    self.children.append(Node(self.config, child_bounds, self.depth + 1))

    def clear(self):
        """Drop all child nodes."""
        self.children = []
class TreeSampling:
    """
    Sparse voxel tree used to focus NeRF-style ray sampling on occupied space.

    Keeps a flat list of leaf voxels (``self.voxels``: Nx2x3 min/max corners),
    accumulates per-voxel ray weights into ``self.memm`` during training and
    periodically consolidates: voxels whose running weight falls below the
    configured threshold are dropped, survivors are subdivided within a
    voxel budget.
    """
    # Which axes of the min corner are pushed to the max corner to obtain
    # each of the 8 cube vertices (binary corner pattern).
    vertex_indices = [
        [],
        [0],
        [1],
        [2],
        [0, 1],
        [1, 2],
        [0, 2],
        [0, 1, 2],
    ]
    # 12 triangles (two per cube face), indexing the 8 vertices above.
    faces_indices = [
        0, 2, 1, 2, 4, 1,
        0, 3, 2, 2, 3, 5,
        0, 1, 6, 6, 3, 0,
        1, 4, 7, 7, 6, 1,
        3, 6, 7, 7, 5, 3,
        2, 7, 4, 7, 2, 5
    ]
    # Per-vertex colors (1x8x3, int) used when exporting the tree as a mesh.
    colors_tensor = torch.as_tensor([
        [0, 0, 0],
        [128, 128, 128],
        [128, 128, 128],
        [128, 128, 128],
        [0, 0, 0],
        [128, 128, 128],
        [0, 0, 0],
        [128, 128, 128],
    ], dtype=torch.int).unsqueeze(0)
    def __init__(self, config, device):
        """Build the initial tree covering the dataset's near/far ray range."""
        self.config = config
        self.device = device
        # Initial bounds, normalized (cube centered on the midpoint of the ray range)
        self.ray_near, self.ray_far = self.config.dataset.near, self.config.dataset.far
        self.ray_mean = (self.ray_near + self.ray_far) / 2
        bounds = torch.tensor([self.ray_near - self.ray_mean] * 3), torch.tensor([self.ray_far - self.ray_mean] * 3)
        # Tree root
        self.root = Node(self.config, bounds, 0)
        self.root.subdivide()
        # Tensor (Nx2x3) whose elements define the min/max bounds.
        self.voxels = None
        # Tree residual data: running mean of accumulated ray weights per voxel
        self.memm = None
        self.counter = 1
        # Initialize
        self.consolidate()
    def ticked(self, step):
        """Return True when *step* is a consolidation step (past the warm-up offset)."""
        tree_config = self.config.tree
        step_size_tree = tree_config.step_size_tree
        step_size_integration_offset = tree_config.step_size_integration_offset
        if step > step_size_integration_offset:
            curr_step = step - step_size_integration_offset
            return curr_step > 0 and curr_step % step_size_tree == 0
        return False
    def flatten(self):
        """Export the leaf voxels as a triangle mesh: (vertices, faces, colors)."""
        vertices = []
        faces = []
        colors = []
        for node in self.root.children:
            offset = node.bounds[1] - node.bounds[0]
            # Faces of this cube index into the vertex list starting here.
            offset_index = len(vertices)
            for t in range(8):
                tt = node.bounds[0].clone()
                tt[TreeSampling.vertex_indices[t]] += offset[TreeSampling.vertex_indices[t]]
                vertices.append(tt)
                colors.append(TreeSampling.colors_tensor)
            faces.append(torch.tensor(TreeSampling.faces_indices) + offset_index)
        vertices = torch.stack(vertices, 0)
        faces = torch.stack(faces, 0).view(-1, 3).int()
        colors = torch.stack(colors, 0).view(-1, 3)
        return vertices, faces, colors
    def consolidate(self, split = False):
        """Drop low-weight voxels, subdivide survivors within the voxel budget,
        then rebuild ``self.voxels``/``self.memm`` and reset the running mean.

        The ``split`` parameter is currently unused.
        """
        if self.memm is not None:
            print(f"Min memm {self.memm.min()}")
            print(f"Max memm {self.memm.max()}")
            print(f"Mean memm {self.memm.mean()}")
            print(f"Median memm {self.memm.median()}")
            print(f"Threshold {self.config.tree.eps}")
            # Filtering
            voxels_indices = torch.arange(self.memm.shape[0])
            mask_voxels = self.memm > self.config.tree.eps
            mask_voxels_list = voxels_indices[mask_voxels].tolist()
            # Inverted weights so that sorting ascending puts heavy voxels first.
            inv_weights = (1.0 - self.memm[mask_voxels]).tolist()
            voxel_count_initial = voxels_indices.shape[0]
            voxel_count_filtered = (~mask_voxels).sum()
            voxel_count_current = len(mask_voxels_list)
            print(f"From {voxel_count_initial} voxels with {voxel_count_filtered} filtered to current {voxel_count_current}")
            # Nodes closer to the root with high weight have higher priority
            voxels_filtered = [ self.root.children[index] for index in mask_voxels_list ]
            voxels_filtered = sorted(enumerate(voxels_filtered), key = lambda item: (item[1].depth, inv_weights[item[0]]))
            voxels_filtered = [ item[1] for item in voxels_filtered ]
            # Net voxel growth caused by replacing one voxel with its children.
            inner_size = self.config.tree.subdivision_inner_count ** 3 - 1
            children = []
            for index, child in enumerate(voxels_filtered):
                # Check if exceeds max cap
                exp_voxel_count = len(children) + inner_size + voxel_count_current - index
                if exp_voxel_count < self.config.tree.max_voxel_count:
                    child.subdivide()
                    if len(child.children) > 0:
                        children += child.children
                    else:
                        # At max depth subdivide() is a no-op; keep the leaf itself.
                        children.append(child)
                else:
                    children.append(child)
            print(f"Now {len(children)} voxels")
            self.root.children = children
        self.voxels = [ torch.stack(node.bounds, 0) for node in self.root.children ]
        if len(self.voxels) == 0:
            # NOTE(review): torch.stack on an empty list below will raise —
            # this print only warns; confirm whether a hard failure is intended.
            print(f"The chosen threshold {self.config.tree.eps} was set too high!")
        self.voxels = torch.stack(self.voxels, 0).to(self.device)
        # Fresh running mean, one slot per (new) voxel.
        self.memm = torch.zeros(self.voxels.shape[0], ).to(self.device)
        self.counter = 1
    def ray_batch_integration(self, step, ray_voxel_indices, ray_batch_weights, ray_batch_weights_mask):
        """ Performs ray batch integration into the nodes by weight accumulation
        Args:
            step (int): Training step.
            ray_voxel_indices (torch.Tensor): Tensor (RxN) batch ray voxel indices.
            ray_batch_weights (torch.Tensor): Tensor (RxN) batch ray sample weights.
            ray_batch_weights_mask (torch.Tensor): Tensor (RxN) batch ray sample weights mask.
        """
        if step < self.config.tree.step_size_integration_offset:
            return
        elif step == self.config.tree.step_size_integration_offset:
            print(f"Began ray batch integration... Step:{step}")
        voxel_count = self.voxels.shape[0]
        ray_count, ray_samples_count = ray_batch_weights.shape
        # accumulate weights
        acc = torch.zeros(ray_count, voxel_count, device = self.device)
        acc = acc.scatter_add(-1, ray_voxel_indices, ray_batch_weights)
        acc = acc.sum(0)
        # freq weights
        freq = torch.zeros(ray_count, voxel_count, device = self.device)
        freq = freq.scatter_add(-1, ray_voxel_indices, ray_batch_weights_mask)
        freq = freq.sum(0)
        # Only voxels that were actually hit contribute to the running mean.
        mask = freq > 0
        # distribute weights (voxel/accumulations) while being numerically stable
        self.memm[mask] += (acc[mask] / freq[mask] - self.memm[mask]) / self.counter
        self.counter += 1
    def extract_(self, bounds, signs):
        """Select, per ray and axis, the min or max voxel corner according to
        the per-axis ``signs`` of the ray direction; returns an (R x V x 1 x 3) tensor."""
        out = bounds[signs]
        out = out.transpose(1, 2)
        out = out[:, :, [0, 1, 2], [0, 1, 2]]
        return out[:, :, None, :]
    def batch_ray_voxel_intersect(self, origins, dirs, near, far, samples_count = 64):
        """ Returns batch of min and max intersections with their indices.
        Args:
            origins (torch.Tensor): Tensor (1x3) whose elements define the ray origin positions.
            dirs (torch.Tensor): Tensor (Rx3) whose elements define the ray directions.
            near (float): minimum accepted ray scalar.
            far (float): maximum accepted ray scalar.
            samples_count (int): number of samples drawn per ray.
        Returns:
            z_vals (torch.Tensor): intersection samples as ray direction scalars
            indices (torch.Tensor): indices of valid intersections
            ray_mask (torch.Tensor): ray mask where valid intersections
        """
        bounds = self.voxels
        rays_count, voxels_count = dirs.shape[0], bounds.shape[0],
        inv_dirs = 1 / dirs
        signs = (inv_dirs < 0).long()
        inv_signs = 1 - signs
        origins = origins[:, None, None, :]
        inv_dirs = inv_dirs[:, None, None, :]
        bounds = bounds.transpose(0, 1)
        # Min, max intersections
        tvmin = ((self.extract_(bounds, signs) - origins) * inv_dirs).squeeze(2)
        tvmax = ((self.extract_(bounds, inv_signs) - origins) * inv_dirs).squeeze(2)
        # Keep track non-intersections
        mask = torch.ones((rays_count, voxels_count,), dtype = torch.bool, device = bounds.device)
        # y-axis filter & intersection
        # DeMorgan's law ~(tvmin[..., 0] > tvmax[..., 1] or tvmin[..., 1] > tvmax[..., 0])]
        mask = mask & (tvmin[..., 0] <= tvmax[..., 1]) & (tvmin[..., 1] <= tvmax[..., 0])
        # y-axis
        mask_miny = tvmin[..., 1] > tvmin[..., 0]
        tvmin[..., 0][mask_miny] = tvmin[mask_miny][..., 1]
        mask_maxy = tvmax[..., 1] < tvmax[..., 0]
        tvmax[..., 0][mask_maxy] = tvmax[mask_maxy][..., 1]
        # z-axis filter & intersection
        # DeMorgan's law ~(tvmin[..., 0] > tvmax[..., 2]) or (tvmin[..., 2] > tvmax[..., 0])
        mask = mask & (tvmin[..., 0] <= tvmax[..., 2]) & (tvmin[..., 2] <= tvmax[..., 0])
        # z-axis
        mask_minz = tvmin[..., 2] > tvmin[..., 0]
        tvmin[..., 0][mask_minz] = tvmin[mask_minz][..., 2]
        mask_maxz = tvmax[..., 2] < tvmax[..., 0]
        tvmax[..., 0][mask_maxz] = tvmax[mask_maxz][..., 2]
        # find intersection scalars within range [ near, far ]
        intersections = torch.stack((tvmin[..., 0], tvmax[..., 0]), -1)
        # ray cap
        mask = mask & ((intersections[..., 0] >= near) & (intersections[..., 1] <= far))
        # mask outliers
        ray_mask = mask.sum(-1) > 0
        # see this https://github.com/pytorch/pytorch/issues/43768
        ray_rel = ray_mask.sum()
        if ray_rel == 0:
            # No ray hits any voxel: return dummy samples, all masked out.
            indices = torch.ones(rays_count, samples_count, device = bounds.device)
            return torch.rand_like(indices), indices.long(), ray_mask
        if self.config.tree.use_random_sampling:
            # apply small weight for non-intersections
            weights = torch.ones((rays_count, voxels_count,), device = bounds.device)
            # apply noise
            weights[~mask] = 1e-12
            # sample intersections
            samples = torch.multinomial(weights, samples_count, replacement = True)
            # Gather intersection samples
            samples_indices = samples[..., None].expand(-1, -1, 2)
            values = intersections.gather(-2, samples_indices)
            # Random sampling
            values_min, values_max = values[..., 0], values[..., 1]
            value_samples = torch.rand_like(values_min, device = bounds.device)
            z_vals = values_min + (values_max - values_min) * value_samples
        else:
            # Deterministic branch: distribute samples uniformly over the total
            # intersected distance of each ray.
            # Sort the intersections and mask of relevant crosses by min crossing
            crosses_sorted = intersections[..., 0].sort(-1)
            crosses_samples = crosses_sorted.indices[..., None].expand(*crosses_sorted.indices.shape, 2)
            intersections = intersections.gather(-2, crosses_samples)
            crosses_mask_sorted = mask.gather(-1, crosses_sorted.indices)
            # Roll relevant crosses at the start
            crosses_start = crosses_mask_sorted.long().sort(descending=True)
            crosses_start_mask = crosses_start.values.bool()
            res = torch.zeros_like(intersections)
            res[crosses_start_mask] = intersections[crosses_mask_sorted]
            # Distance crosses
            residuals = res[..., 1] - res[..., 0]
            # Cumulative sum distances
            residuals_cums = torch.cumsum(residuals, -1)
            # Sampling interval scaled by the total cross distance
            samples = torch.linspace(0, 1.0, samples_count, device = bounds.device)
            samples = samples * residuals_cums[..., -1][..., None]
            # Cross bucket indices
            cross_indices = torch.searchsorted(residuals_cums, samples)
            # Group by bucket (crosses) as offset from start (0, 0, 2, 2, 4, 4, 4, ...)
            samples_positions = torch.searchsorted(cross_indices, cross_indices, right=False)
            # Find the sample offset relative to the bucket (cross) 1st sample closest to the near plane
            samples_offset = samples - samples.gather(-1, samples_positions)
            # Min cross translate by the offset
            z_vals = res[..., 0].gather(-1, cross_indices) + samples_offset
            # Map indices to the corresponding voxels
            indices = crosses_start.indices.gather(-1, cross_indices)
            samples = crosses_sorted.indices.gather(-1, indices)
        # Order the samples
        z_vals, indices_ordered = z_vals.sort(-1)
        # Order voxels indices relative to the samples
        indices = samples.gather(-1, indices_ordered)
        return z_vals, indices, ray_mask
    def serialize(self):
        """Return the tree state as a plain dict for checkpointing."""
        return {
            "root": self.root,
            "voxels": self.voxels,
            "memm": self.memm,
            "counter": self.counter
        }
    def deserialize(self, dict):
        """Restore tree state from a checkpoint dict produced by serialize().

        NOTE(review): the parameter name shadows the builtin ``dict``.
        """
        print("Loaded tree from checkpoint...")
        self.root = dict["root"]
        self.voxels = dict["voxels"].to(self.device)
        self.memm = dict["memm"].to(self.device)
        self.counter = dict["counter"]
| true |
7d437cf3d540feba8b44cf58fdabb34ac8380261 | Python | mateuscmartins-1/Space_Run | /tela_inicial.py | UTF-8 | 848 | 2.625 | 3 | [
"CC-BY-4.0"
] | permissive | import pygame
from config import FPS, QUIT, INTRODUCTION
from assets import MUSICA_ENTRADA, load_assets
def tela_inicial(janela):
    """Show the title screen until the player presses Enter or closes the window.

    Returns the next game state: INTRODUCTION on Enter (also plays the intro
    sound), or QUIT when the window is closed.
    """
    assets = load_assets()
    relogio = pygame.time.Clock()
    imagem_titulo = pygame.image.load('imgs/Spacerun.png').convert()
    imagem_titulo_rect = imagem_titulo.get_rect()
    state = None
    rodando = True
    while rodando:
        relogio.tick(FPS)
        for evento in pygame.event.get():
            if evento.type == pygame.QUIT:
                state = QUIT
                rodando = False
            elif evento.type == pygame.KEYUP and evento.key == pygame.K_RETURN:
                state = INTRODUCTION
                rodando = False
                assets[MUSICA_ENTRADA].play()
        # Redraw the title screen every frame.
        janela.fill((0, 0, 0))
        janela.blit(imagem_titulo, imagem_titulo_rect)
        pygame.display.flip()
    return state
| true |
23cc903479cba9587bad7e7a3a7f5675cf0f0445 | Python | MarshallMoler/django_project | /meiduo_mall/meiduo_mall/apps/users/utils.py | UTF-8 | 799 | 2.828125 | 3 | [] | no_license | from django.contrib.auth.backends import ModelBackend
import re
from .models import User
def get_user_account(account):
    """Return the User whose mobile number or username equals *account*.

    *account* is treated as a mobile number when it matches the Chinese
    mobile pattern (``1[3-9]`` followed by nine digits), otherwise as a
    username.  Returns None when no matching user exists or the lookup
    fails for any reason.

    Bug fixed: the regex is now a raw string -- ``'\\d'`` in a plain string
    is an invalid escape sequence (DeprecationWarning today, a SyntaxError
    in future Python versions).
    """
    try:
        if re.match(r'^1[3-9]\d{9}$', account):
            # Look the user up by mobile number
            user = User.objects.get(mobile=account)
        else:
            # Look the user up by username
            user = User.objects.get(username=account)
    except Exception:
        # Broad on purpose: a missing user (User.DoesNotExist) or malformed
        # input both simply mean "no such account".
        return None
    else:
        return user
class UsernameMobileAuthBackend(ModelBackend):
    """Custom authentication backend: the login account may be either a
    username or a mobile number."""

    def authenticate(self, request, username=None, password=None, **kwargs):
        """Return the matching user when the password checks out, else None."""
        user = get_user_account(username)
        if user is None:
            return None
        if user.check_password(password):
            return user
| true |
90d7f20d2b670bdaca28a5c84ffb93b671b412a2 | Python | kexinshine/leetcode | /287.寻找重复数.py | UTF-8 | 302 | 2.578125 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=287 lang=python3
#
# [287] 寻找重复数
#
# @lc code=start
class Solution:
    def findDuplicate(self, nums: List[int]) -> int:
        """Return the duplicated number using Floyd's cycle detection.

        Improvement over the original counting-array version: O(1) extra
        space instead of O(n), and ``nums`` is never modified -- the
        problem's canonical constraints.  Treat ``nums`` as a function
        ``i -> nums[i]``; because all values lie in [1, n], following it
        forms a rho-shaped sequence whose cycle entry is the duplicate.
        """
        # Phase 1: find a meeting point inside the cycle.
        slow = nums[0]
        fast = nums[0]
        while True:
            slow = nums[slow]
            fast = nums[nums[fast]]
            if slow == fast:
                break
        # Phase 2: restart one pointer; they meet at the cycle entrance,
        # which is the duplicated value.
        slow = nums[0]
        while slow != fast:
            slow = nums[slow]
            fast = nums[fast]
        return slow
# @lc code=end
| true |
461af85e3a77e2f97bf2261adf8296012543c389 | Python | juliafealves/tst-lp1 | /unidade-3/ano-bissexto/ano_bissexto.py | UTF-8 | 300 | 3.59375 | 4 | [] | no_license | # coding: utf-8
# Student: Júlia Alves
# Enrollment: 117211383
# Assignment: Leap Year - Unit 3
# NOTE: Python 2 script (raw_input and the print statement).
# Reads a year from stdin and reports whether it is a leap year.
ano = int(raw_input())
mensagem = "não é bissexto"
# Gregorian rule: leap when divisible by 400, or divisible by 4
# but not by 100.
if (ano % 400 == 0) or (ano % 4 == 0 and ano % 100 != 0):
    mensagem = "é bissexto"
print "%i %s" % (ano, mensagem)