blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
752b01127105406114550b193daf1356929b335a | Python | burhan/pyipay | /fsspyipay/pyipay.py | UTF-8 | 8,334 | 2.59375 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | # -*- coding: utf-8 -*-
import time
import xml.etree.ElementTree as ET
from urllib import parse
from iso4217 import Currency
from .util import Action
from .util import decrypt_payload as decrypt
from .util import encrypt_payload as encrypt
from .util import read_resource_file, sanitize, validate_gw_url
from .util import check_if_set, LANG_LOOKUP
from .exceptions import InvalidGatewayResponse
class Gateway:
    """A class that represents a payment gateway for hosted transactions, using the resource kit.

    The first step is to initialize the gateway by providing it the configuration received
    from the bank. This configuration consists of the following:

    - A resource file (called resource.cgn), which is an encrypted file that contains
      connection and credentials for the gateway.
    - A keystore file (called keystore.bin), which is a Java keystore that contains
      the encryption key which is used to decrypt the resource file.
    - The alias, which is a string. This is the friendly name of your terminal as
      setup by the bank. It is used to extract the correct configuration from the
      resource file.

    In addition to the above three values, you must also pass in the amount
    that you want to charge the customer for this transaction.

    :param str keystore: The file system path to the keystore file (normally called keystore.bin)
    :param str resource: The file system path to the resource file (normally called resource.cgn)
    :param str alias: The terminal alias that is to be used.
    :param float amount: The amount that needs to be charged. 3 decimal places by default for KWD.

    The following can be optionally specified, but are set to reasonable defaults:

    :param str lang: The language which will be used for the hosted pages to the end user.
        Values are 'en' (for English), and 'ar' for Arabic. The default is 'en'.
    :param str currency: The ISO currency code. Defaults to 'KWD'. If you change this,
        make sure the terminal supports the currency.
    :param str tracking_id: This is a unique id for each transaction that is required by
        the gateway. If not supplied, the current timestamp is used.

    Once you have created an instance of the Gateway, you can specify further configuration.
    There are two pieces of information that are critical, the response url and the error url.
    These urls cannot have any query strings or fragments; and must be publicly accessible
    as this is where the gateway will return the results of the transactions.

    Finally, you request the redirect url from the module; which is where you should
    redirect the customer to start the payment process.

    :Example:

    >>> from fsspyipay import Gateway
    >>> resource_file = r'/home/me/resource.cgn'
    >>> keystore_file = r'/home/me/keystore.bin'
    >>> gw = Gateway(keystore_file, resource_file, 'alias', 4.546)
    >>> gw.response_url = 'https://example.com/response'
    >>> gw.error_url = 'https://example.com/error'
    >>> url = gw.generate_purchase_request()
    """

    def __init__(
        self,
        keystore,
        resource,
        alias,
        amount,
        lang="en",
        currency='KWD',
        tracking_id=None,
    ):
        # The gateway uses its own language codes; unknown inputs fall back to 'USA' (English).
        self.lang = LANG_LOOKUP.get(lang, 'USA')
        # The gateway expects the ISO-4217 *numeric* currency code.
        self.currency = Currency(currency).number
        self.amount = amount
        # Configure the terminal settings from the decrypted resource file.
        terminal_configuration = read_resource_file(resource, keystore, alias)
        root = ET.fromstring(terminal_configuration)
        terminal = {c.tag: c.text for c in root}
        self.password = terminal["password"]
        self.key = terminal["resourceKey"]
        self.portal_id = terminal["id"]
        self._pgw_url = terminal["webaddress"]
        if tracking_id is None:
            # Default to the current epoch second when no id was supplied.
            self.tracking_id = f"{int(time.time())}"
        else:
            # Bug fix: a caller-supplied tracking_id was previously discarded,
            # leaving the attribute unset and crashing request building later.
            self.tracking_id = tracking_id
        # Setting these as private, since we will manipulate the values
        # and sanitize them before setting the final value
        self._udf1 = self._udf2 = self._udf3 = self._udf4 = self._udf5 = ""
        self._response_url = None
        self._error_url = None

    def get_udf1(self):
        return self._udf1

    def set_udf1(self, s):
        self._udf1 = sanitize(s)

    def get_udf2(self):
        return self._udf2

    def set_udf2(self, s):
        self._udf2 = sanitize(s)

    def get_udf3(self):
        return self._udf3

    def set_udf3(self, s):
        self._udf3 = sanitize(s)

    def get_udf4(self):
        return self._udf4

    def set_udf4(self, s):
        self._udf4 = sanitize(s)

    def get_udf5(self):
        return self._udf5

    def set_udf5(self, s):
        self._udf5 = sanitize(s)

    def set_response_url(self, s):
        self._response_url = validate_gw_url(s)

    def set_error_url(self, s):
        self._error_url = validate_gw_url(s)

    def get_response_url(self):
        return self._response_url

    def get_error_url(self):
        return self._error_url

    udf1 = property(
        get_udf1, set_udf1, None, "Set and retrieve UDF1, the values are sanitized."
    )
    udf2 = property(
        get_udf2, set_udf2, None, "Set and retrieve UDF2, the values are sanitized."
    )
    udf3 = property(
        get_udf3, set_udf3, None, "Set and retrieve UDF3, the values are sanitized."
    )
    udf4 = property(
        get_udf4, set_udf4, None, "Set and retrieve UDF4, the values are sanitized."
    )
    udf5 = property(
        get_udf5, set_udf5, None, "Set and retrieve UDF5, the values are sanitized."
    )
    response_url = property(
        get_response_url, set_response_url, None, "Set and retrieve the response URL"
    )
    error_url = property(
        get_error_url, set_error_url, None, "Set and retrieve the error URL"
    )

    def _build_param_dict(self, action):
        """Build the request parameter dict for the given *numeric* action code."""
        params = []
        if action == 1:  # 1 == purchase, the only transaction type implemented so far
            params.extend(
                [
                    ("action", action),
                    ("id", self.portal_id),
                    ("password", self.password),
                    ("langid", self.lang),
                    ("currencycode", self.currency),
                    ("trackid", self.tracking_id),
                    ("amt", self.amount),
                    ("udf1", self.udf1),
                    ("udf2", self.udf2),
                    ("udf3", self.udf3),
                    ("udf4", self.udf4),
                    ("udf5", self.udf5),
                    ("responseURL", self.response_url),
                    ("errorURL", self.error_url),
                ]
            )
        return dict(params)

    def _build_payload(self, action):
        """Encrypt the url-encoded parameter string for the given Action member."""
        params = self._build_param_dict(action.value)
        # filter out empty values, and set `safe` to avoid
        # encoding URLs, which is not supported by the platform.
        d = parse.urlencode({k: v for k, v in params.items() if v}, safe=":/")
        payload = encrypt(d, self.key)
        return payload.decode("utf-8")

    def _get_redirect_url(self, action):
        """Build the full hosted-payment redirect url for the given Action member."""
        # Bug fix: pass the Action itself -- _build_payload already reads
        # ``.value``, so passing ``action.value`` here raised AttributeError.
        payload = self._build_payload(action)
        pgw_url = self._pgw_url
        gw_url = f"{pgw_url}/PaymentHTTP.htm?param=paymentInit&trandata={payload}"
        gw_url = f"{gw_url}&tranportalId={self.portal_id}&responseURL={self.response_url}&errorURL={self.error_url}"
        return gw_url

    def _parse_response(self, raw_response):
        """Split a gateway callback query string into plain and decrypted parts.

        :raises InvalidGatewayResponse: if the encrypted ``trandata`` field is missing.
        """
        results = parse.parse_qs(raw_response)
        try:
            encrypted_payload = results.pop("trandata")
        except KeyError:
            raise InvalidGatewayResponse("Invalid response from the gateway, missing encrypted data")
        decrypted_results = parse.parse_qs(decrypt(encrypted_payload[0], self.key))
        return results, decrypted_results

    def get_result(self, response):
        """Return a flat dict of the gateway response.

        Plain (unencrypted) fields take precedence over decrypted ones when
        both carry the same key.
        """
        common, decrypted = self._parse_response(response)
        d = {k: v[0] for k, v in common.items()}
        for k, v in decrypted.items():
            if k not in d:
                d[k] = v[0]
        return d

    @check_if_set(['response_url', 'error_url'])
    def generate_purchase_request(self):
        """Return the url the customer should be redirected to for a purchase."""
        return self._get_redirect_url(Action.PURCHASE)
| true |
198079cf81363cb3356b1fd1323d362fce1c3543 | Python | Praful932/PySyft | /tests/syft/core/pointer/pointer_test.py | UTF-8 | 2,810 | 2.5625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | # third party
import pytest
import torch as th
# syft absolute
import syft as sy
@pytest.mark.parametrize("with_verify_key", [True, False])
def test_make_searchable(with_verify_key: bool) -> None:
    """Publishing a pointer makes the object visible in another client's store.

    Runs twice: once scoped to the client's verify key, once unscoped.
    """
    bob = sy.VirtualMachine(name="Bob")
    root_client = bob.get_root_client()
    client = bob.get_client()
    ten = th.tensor([1, 2])
    ptr = ten.send(root_client)
    # Before publishing, the non-root client cannot see the object.
    assert len(client.store) == 0
    if with_verify_key:
        ptr.update_searchability(target_verify_key=client.verify_key)
    else:
        ptr.update_searchability()
    # After publishing, exactly one object is visible.
    assert len(client.store) == 1
@pytest.mark.parametrize("with_verify_key", [True, False])
def test_make_unsearchable(with_verify_key: bool) -> None:
    """Revoking searchability removes the object from the client's store again.

    Runs twice: once scoped to the client's verify key, once unscoped.
    """
    bob = sy.VirtualMachine(name="Bob")
    root_client = bob.get_root_client()
    client = bob.get_client()
    ten = th.tensor([1, 2])
    ptr = ten.send(root_client)
    if with_verify_key:
        ptr.update_searchability(target_verify_key=client.verify_key)
    else:
        ptr.update_searchability()
    # Sanity check: the object is visible after publishing.
    assert len(client.store) == 1
    if with_verify_key:
        ptr.update_searchability(searchable=False, target_verify_key=client.verify_key)
    else:
        ptr.update_searchability(searchable=False)
    # Revoking hides it again.
    assert len(client.store) == 0
def test_searchable_property() -> None:
    """The ``searchable`` property toggles store visibility and is idempotent."""
    bob = sy.VirtualMachine(name="Bob")
    root_client = bob.get_root_client()
    client = bob.get_client()
    ten = th.tensor([1, 2])
    ptr = ten.send(root_client)
    assert len(client.store) == 0
    # Setting False while already hidden is a no-op.
    ptr.searchable = False
    assert len(client.store) == 0
    ptr.searchable = True
    assert len(client.store) == 1
    # Setting True twice must not duplicate the entry.
    ptr.searchable = True
    assert len(client.store) == 1
    ptr.searchable = False
    assert len(client.store) == 0
def test_tags() -> None:
    """Tags are de-duplicated (order preserved) and ``send`` can override them."""
    bob = sy.VirtualMachine(name="Bob")
    root_client = bob.get_root_client()
    ten = th.tensor([1, 2])
    # Duplicate "tag1" collapses to a single entry.
    ten = ten.tag("tag1", "tag1", "other")
    assert ten.tags == ["tag1", "other"]
    # .send without `tags` passed in
    ptr = ten.send(root_client)
    assert ptr.tags == ["tag1", "other"]
    # .send with `tags` passed in
    ptr = ten.send(root_client, tags=["tag2", "tag2", "other"])
    # Overriding tags mutates the local tensor as well as the pointer.
    assert ten.tags == ["tag2", "other"]
    assert ptr.tags == ["tag2", "other"]
def test_description() -> None:
    """The description set locally propagates to the pointer; ``send`` can override it."""
    bob = sy.VirtualMachine(name="Bob")
    root_client = bob.get_root_client()
    ten = th.tensor([1, 2])
    ten = ten.describe("description 1")
    assert ten.description == "description 1"
    # .send without `description` passed in
    ptr = ten.send(root_client)
    assert ptr.description == "description 1"
    # .send with `description` passed in
    ptr = ten.send(root_client, description="description 2")
    # Overriding mutates the local tensor as well as the pointer.
    assert ten.description == "description 2"
    assert ptr.description == "description 2"
| true |
c4ec37b49b66b8c689e6d07cf5837867fe489f53 | Python | mukundajmera/competitiveprogramming | /Sorting/Shell Sort.py | UTF-8 | 436 | 3.640625 | 4 | [] | no_license | #User function Template for python3
def shellSort(arr, n):
    """Sort arr (of length n) in place using Shell sort and return it.

    The original body called the built-in ``arr.sort()`` (Timsort) and
    returned its ``None`` result; this is an actual gap-sequence Shell sort
    (gaps n//2, n//4, ..., 1).
    """
    gap = n // 2
    while gap > 0:
        # Gapped insertion sort: every gap-th subsequence becomes sorted.
        for i in range(gap, n):
            key = arr[i]
            j = i
            # Shift larger gap-sorted elements right until key's slot is found.
            while j >= gap and arr[j - gap] > key:
                arr[j] = arr[j - gap]
                j -= gap
            arr[j] = key
        gap //= 2
    return arr
#{
# Driver Code Starts
#Initial Template for Python 3
if __name__ == "__main__":
    # Read t test cases; each case is a length n followed by n integers.
    t = int(input())
    while(t>0):
        n = int(input())
        arr = [int(x) for x in input().strip().split()]
        # Sort in place, then print the array on one space-separated line.
        shellSort(arr, n)
        for ele in arr:
            print(ele, end=" ")
        print()
        t=t-1
# } Driver Code Ends
0d452535151497471735edc58ac2e9d83dfa1ce0 | Python | simonthor/decay2phasespace | /fulldecay.py | UTF-8 | 9,110 | 2.796875 | 3 | [] | no_license | from phasespace import GenParticle
from particle import Particle
import tensorflow as tf
import tensorflow.experimental.numpy as tnp
from mass_functions import _DEFAULT_CONVERTER
from typing import Callable, Union
import itertools
_MASS_WIDTH_TOLERANCE = 0.01
_DEFAULT_MASS_FUNC = 'rel-BW'
class FullDecay:
    """
    A container that works like GenParticle that can handle multiple decays
    """
    def __init__(self, gen_particles: list[tuple[float, GenParticle]]):
        """
        Create an instance of FullDecay

        Parameters
        ----------
        gen_particles : list[tuple[float, GenParticle]]
            All the GenParticles and their corresponding probabilities.
            The list must be of the format [[probability, GenParticle instance], [probability, ...

        Notes
        -----
        Input format might change
        """
        # Each entry pairs a branching probability with a fully built decay tree.
        self.gen_particles = gen_particles

    @classmethod
    def from_dict(cls, dec_dict: dict, mass_converter: dict[str, Callable] = None, tolerance: float = _MASS_WIDTH_TOLERANCE):
        """
        Create a FullDecay instance from a dict in the decaylanguage format.

        Parameters
        ----------
        dec_dict : dict
            The input dict from which the FullDecay object will be created from.
        mass_converter : dict[str, Callable]
            A dict with mass function names and their corresponding mass functions.
            These functions should take the average particle mass and the mass width as inputs
            and return a mass function that phasespace can understand.
            This dict will be combined with the predefined mass functions in this package.
        tolerance : float
            Minimum mass width of the particle to use a mass function instead of assuming the mass to be constant.

        Returns
        -------
        FullDecay
            The created FullDecay object.
        """
        if mass_converter is None:
            total_mass_converter = _DEFAULT_CONVERTER
        else:
            # Combine the mass functions specified by the package to the mass functions specified from the input.
            # User-supplied entries override the package defaults on name clashes.
            total_mass_converter = {**_DEFAULT_CONVERTER, **mass_converter}
        gen_particles = _recursively_traverse(dec_dict, total_mass_converter, tolerance=tolerance)
        return cls(gen_particles)

    def generate(self, n_events: int, normalize_weights: bool = False,
                 **kwargs) -> Union[tuple[list[tf.Tensor], list[tf.Tensor]], tuple[list[tf.Tensor], list[tf.Tensor], list[tf.Tensor]]]:
        """
        Generate four-momentum vectors from the decay(s).

        Parameters
        ----------
        n_events : int
            Total number of events combined, for all the decays.
        normalize_weights : bool
            Normalize weights according to all events generated. This also changes the return values.
            See the phasespace documentation for more details.
        kwargs
            Additional parameters passed to all calls of GenParticle.generate

        Returns
        -------
        The arguments returned by GenParticle.generate are returned. See the phasespace documentation for details.
        However, instead of being 2 or 3 tensors, it is 2 or 3 lists of tensors, each entry in the lists corresponding
        to the return arguments from the corresponding GenParticle instances in self.gen_particles.
        Note that when normalize_weights is True, the weights are normalized to the maximum of all returned events.
        """
        # Sample which decay mode each event belongs to, proportionally to the
        # stored probabilities. Input to tf.random.categorical must be 2D.
        rand_i = tf.random.categorical(tnp.log([[dm[0] for dm in self.gen_particles]]), n_events)
        # Input to tf.unique_with_counts must be 1D
        dec_indices, _, counts = tf.unique_with_counts(rand_i[0])
        counts = tf.cast(counts, tf.int64)
        weights, max_weights, events = [], [], []
        # Generate each selected decay mode in one batch of its sampled size.
        for i, n in zip(dec_indices, counts):
            weight, max_weight, four_vectors = self.gen_particles[i][1].generate(n, normalize_weights=False, **kwargs)
            weights.append(weight)
            max_weights.append(max_weight)
            events.append(four_vectors)
        if normalize_weights:
            # Normalize against the global maximum over every generated mode.
            total_max = tnp.max([tnp.max(mw) for mw in max_weights])
            normed_weights = [w / total_max for w in weights]
            return normed_weights, events
        return weights, max_weights, events
def _unique_name(name: str, preexisting_particles: set[str]) -> str:
"""
Create a string that does not exist in preexisting_particles based on name.
Parameters
----------
name : str
Name that should be
preexisting_particles : set[str]
Preexisting names
Returns
-------
name : str
Will be `name` if `name` is not in preexisting_particles or of the format "name [i]" where i will begin at 0
and increase until the name is not preexisting_particles.
"""
if name not in preexisting_particles:
preexisting_particles.add(name)
return name
name += ' [0]'
i = 1
while name in preexisting_particles:
name = name[:name.rfind('[')] + f'[{str(i)}]'
i += 1
preexisting_particles.add(name)
return name
def _get_particle_mass(name: str, mass_converter: dict[str, Callable], mass_func: str,
                       tolerance: float = _MASS_WIDTH_TOLERANCE) -> Union[Callable, float]:
    """
    Get the mass (or a mass function) of a particle via the particle package.

    Parameters
    ----------
    name : str
        Name of the particle. Must be recognizable by the particle package.
    mass_converter : dict[str, Callable]
        Mapping of mass-function names to factories taking (mass=, width=).
    mass_func : str
        Key into mass_converter used for wide resonances.
    tolerance : float
        Maximum width for which the mass is treated as a constant.

    Returns
    -------
    Callable, float
        A mass function if the particle's width exceeds tolerance, otherwise
        a constant float64 mass.

    TODO try to cache results for this function in the future for speedup.
    """
    particle = Particle.from_evtgen_name(name)
    if particle.width <= tolerance:
        # Narrow resonance: a fixed mass is a good enough approximation.
        return tf.cast(particle.mass, tf.float64)
    # Wide resonance: build the configured variable-mass function.
    return mass_converter[mass_func](mass=particle.mass, width=particle.width)
def _recursively_traverse(decaychain: dict, mass_converter: dict[str, Callable],
                          preexisting_particles: set[str] = None, tolerance: float = _MASS_WIDTH_TOLERANCE) -> list[tuple[float, GenParticle]]:
    """
    Create all possible GenParticles by recursively traversing a dict from decaylanguage.

    Parameters
    ----------
    decaychain: dict
        Decay chain with the format from decaylanguage
    mass_converter : dict[str, Callable]
        Mapping of mass-function names to mass-function factories.
    preexisting_particles : set
        names of all particles that have already been created.
    tolerance : float
        Minimum mass width for a particle to set a non-constant mass to a particle.

    Returns
    -------
    list[tuple[float, GenParticle]]
        The generated particle
    """
    original_mother_name = list(decaychain.keys())[0]  # Get the only key inside the dict
    if preexisting_particles is None:
        # No registry yet => this is the outermost call, i.e. the top particle.
        preexisting_particles = set()
        is_top_particle = True
    else:
        is_top_particle = False
    # This is in the form of dicts
    decay_modes = decaychain[original_mother_name]
    mother_name = _unique_name(original_mother_name, preexisting_particles)
    # This will contain GenParticle instances and their probabilities
    all_decays = []
    for dm in decay_modes:
        dm_probability = dm['bf']
        daughter_particles = dm['fs']
        daughter_gens = []
        for daughter_name in daughter_particles:
            if isinstance(daughter_name, str):
                # Always use constant mass for stable particles
                daughter = GenParticle(_unique_name(daughter_name, preexisting_particles),
                                       Particle.from_evtgen_name(daughter_name).mass)
                daughter = [(1., daughter)]
            elif isinstance(daughter_name, dict):
                # A nested decay chain: expand it recursively.
                daughter = _recursively_traverse(daughter_name, mass_converter, preexisting_particles, tolerance=tolerance)
            else:
                raise TypeError(f'Expected elements in decaychain["fs"] to only be str or dict '
                                f'but found an instance of type {type(daughter_name)}')
            daughter_gens.append(daughter)
        # Cartesian product: one decay tree per combination of daughter options.
        for daughter_combination in itertools.product(*daughter_gens):
            # Combined probability = product of daughter probabilities times this mode's bf.
            p = tnp.prod([decay[0] for decay in daughter_combination]) * dm_probability
            if is_top_particle:
                # The top particle always gets a constant (average) mass.
                mother_mass = Particle.from_evtgen_name(original_mother_name).mass
            else:
                mother_mass = _get_particle_mass(original_mother_name, mass_converter=mass_converter,
                                                 mass_func=dm.get('zfit', _DEFAULT_MASS_FUNC), tolerance=tolerance)
            one_decay = GenParticle(mother_name, mother_mass).set_children(
                *[decay[1] for decay in daughter_combination])
            all_decays.append((p, one_decay))
    return all_decays
| true |
3c3b6894dddc3bc5dae18247864610894d25a9b4 | Python | vivekdubeyvkd/python-utilities | /get_index_for_unique_char_in_string.py | UTF-8 | 458 | 3.109375 | 3 | [] | no_license | outputDict = {}
def getUniqueCharacter(s):
    """Return the 1-based index of the first non-repeating character in s, or -1.

    Bug fix: the original used a module-level dict that was never cleared, so
    results from one call leaked into the next. A local dict makes each call
    independent.
    """
    positions = {}
    for i, ch in enumerate(s):
        # First sighting records the 1-based index; any repeat invalidates it.
        positions[ch] = i + 1 if ch not in positions else -1
    # dicts preserve insertion order, so this scans characters in first-seen order.
    for idx in positions.values():
        if idx != -1:
            return idx
    return -1
# Print the results; the bare calls previously discarded the return values.
print(getUniqueCharacter("madam"))
print(getUniqueCharacter("falafal"))
print(getUniqueCharacter("hackthegame"))
| true |
7b4e1b3df90f27b76acbbeb727c5bfd00bf23865 | Python | markbac17/web-trader | /app/view.py | UTF-8 | 2,615 | 3.578125 | 4 | [] | no_license | import os
def user_menu():
    """Display the logon menu, clear the screen, and return the raw selection."""
    menu_lines = (
        'Welcome to Terminal Trader.\n',
        'User logon\n',
        '1. Login with username',
        '2. Login with API key',
        '3. Create account',
        '4. Exit\n',
    )
    for line in menu_lines:
        print(line)
    selection = input('Select option: ')
    os.system('clear')
    return selection
def login_username_pw():
    """Prompt for username/password credentials and return them as a tuple."""
    print('\nLogon with username & password\n')
    entered_user = input('Enter username: ')
    entered_pass = input('Enter password: ')
    os.system('clear')
    return entered_user, entered_pass
def login_api_key():
    """Prompt for an API key and return whatever the user typed."""
    print('\nLogon with API key\n')
    entered_key = input('API key: ')
    os.system('clear')
    return entered_key
def display_user_from_api_key(user):
    """Clear the screen and print a user record (or the falsy value itself)."""
    os.system('clear')
    if not user:
        # Lookup failed: show the raw falsy value (e.g. None) as before.
        print(user)
        return
    print('Account #:{}, User: {}, pw hash: {}, balance: {}, api key: {}'
          .format(user[0], user[1], user[2], user[3], user[4]))
def create_account():
    """Prompt for new-account details.

    Returns (username, password, balance) -- balance is the raw input string.
    """
    os.system('clear')
    print('\nCreate new account\n')
    new_user = input('Create user name: ')
    new_pass = input('Create password: ')
    opening_balance = input('Enter amount to deposit: ')
    return new_user, new_pass, opening_balance
def main_menu():
    """Display the account action menu, clear the screen, return the raw choice."""
    options = (
        'Terminal Trader user options:\n',
        '1. View balance',
        '2. Deposit',
        '3. Buy',
        '4. Sell',
        '5. View positions',
        '6. View trades',
        '7. Lookup stock price',
        '8. Lookup API key',
        '9. Logout\n',
    )
    for option in options:
        print(option)
    selection = input('Enter choice: ')
    os.system('clear')
    return selection
def display_balance(balance):
    """Clear the screen and show the current account balance."""
    os.system('clear')
    print('\nAccount balance is: ${}\n'.format(balance))
def deposit():
    """Prompt for a deposit amount and return it as a float.

    Raises ValueError (from float()) if the input is not numeric.
    """
    print('\nDeposit funds\n')
    amount = input('Enter amount to deposit: ')
    os.system('clear')
    return float(amount)
def display_trade_history(trade_history):
    """Print each historical trade on its own line, padded with blank lines."""
    print()
    for trade in trade_history:
        print(trade)
    print()
def display_positions(positions):
    """Print each open position on its own line, padded with blank lines."""
    print()
    for position in positions:
        print(position)
    print()
def display_api(api_key):
    """Print the API key from a user record.

    NOTE(review): the argument is a full user row (index 4 holds the key),
    not the key itself -- the parameter name is misleading; confirm callers.
    """
    print('\nAPI key is: {}\n'.format(api_key[4]))
def trade_stock():
    """Prompt for a ticker and share count; return (TICKER, int_quantity)."""
    print()
    symbol = input('Enter stock ticker: ')
    amount = input('Enter quantity: ')
    return symbol.upper(), int(amount)
def lookup_ticker():
    """Prompt for a ticker symbol and return it upper-cased."""
    print('\nLookup ticker\n')
    symbol = input('Lookup ticker: ')
    return symbol.upper()
def transaction_error(err_message):
    """Show a transaction failure message to the user."""
    print(err_message)
def goodbye():
    """Print a farewell message and terminate the program via SystemExit."""
    print('\nThank you for using Terminal Trader')
    exit()
def bad_input(error_message):
    """Clear the screen and show a retry prompt for invalid input."""
    os.system('clear')
    print('\n{}. Retry!\n'.format(error_message))
350bdb1ac7160a4c4d6a30d9f59c7f5767f01f8f | Python | AurelOnuzi/Scripts | /NameSearchGenerator.py | UTF-8 | 3,424 | 3.359375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
#Aurel Onuzi
import csv
import os.path
import sys
# user input
names_file = input('Enter a text file with a list of name: ')
nickname_file = input('Enter a file with a list of nicknames, or just enter 0 to skip this step: ')
name_var = input('Select name variation: Single Line or Multiple Lines ( either enter single(or type 0) or enter multiple(or type 1) ): ')
# dictionary where firstname is the key and nicknames are the values, 1:N pair
nickname_dict = {}
# Normalize the file names BEFORE the existence check, so a name given
# without the .txt extension is still found (the original checked first).
if not names_file.endswith('.txt'):
    names_file = names_file + '.txt'
# Bug fix: the skip sentinel '0' must keep its value; previously it was
# rewritten to '0.txt' and then treated as a real nickname file in main().
if nickname_file != '0' and not nickname_file.endswith('.txt'):
    nickname_file = nickname_file + '.txt'
# some error handling with file names
if not os.path.isfile(names_file):
    raise SystemError('No file found')
if name_var.strip().lower() == 'single':
    name_var = '0'
elif name_var.strip().lower() == 'multiple':
    name_var = '1'
elif name_var.strip() != '0' and name_var.strip() != '1':
    raise SystemError('Wrong selection was given')
def name_variations(first, last):
    """Emit search variations for one name, honoring the global name_var mode."""
    if name_var == '0':
        # Single-line mode: wrap the whole OR-chain in parentheses.
        print('(', end="")
        nickname_single_variations(first, last)
        print(')', end="")
    elif name_var == '1':
        nickname_multi_variations(first, last)
def generate_single_var(first, last):
    """Print the single-line search variants for one first/last name pair.

    Variants: "First Last", "Last, First", "<last initial>First", and a
    proximity search "First w/2 Last". To use the first initial of the first
    name instead (e.g. "JJohn"), swap last[:1] for first[:1] below.
    """
    template = '{0} {1} OR {1}, {0} OR {2}{0} OR {0} w/2 {1}'
    print(template.format(first, last, last[:1]), end="")
def generate_multi_var(first, last):
    """Print the multi-line search variants (one variation per line).

    NOTE(review): this uses first[:1] for the initial while the single-line
    variant uses last[:1] -- confirm which initial is intended.
    """
    variants = '{0} {1}\n{1}, {0}\n{2}{0}\n{0} w/2 {1}'.format(first, last, first[:1])
    print(variants)
def populate_dict(file):
    """Load "firstname,nickname" CSV rows into the module-level nickname_dict.

    Each first name maps to the list of its nicknames (1:N). Blank lines and
    rows without a nickname column are skipped -- the original raised
    IndexError on one-column rows and carried a redundant `elif row:`.
    """
    with open(file) as f:
        reader = csv.reader(f)
        for row in reader:
            if len(row) >= 2:
                nickname_dict.setdefault(row[0], []).append(row[1])
def nickname_single_variations(firstname, last):
    """Print the single-line variants for firstname plus each known nickname."""
    # initial name variation
    generate_single_var(firstname, last)
    # nickname variations: direct lookup instead of the original O(n) scan
    # over every dict entry comparing keys.
    for nick in nickname_dict.get(firstname, []):
        print(' OR ', end="")
        generate_single_var(nick, last)
def nickname_multi_variations(firstname, last):
    """Print the multi-line variants for firstname plus each known nickname."""
    generate_multi_var(firstname, last)
    # Direct lookup instead of the original O(n) scan over all dict entries.
    for nick in nickname_dict.get(firstname, []):
        generate_multi_var(nick, last)
def main():
    """Write <names_file>_Search.txt containing search variants for every name.

    Reads "First Last" pairs from names_file and redirects print() output
    into the results file while generating.
    """
    if nickname_file != '0':
        populate_dict(nickname_file)
    base_file = os.path.basename(names_file)
    new_file = os.path.splitext(base_file)[0] + '_Search.txt'
    original_stdout = sys.stdout
    try:
        with open(names_file, 'r') as input_file, open(new_file, 'w') as output_file:
            # Route all print() output into the results file.
            sys.stdout = output_file
            for line in input_file:
                name = line.split()
                # Skip blank or malformed lines instead of crashing on name[1].
                if len(name) >= 2:
                    name_variations(name[0], name[1])
    finally:
        # Always restore stdout, even if reading or writing fails part-way.
        sys.stdout = original_stdout
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
20aa160dfbc17c9391dc4db209dd2149900db9f0 | Python | atomichighfive/tealeaves | /tealeaves/util/histogram_bin_formulas.py | UTF-8 | 2,085 | 3.515625 | 4 | [
"MIT"
] | permissive | # Author: Eyad Sibai
import numpy as np
from scipy.stats import iqr
def histogram_square_formula(values):
    """Square-root rule: number of bins = sqrt(N).

    :param values: the histogram sample values
    :return: (number of bins, bin width); bins is a float, not rounded
    """
    n_bins = np.sqrt(len(values))
    width = (np.max(values) - np.min(values)) / n_bins
    return n_bins, width
def histogram_sturges_formula(values):
    """Sturges' rule: number of bins = ceil(log2(N)) + 1.

    :param values: the histogram sample values
    :return: (number of bins, bin width)
    """
    n_bins = np.ceil(np.log2(len(values))) + 1
    width = (np.max(values) - np.min(values)) / n_bins
    return n_bins, width
def histogram_rice_formula(values):
    """Rice rule: number of bins = 2 * N^(1/3).

    :param values: the histogram sample values
    :return: (number of bins, bin width)
    """
    n_bins = 2 * np.power(len(values), 1. / 3)
    width = (np.max(values) - np.min(values)) / n_bins
    return n_bins, width
def histogram_scott_formula(values):
    """Scott's rule: bin width = 3.5 * sigma / N^(1/3).

    :param values: the histogram sample values
    :return: (number of bins, bin width)
    """
    width = 3.5 * np.std(values) / np.power(len(values), 1 / 3)
    n_bins = (np.max(values) - np.min(values)) / width
    return n_bins, width
def histogram_freedman_diaconis_formula(values):
    """Freedman-Diaconis rule: bin width = 2 * IQR / N^(1/3).

    :param values: the histogram sample values
    :return: (number of bins, bin width)
    """
    width = 2 * iqr(values) / np.power(len(values), 1 / 3)
    n_bins = (np.max(values) - np.min(values)) / width
    return n_bins, width
# Author: Andreas Syrén
def bin_it(values, formula='freedman_diaconis'):
    """Return histogram bin edges for values using the named binning formula.

    :param values: sample values; non-finite entries (NaN/inf) are dropped
    :param formula: 'square', 'sturges', 'rice', 'scott', or any string
        containing 'freedman' or 'diaconis'
    :return: np.linspace of equally spaced points from min to max (capped at 500)
    :raises ValueError: if formula does not match an implemented rule
    """
    values = [v for v in values if np.isfinite(v)]
    if formula == 'square':
        bins, bin_width = histogram_square_formula(values)
    elif formula == 'sturges':
        bins, bin_width = histogram_sturges_formula(values)
    elif formula == 'rice':
        bins, bin_width = histogram_rice_formula(values)
    elif formula == 'scott':
        bins, bin_width = histogram_scott_formula(values)
    elif ('freedman' in formula) or ('diaconis' in formula):
        bins, bin_width = histogram_freedman_diaconis_formula(values)
    else:
        raise ValueError("\"%s\" not in implemented formulas." % str(formula))
    # Bug fix: np.linspace requires an integer point count, but the formulas
    # return floats (modern NumPy raises TypeError on a float num). Round up
    # so narrow distributions still get at least the suggested bin count.
    num_points = int(np.ceil(min(500, bins)))
    return np.linspace(np.min(values), np.max(values), num_points)
| true |
# Demo: len() counts every character, spaces and punctuation included (22 here).
greeting = "Hello World. I am here"
print(len(greeting))
| true |
1e0df36fbeb22738110683edb35dd6594245a6b2 | Python | cixinxc/Python | /classifier/perceptron/perceptron.py | UTF-8 | 2,368 | 2.828125 | 3 | [] | no_license | from numpy import *
import matplotlib.pyplot as plt
def drawdata(train_x, train_y, train_label,bc,test_x):
    """Scatter-plot train/test points, fit a perceptron and draw its boundary.

    NOTE(review): assumes 2-D features and that test labels start at column
    80 of train_label -- confirm against the caller's 80/20 split.
    """
    # Training points: red = class -1, blue = class +1.
    for i in range( train_x.shape[0] ):
        print(train_x[i, 0], train_x[i, 1])  # debug trace of each training point
        if train_label[0, i] == -1:
            plt.plot(train_x[i, 0], train_x[i, 1], 'or')
        elif train_label[0, i] == 1:
            plt.plot(train_x[i, 0], train_x[i, 1], 'ob')
    # Test points: yellow = class -1, white = class +1 (labels offset by 80).
    for i in range( test_x.shape[0] ):
        if train_label[0, 80+i] == -1:
            plt.plot(test_x[i, 0], test_x[i, 1], 'oy')
        elif train_label[0, 80+i] == 1:
            plt.plot(test_x[i, 0], test_x[i, 1], 'ow')
    train_y = train_y.T
    # Fit with learning rate bc, then draw the separating line through
    # x = 2 and x = 6 using y = (-b - x*w0) / w1.
    w, b = perceptron(train_x, train_y, bc)
    print(w, b)
    y1 = (-b - (2 * w[0, 0])) / (1.0 * w[0, 1])
    y2 = (-b - (6 * w[0, 0])) / (1.0 * w[0, 1])
    plt.plot([2,6], [y1, y2])
    plt.xlabel("x value")
    plt.ylabel("y value")
    plt.title("a simple example")
    plt.plot([-6, -6], [4, 10], '-g')
    plt.show()
def perceptron(train_x, train_y, bc):
    """Train a two-feature perceptron and return (weights, bias).

    Repeats full passes over the samples; each misclassified sample is
    re-updated until it is correctly classified. Training stops after the
    first pass that needs no updates. bc is the learning rate.
    """
    w = mat([0.0, 0.0])
    b = 0.0
    count = 0  # total number of weight updates performed
    print(train_x.shape)
    while True:
        mistakes = 0
        for i in range(train_x.shape[0]):
            # Keep nudging the hyperplane until sample i is on the right side
            # (strictly positive margin required).
            while train_y[i, 0] * (train_x[i, 0] * w[0, 0] + train_x[i, 1] * w[0, 1] + b) <= 0:
                print(train_y[i, 0], train_x[i, 0], train_y[i, 0], w[0, 0], w[0, 1])
                w[0, 0] += bc * train_y[i, 0] * train_x[i, 0]
                w[0, 1] += bc * train_y[i, 0] * train_x[i, 1]
                b += bc * train_y[i, 0]
                count += 1
                mistakes += 1
        if mistakes == 0:
            return w, b
def main():
    """Load testSet.txt (x, y, label per tab-separated line), split and plot."""
    print('main function')
    dataSet = []
    labels = []
    # NOTE(review): fileIn is never closed; a with-statement would be safer.
    fileIn = open('testSet.txt')
    for line in fileIn.readlines():
        lineArr = line.strip().split('\t')
        dataSet.append([float(lineArr[0]), float(lineArr[1])])
        labels.append(float(lineArr[2]))
    dataSet = mat(dataSet)
    labels = mat(labels)
    # First 81 rows train, rows 80-100 test (row 80 appears in both slices).
    train_x = dataSet[0:81, :]
    train_y = labels[0:81, :]
    test_x = dataSet[80:101, :]
    test_y = labels[80:101, :]  # currently unused by drawdata
    bc = 0.1  # learning rate
    drawdata(train_x, train_y, labels,bc,test_x)
# Run the demo only when executed as a script (also drops the stray semicolon).
if __name__ == '__main__':
    main()
04d6ae584bd7b4d1bd7bb625e5a762acc52f83b2 | Python | dorabelme/Python-Programming-An-Introduction-to-Computer-Science | /chapter9_programming10.py | UTF-8 | 619 | 3.671875 | 4 | [] | no_license | from random import random
def main():
    """Run the dart-throwing pi estimation from prompt to printed result."""
    printIntro()
    n_darts = getNumDarts()
    estimate = simThrows(n_darts)
    printEst(estimate)
print("Dart Estimate of Pi")
def getNumDarts():
return eval(input("Enter the number of darts to be thrown: "))
def simThrows(darts):
hits = 0
for i in range(darts):
if hitOneThrow():
hits += 1
pi_est = 4 * (hits / darts)
return pi_est
def hitOneThrow():
    """Throw one dart uniformly into [-1, 1]^2.

    Returns True if it lands inside the unit circle, else False.
    """
    x = 2 * random() - 1
    y = 2 * random() - 1
    return x ** 2 + y ** 2 <= 1
def printEst(pi_est):
    """Print the pi estimate rounded to four decimal places."""
    print("\nPi Estimate: {0:0.4f}".format(pi_est))
# Standard script guard: run the simulation only when executed directly.
if __name__ == '__main__':
    main()
| true |
9fc26531cfe9a90e3118bdc26a631bd7321c928f | Python | JheniferElisabete/EstruturasDeRepeticao | /Fatorial.py | UTF-8 | 412 | 4.65625 | 5 | [] | no_license | '''Faça um programa que calcule o fatorial de um número inteiro fornecido pelo usuário. Ex.: 5!=5.4.3.2.1=120.
A saída deve ser conforme o exemplo abaixo:
Fatorial de: 5
5! = 5 . 4 . 3 . 2 . 1 = 120'''
f = int(input('Informe um numero para ser calculado o seu fatorial '))
fa = 1
count = f
for i in range(f, 0, -1):
fa = fa * i
print(count, end=" * ")
count -= 1
print("1 = ", fa)
| true |
1cea3524a60f2e91f9f7cdac63934e97220d8552 | Python | daniel-reich/ubiquitous-fiesta | /vnzjuqjCf4MFHGLJp_14.py | UTF-8 | 228 | 3.46875 | 3 | [] | no_license |
def shift_letters(txt, n):
    """Shift the letters of txt right by n positions, leaving spaces fixed.

    The non-space characters are treated as one circular sequence: output
    slot k takes the character n places earlier (mod the letter count),
    while every space stays exactly where it was.
    """
    letters = txt.replace(" ", "")
    total = len(letters)
    shifted = []
    pos = 0  # index among non-space characters only
    for ch in txt:
        if ch == " ":
            shifted.append(" ")
        else:
            shifted.append(letters[(pos - n) % total])
            pos += 1
    return "".join(shifted)
| true |
e6d127517d9edfad0b283f9b8ff3b1eb6864a4d0 | Python | ColCarroll/autobencher | /autobencher/repository.py | UTF-8 | 763 | 2.75 | 3 | [] | no_license | import os
from subprocess import check_call
from abc import ABCMeta, abstractmethod
class SourceRepository(metaclass=ABCMeta):
    """Abstract base / factory for source-control repository wrappers."""

    @classmethod
    def makeRepository(cls, url, branch, directory):
        # Factory helper; git is currently the only supported backend,
        # so the result is always a GitRepository regardless of cls.
        return GitRepository(url, branch, directory)

    @abstractmethod
    def __init__(self):
        """Abstract constructor"""
class GitRepository(SourceRepository):
    """Clone-or-update wrapper around a git checkout in `directory`."""

    def __init__(self, url, branch, directory):
        if os.path.exists(directory):
            # Existing checkout: update it. Bug fix: the original os.chdir()
            # into the repo was never restored if `git pull` raised, leaving
            # the whole process in the wrong working directory. Running the
            # subprocess with cwd= avoids touching the process CWD at all.
            check_call(['git', 'pull'], cwd=directory)
        else:
            # Fresh checkout of the requested branch.
            check_call(['git', 'clone', '-b', branch, url, directory])
| true |
e699ebf0bea25bf4af9871835df32db2f8d128da | Python | kjaffel/ZA_FullAnalysis | /bamboo_/ZAMachineLearning/threadGPU.py | UTF-8 | 5,049 | 2.796875 | 3 | [] | no_license | import os
import sys
import logging
import subprocess
import traceback
from pynvml import *
from pynvml.smi import nvidia_smi
#import nvidia_smi
from time import sleep
from threading import Thread
class utilizationGPU(Thread):
    """
    Class generaring a parallel thread to monitor the GPU usage
    Initialize with :
    thread = utilizationGPU(print_time = int      # Frequency of printing the average and current GPU usage
                            print_current = bool  # In addition to average, print current usage
                            time_step = float)    # Time step for sampline
    Start thread with:
    thread.start()
    Will print usage and memory average over the last "print_time" seconds
    Stop the thread with
    thread.stopLoop()  # Important ! : Otherwise the loop will not be stopped properly
    thread.join()      # Classic
    """
    def __init__(self, print_time=60, print_current=False, time_step=0.01):
        # Call the Thread class's init function
        super(utilizationGPU, self).__init__()
        self.print_time = print_time
        self.print_current = print_current
        self.time_step = time_step
        self.GPUs = []           # NVML device handles, one per detected GPU
        self.occAvgTot = []      # utilization integral over the whole run, per GPU
        self.occAvgStep = []     # utilization integral since the last printout, per GPU
        self.memAvgTot = []      # memory integral over the whole run, per GPU
        self.memAvgStep = []     # memory integral since the last printout, per GPU
        self.running = True      # cleared by stopLoop() to end run()'s while loop
        try:
            nvmlInit()
            self.deviceCount = nvmlDeviceGetCount()
            # Get list of handles #
            logging.info("[GPU] Detected devices are :")
            for i in range(self.deviceCount):
                handle = nvidia_smi.nvmlDeviceGetHandleByIndex(i)
                self.GPUs.append(handle)
                logging.info("[GPU] ..... Device %d : %s"%(i, nvmlDeviceGetName(handle)))
                # Records #
                self.occAvgTot.append(0)
                self.occAvgStep.append(0)
                self.memAvgTot.append(0)
                self.memAvgStep.append(0)
            logging.info("[GPU] Will print usage every %d seconds"%self.print_time)
        except Exception as e:
            # NVML may be unavailable (no driver / no GPU); log and continue.
            logging.error("[GPU] *** Caught exception: %s : %s"%(str(e.__class__),str(e)))
            traceback.print_exc()

    # Override the run function of Thread class
    def run(self):
        import random  # NOTE(review): unused import — candidate for removal
        # NOTE(review): this overrides the time_step passed to the constructor,
        # so the constructor argument is effectively ignored — confirm intent.
        self.time_step = 0.01
        counter = 0        # total number of samples taken
        print_counter = 0  # samples since the last printout
        while(self.running):
            res = []
            for i in range(self.deviceCount):
                res.append(nvidia_smi.nvmlDeviceGetUtilizationRates(self.GPUs[i]))
            # Print every self.print_time #
            if print_counter == int(self.print_time/self.time_step):
                # Print current #
                if self.print_current:
                    s = "\t[GPU] "
                    for i in range(self.deviceCount):
                        s += "Device %d %s : utilization : %d%%, memory : %d%%\t"%(i, nvmlDeviceGetName(self.GPUs[i]),res[i].gpu,res[i].memory)
                    logging.info(s)
                # Print avg #
                if self.print_time<60:
                    logging.info("\n[GPU] Occupation over the last %d seconds"%self.print_time)
                else:
                    minutes = self.print_time//60
                    seconds = self.print_time%60
                    logging.info("\n[GPU] Occupation over the last %d minutes, %d seconds"%(minutes,seconds))
                s = "[GPU] "
                for i in range(self.deviceCount):
                    # Divide the integral by the elapsed time to get the average.
                    self.occAvgStep[i] /= (print_counter*self.time_step)
                    self.memAvgStep[i] /= (print_counter*self.time_step)
                    s += "Device %d %s : utilization : %d%%, memory : %d%%\t"%(i, nvmlDeviceGetName(self.GPUs[i]),self.occAvgStep[i],self.memAvgStep[i])
                    # Reinitialize average #
                    self.occAvgStep[i] = 0
                    self.memAvgStep[i] = 0
                logging.info(s)
                # reset printing counter #
                print_counter = 0
            # Add to total and step #
            for i in range(self.deviceCount):
                self.occAvgTot[i] += res[i].gpu*self.time_step
                self.occAvgStep[i] += res[i].gpu*self.time_step
                self.memAvgTot[i] += res[i].memory*self.time_step
                self.memAvgStep[i] += res[i].memory*self.time_step
            # Sleep and counters #
            print_counter += 1
            counter += 1
            sleep(self.time_step)
        # Print total #
        logging.info("[GPU] Average occupation over whole period")
        s = "[GPU] "
        for i in range(self.deviceCount):
            self.occAvgTot[i] /= (counter*self.time_step)
            self.memAvgTot[i] /= (counter*self.time_step)
            s += "Device %d %s : utilization : %d%%, memory : %d%%\t"%(i, nvmlDeviceGetName(self.GPUs[i]),self.occAvgTot[i],self.memAvgTot[i])
        logging.info(s)

    def stopLoop(self):
        # Signal run() to leave its sampling loop; call before join().
        self.running = False
| true |
caced0f1b2eeea9a22fe60ccb2dfae4cb4325a77 | Python | NahusenayH/ComptetiveProgramming | /take1/CD12/ConstructTargetArrayWithMultipleSums.py | UTF-8 | 643 | 3.03125 | 3 | [] | no_license | class Solution:
    def isPossible(self, target: List[int]) -> bool:
        """Return True if *target* can be built from an all-ones array by
        repeatedly replacing one element with the sum of the whole array.

        Works backwards: the current maximum must have been produced last,
        so it is reduced modulo the sum of the remaining elements.
        """
        # NOTE(review): an empty *target* would raise IndexError here —
        # presumably the judge guarantees a non-empty input.
        if len(target) <= 1 and target[0] != 1:
            return False
        if len(target) == 1 and target[0] == 1:
            return True
        # Max-heap via negated values (heapq is a min-heap).
        heap = []
        for i in target:
            heap.append(-i)
        heapq.heapify(heap)
        total = sum(target)
        while True:
            a = -heapq.heappop(heap)   # current maximum
            total -= a                 # sum of all other elements
            if a == 1 or total == 1:
                return True
            # Max smaller than the rest, or reduction makes no progress:
            # the array cannot be rolled back to all ones.
            if a < total or a % total == 0:
                return False
            a %= total                 # undo as many sum-steps as possible at once
            total += a
            heapq.heappush(heap, -a)
| true |
41cf8b1f27d8761b8c6e21ceb99023164688536c | Python | jinlygenius/basics | /algorithm/sortings/run.py | UTF-8 | 1,030 | 2.796875 | 3 | [] | no_license |
import os
import sys
import time
BASE_DIR = '/Users/ella/Dropbox/Study/basics'
# BASE_DIR = '/Users/shs/Dropbox/Study/basics'
sys.path.insert(0, BASE_DIR)
import random
from bucket_sort import bucket_sort
from bubble_sort import bubble_sort
from selection_sort import selection_sort
from insertion_sort import insertion_sort
from quick_sort import quick_sort
from merge_sort import merge_sort, merge
from heap_sort import heap_sort, max_heapify, build_max_heap
from shell_sort import shell_sort
from utils.my_decorators import timeit
if __name__ == '__main__':
    # Ad-hoc benchmark: sort 32 random ints with one of the local sort modules
    # and report wall-clock time.
    random_items = [random.randint(-50, 100) for c in range(32)]
    print(random_items)
    print(len(random_items))
    # Previous experiments, kept for reference:
    # aaa = [9, 11, 27, 83, 96]
    # result = build_max_heap(aaa)
    # result = heap_sort(random_items)
    time1 = time.time()
    # result = merge_sort(random_items, 0, len(random_items))
    result = shell_sort(random_items)
    time2 = time.time()
    print('took: %2.6f sec' % (time2-time1))
    print(result)
    print(len(result))
| true |
5593fa1039636229b0630ffddb5e490c6306a0fc | Python | 330mlcc/mp | /src/fundamental/advance_feature/do_listcompr.py | UTF-8 | 216 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
#-*- coding: utf-8 -*-
__author__ = 'liugang5'
if __name__ == "__main__":
    # Demo: keep only the string elements of L1, lower-cased.
    L1 = ['Hello', 'World', 18, 'Apple', None]
    # `isinstance(...)` already returns a bool — comparing it with `is True`
    # was redundant and has been dropped.
    L2 = [s.lower() for s in L1 if isinstance(s, str)]
    print(L2)
0cec31d9bef996ac8723c934bbd5892e1ad84ad0 | Python | nomad2001/2048 | /model.py | UTF-8 | 16,658 | 2.640625 | 3 | [
"MIT"
] | permissive | import random
import json
import copy
import hashlib
# Move directions accepted by Igra.premakni (up/down/left/right).
GOR = 'U'
DOL = 'D'
LEVO = 'L'
DESNO = 'R'
# Result codes returned by Igra.premakni.
KONEC_IGRE = 'E'          # no move is possible any more
NEOBSTOJECA_SMER = 'N'    # unknown direction argument
ZMAGA = 'Z'               # a 2048 tile was just created
# Igra.stanje values (win bookkeeping across moves).
NI_SE_ZMAGAL = 0
ZMAGAL = 1
ZE_PREJ_ZMAGAL = 2
# Persistence file names for games and user accounts.
DATOTEKA_ZA_SHRANJEVANJE = "podatki.json"
DATOTEKA_ZA_UPORABNIKE = "uporabniki.json"
class UporabnikiRazred:
    """Registry of all users, persisted as a JSON file keyed by username."""

    def __init__(self, uporabniki = None):
        # Mapping: username -> Uporabnik.
        self.uporabniki = uporabniki or {}

    def zapisi_v_datoteko(self, datoteka):
        """Serialize every user and dump the registry to *datoteka* (JSON)."""
        json_slovar = {}
        for ime, uporabnik in self.uporabniki.items():
            json_slovar[ime] = uporabnik.v_slovar()
        with open(datoteka, "w") as out_file:
            json.dump(json_slovar, out_file)

    @staticmethod
    def preberi_iz_datoteke(datoteka):
        """Load a registry previously written by zapisi_v_datoteko."""
        with open(datoteka, "r") as in_file:
            json_slovar = json.load(in_file)
        uporabniki = {}
        for ime, slovar in json_slovar.items():
            uporabniki[ime] = Uporabnik.iz_slovarja(slovar)
        return UporabnikiRazred(uporabniki)

    def dobi_lestvico(self, velikost):
        """Return (username, best_score) pairs for board size *velikost*
        (a string key like "4"), sorted by score descending."""
        lestvica = [(uporabnisko_ime, self.uporabniki[uporabnisko_ime].najboljsi_rezultati[velikost])
                    for uporabnisko_ime in self.uporabniki.keys()]
        return sorted(lestvica, key = lambda x:(-x[1]))
class Uporabnik:
    """A user account with a salted-hash password and per-board-size best scores."""

    def __init__(self, uporabnisko_ime, zasifrirano_geslo, najboljsi_rezultati = None):
        self.uporabnisko_ime = uporabnisko_ime
        self.zasifrirano_geslo = zasifrirano_geslo
        if najboljsi_rezultati is None:
            # One best-score slot per supported board size (keys are strings
            # because they round-trip through JSON).
            self.najboljsi_rezultati = {"3" : 0, "4" : 0, "5" : 0, "6" : 0, "7" : 0, "8" : 0}
        else:
            self.najboljsi_rezultati = najboljsi_rezultati

    @staticmethod
    def prijava(uporabnisko_ime, geslo_v_cistopisu):
        """Log in: return the stored user or raise ValueError on bad name/password."""
        uporabniki = UporabnikiRazred.preberi_iz_datoteke(DATOTEKA_ZA_UPORABNIKE).uporabniki
        if uporabnisko_ime not in uporabniki:
            raise ValueError("Uporabniško ime ne obstaja")
        elif uporabniki[uporabnisko_ime].preveri_geslo(geslo_v_cistopisu):
            return uporabniki[uporabnisko_ime]
        else:
            raise ValueError("Geslo je napačno")

    @staticmethod
    def registracija(uporabnisko_ime, geslo_v_cistopisu):
        """Register a new user; raise ValueError if the username is taken."""
        if uporabnisko_ime in \
                UporabnikiRazred.preberi_iz_datoteke(DATOTEKA_ZA_UPORABNIKE).uporabniki:
            raise ValueError("Uporabniško ime že obstaja")
        else:
            zasifrirano_geslo = Uporabnik._zasifriraj_geslo(geslo_v_cistopisu)
            uporabnik = Uporabnik(uporabnisko_ime, zasifrirano_geslo)
            return uporabnik

    @staticmethod  # was missing: the function takes no self/cls
    def _zasifriraj_geslo(geslo_v_cistopisu, sol = None):
        """Return "salt$blake2b-hexdigest" for the given plaintext password.

        A random salt is generated unless *sol* is supplied (used when
        re-hashing during verification).
        """
        if sol is None:
            sol = str(random.getrandbits(32))
        posoljeno_geslo = sol + geslo_v_cistopisu
        h = hashlib.blake2b()
        h.update(posoljeno_geslo.encode(encoding="utf-8"))
        return f"{sol}${h.hexdigest()}"

    def v_slovar(self):
        """JSON-serializable representation; inverse of iz_slovarja."""
        return {
            "uporabnisko_ime": self.uporabnisko_ime,
            "zasifrirano_geslo": self.zasifrirano_geslo,
            "najboljsi_rezultati": self.najboljsi_rezultati,
        }

    def preveri_geslo(self, geslo_v_cistopisu):
        """True if the plaintext password matches the stored salted hash."""
        sol, _ = self.zasifrirano_geslo.split("$")
        return self.zasifrirano_geslo == Uporabnik._zasifriraj_geslo(geslo_v_cistopisu, sol)

    @staticmethod
    def iz_slovarja(slovar):
        """Rebuild a Uporabnik from a dict produced by v_slovar."""
        uporabnisko_ime = slovar["uporabnisko_ime"]
        zasifrirano_geslo = slovar["zasifrirano_geslo"]
        najboljsi_rezultati = slovar["najboljsi_rezultati"]
        return Uporabnik(uporabnisko_ime, zasifrirano_geslo, najboljsi_rezultati)
def generirarajNakljucnoPozicijoInStevilo(velikost, tabela):
    """Drop a new tile onto a uniformly chosen empty cell of *tabela*:
    a 4 with probability 2/10, otherwise a 2. Does nothing on a full board."""
    prazna = [(vrstica, stolpec)
              for vrstica in range(velikost)
              for stolpec in range(velikost)
              if tabela[vrstica][stolpec] == 0]
    if not prazna:
        return
    # Same two RNG draws, in the same order, as the original implementation:
    # first pick the cell, then decide the tile value.
    x, y = prazna[random.randrange(len(prazna))]
    tabela[x][y] = 4 if random.randrange(10) % 5 == 0 else 2
class IgreRazred:
    """Running games, one per username, with JSON (de)serialization."""

    def __init__(self, zacetne_igre = None):
        # Mapping: username -> Igra.
        self.igre = zacetne_igre or {}

    def nova_igra(self, uporabnik, velikost):
        """Start a fresh game of the given board size for *uporabnik*."""
        sveza_igra = nova_igra(velikost)
        self.igre[uporabnik.uporabnisko_ime] = sveza_igra
        return uporabnik

    def premakni(self, uporabnik, smer):
        """Apply move *smer* to the user's game, update their best score,
        and return True when the game just ended."""
        igra = self.igre[uporabnik.uporabnisko_ime]
        konec = igra.premakni(smer)
        if igra.steviloTock > uporabnik.najboljsi_rezultati[str(igra.velikost)]:
            uporabnik.najboljsi_rezultati[str(igra.velikost)] = igra.steviloTock
        self.igre[uporabnik.uporabnisko_ime] = igra
        if konec == KONEC_IGRE:
            return True
        return False

    def pretvor_v_json_slovar(self):
        """Serialize all games into a plain dict keyed by username."""
        slovar_iger = {}
        for uporabnisko_ime, igra in self.igre.items():
            slovar_iger[uporabnisko_ime] = igra.pretvor_v_json_slovar()
        return slovar_iger

    def zapisi_v_datoteko(self, datoteka):
        """Persist all games to *datoteka* as JSON."""
        with open(datoteka, "w") as out_file:
            json_slovar = self.pretvor_v_json_slovar()
            json.dump(json_slovar, out_file)

    @classmethod
    def dobi_iz_json_slovarja(cls, slovar):
        """Rebuild an IgreRazred from a dict made by pretvor_v_json_slovar."""
        slovar_iger = {}
        for uporabnisko_ime, igra_slovar in slovar.items():
            slovar_iger[uporabnisko_ime] = Igra.dobi_iz_json_slovarja(igra_slovar)
        return IgreRazred(slovar_iger)

    @staticmethod
    def preberi_iz_datoteke(datoteka):
        """Load games previously written by zapisi_v_datoteko."""
        with open(datoteka, "r") as in_file:
            json_slovar = json.load(in_file)
        return IgreRazred.dobi_iz_json_slovarja(json_slovar)
class Igra:
    """One 2048 game: square board *tabela*, score, and win-state bookkeeping.

    The four premakni* methods slide and merge tiles in one direction and
    return True when at least one tile moved or merged. `zeSestet` marks
    cells that already merged this move so a tile merges at most once.
    """

    def __init__(self, velikost = 4, tabela = None, steviloTock = 0, stanje = NI_SE_ZMAGAL):
        self.velikost = velikost
        self.steviloTock = steviloTock
        self.stanje = stanje
        # NOTE(review): `is None` would be the idiomatic test here.
        if tabela == None:
            # Fresh game: empty board plus two starting tiles.
            self.tabela = [[0 for i in range(velikost)] for i in range(velikost)]
            generirarajNakljucnoPozicijoInStevilo(self.velikost,self.tabela)
            generirarajNakljucnoPozicijoInStevilo(self.velikost,self.tabela)
        else:
            self.tabela = tabela

    def premakniLevo(self):
        """Slide/merge all rows to the left; True if anything changed."""
        zeSestet = [[False for i in range(self.velikost)] for i in range(self.velikost)]
        aliSeJeKateriPremaknil = False
        for i in range(self.velikost):
            # prostaMesta collects free column indices seen so far;
            # prvoSeVednoProstoMesto points at the leftmost still-free one.
            prostaMesta = []
            prvoSeVednoProstoMesto = 0
            for j in range(self.velikost):
                if self.tabela[i][j] == 0:
                    prostaMesta.append(j)
                elif len(prostaMesta) != 0:
                    if prostaMesta[prvoSeVednoProstoMesto] == 0:
                        # Free slot is the edge column: just move there.
                        self.tabela[i][0] = self.tabela[i][j]
                        self.tabela[i][j] = 0
                        prostaMesta.append(j)
                        prvoSeVednoProstoMesto += 1
                    else:
                        # Merge with the tile left of the free slot if equal
                        # and not already merged this move; otherwise slide.
                        if self.tabela[i][prostaMesta[prvoSeVednoProstoMesto] - 1] == self.tabela[i][j] \
                                and not zeSestet[i][prostaMesta[prvoSeVednoProstoMesto] - 1]:
                            self.steviloTock += 2 * self.tabela[i][j]
                            self.tabela[i][prostaMesta[prvoSeVednoProstoMesto] - 1] *= 2
                            self.tabela[i][j] = 0
                            zeSestet[i][prostaMesta[prvoSeVednoProstoMesto] - 1] = True
                            prostaMesta.append(j)
                        else:
                            self.tabela[i][prostaMesta[prvoSeVednoProstoMesto]] = self.tabela[i][j]
                            self.tabela[i][j] = 0
                            prostaMesta.append(j)
                            prvoSeVednoProstoMesto += 1
                    aliSeJeKateriPremaknil = True
                elif j != 0:
                    # No gap yet: the tile can still merge with its direct
                    # left neighbour.
                    if self.tabela[i][j] == self.tabela[i][j - 1]:
                        self.steviloTock += 2 * self.tabela[i][j - 1]
                        self.tabela[i][j - 1] *= 2
                        self.tabela[i][j] = 0
                        zeSestet[i][j - 1] = True
                        prostaMesta.append(j)
                        aliSeJeKateriPremaknil = True
        return aliSeJeKateriPremaknil

    def premakniGor(self):
        """Slide/merge all columns upwards; True if anything changed."""
        zeSestet = [[False for i in range(self.velikost)] for i in range(self.velikost)]
        aliSeJeKateriPremaknil = False
        for i in range(self.velikost):
            prostaMesta = []
            prvoSeVednoProstoMesto = 0
            for j in range(self.velikost):
                if self.tabela[j][i] == 0:
                    prostaMesta.append(j)
                elif len(prostaMesta) != 0:
                    if prostaMesta[prvoSeVednoProstoMesto] == 0:
                        self.tabela[0][i] = self.tabela[j][i]
                        self.tabela[j][i] = 0
                        prostaMesta.append(j)
                        prvoSeVednoProstoMesto += 1
                    else:
                        if self.tabela[prostaMesta[prvoSeVednoProstoMesto] - 1][i] == self.tabela[j][i] \
                                and not zeSestet[prostaMesta[prvoSeVednoProstoMesto] - 1][i]:
                            self.steviloTock += 2 * self.tabela[j][i]
                            self.tabela[prostaMesta[prvoSeVednoProstoMesto] - 1][i] *= 2
                            self.tabela[j][i] = 0
                            zeSestet[prostaMesta[prvoSeVednoProstoMesto] - 1][i] = True
                            prostaMesta.append(j)
                        else:
                            self.tabela[prostaMesta[prvoSeVednoProstoMesto]][i] = self.tabela[j][i]
                            self.tabela[j][i] = 0
                            prostaMesta.append(j)
                            prvoSeVednoProstoMesto += 1
                    aliSeJeKateriPremaknil = True
                elif j != 0:
                    if self.tabela[j][i] == self.tabela[j - 1][i]:
                        self.steviloTock += 2 * self.tabela[j - 1][i]
                        self.tabela[j - 1][i] *= 2
                        self.tabela[j][i] = 0
                        zeSestet[j - 1][i] = True
                        prostaMesta.append(j)
                        aliSeJeKateriPremaknil = True
        return aliSeJeKateriPremaknil

    def premakniDesno(self):
        """Slide/merge all rows to the right; True if anything changed."""
        zeSestet = [[False for i in range(self.velikost)] for i in range(self.velikost)]
        aliSeJeKateriPremaknil = False
        for i in range(self.velikost):
            prostaMesta = []
            prvoSeVednoProstoMesto = 0
            # Traverse columns right-to-left; the "edge" is the last column.
            for j in range(self.velikost - 1, -1, -1):
                if self.tabela[i][j] == 0:
                    prostaMesta.append(j)
                elif len(prostaMesta) != 0:
                    if prostaMesta[prvoSeVednoProstoMesto] == self.velikost - 1:
                        self.tabela[i][self.velikost - 1] = self.tabela[i][j]
                        self.tabela[i][j] = 0
                        prostaMesta.append(j)
                        prvoSeVednoProstoMesto += 1
                    else:
                        if self.tabela[i][prostaMesta[prvoSeVednoProstoMesto] + 1] == self.tabela[i][j] \
                                and not zeSestet[i][prostaMesta[prvoSeVednoProstoMesto] + 1]:
                            self.steviloTock += 2 * self.tabela[i][j]
                            self.tabela[i][prostaMesta[prvoSeVednoProstoMesto] + 1] *= 2
                            self.tabela[i][j] = 0
                            zeSestet[i][prostaMesta[prvoSeVednoProstoMesto] + 1] = True
                            prostaMesta.append(j)
                        else:
                            self.tabela[i][prostaMesta[prvoSeVednoProstoMesto]] = self.tabela[i][j]
                            self.tabela[i][j] = 0
                            prostaMesta.append(j)
                            prvoSeVednoProstoMesto += 1
                    aliSeJeKateriPremaknil = True
                elif j != self.velikost - 1:
                    if self.tabela[i][j] == self.tabela[i][j + 1]:
                        self.steviloTock += 2 * self.tabela[i][j + 1]
                        self.tabela[i][j + 1] *= 2
                        self.tabela[i][j] = 0
                        zeSestet[i][j + 1] = True
                        prostaMesta.append(j)
                        aliSeJeKateriPremaknil = True
        return aliSeJeKateriPremaknil

    def premakniDol(self):
        """Slide/merge all columns downwards; True if anything changed."""
        zeSestet = [[False for i in range(self.velikost)] for i in range(self.velikost)]
        aliSeJeKateriPremaknil = False
        for i in range(self.velikost):
            prostaMesta = []
            prvoSeVednoProstoMesto = 0
            for j in range(self.velikost - 1, -1, -1):
                if self.tabela[j][i] == 0:
                    prostaMesta.append(j)
                elif len(prostaMesta) != 0:
                    if prostaMesta[prvoSeVednoProstoMesto] == self.velikost - 1:
                        self.tabela[self.velikost - 1][i] = self.tabela[j][i]
                        self.tabela[j][i] = 0
                        prostaMesta.append(j)
                        prvoSeVednoProstoMesto += 1
                    else:
                        if self.tabela[prostaMesta[prvoSeVednoProstoMesto] + 1][i] == self.tabela[j][i] \
                                and not zeSestet[prostaMesta[prvoSeVednoProstoMesto] + 1][i]:
                            self.steviloTock += 2 * self.tabela[j][i]
                            self.tabela[prostaMesta[prvoSeVednoProstoMesto] + 1][i] *= 2
                            self.tabela[j][i] = 0
                            zeSestet[prostaMesta[prvoSeVednoProstoMesto] + 1][i] = True
                            prostaMesta.append(j)
                        else:
                            self.tabela[prostaMesta[prvoSeVednoProstoMesto]][i] = self.tabela[j][i]
                            self.tabela[j][i] = 0
                            prostaMesta.append(j)
                            prvoSeVednoProstoMesto += 1
                    aliSeJeKateriPremaknil = True
                elif j != self.velikost - 1:
                    if self.tabela[j][i] == self.tabela[j + 1][i]:
                        self.steviloTock += 2 * self.tabela[j + 1][i]
                        self.tabela[j + 1][i] *= 2
                        self.tabela[j][i] = 0
                        zeSestet[j + 1][i] = True
                        prostaMesta.append(j)
                        aliSeJeKateriPremaknil = True
        return aliSeJeKateriPremaknil

    def konecIgre(self):
        """True when no direction can change the board (tried on a deep copy)."""
        kopija = copy.deepcopy(self)
        if kopija.premakniDesno() == False and kopija.premakniLevo() == False \
                and kopija.premakniGor() == False and kopija.premakniDol() == False:
            return True
        return False

    def zmaga(self):
        """True when a 2048 tile is on the board."""
        for i in range(self.velikost):
            for j in range(self.velikost):
                if self.tabela[i][j] == 2048:
                    return True
        return False

    def premakni(self, smer):
        """Apply one move; spawn a new tile only when the move changed the board.

        Returns NEOBSTOJECA_SMER for an unknown direction, KONEC_IGRE when no
        further move is possible, ZMAGA on the first win, otherwise None.
        """
        if smer == GOR:
            if self.premakniGor():
                generirarajNakljucnoPozicijoInStevilo(self.velikost, self.tabela)
        elif smer == DOL:
            if self.premakniDol():
                generirarajNakljucnoPozicijoInStevilo(self.velikost, self.tabela)
        elif smer == DESNO:
            if self.premakniDesno():
                generirarajNakljucnoPozicijoInStevilo(self.velikost, self.tabela)
        elif smer == LEVO:
            if self.premakniLevo():
                generirarajNakljucnoPozicijoInStevilo(self.velikost, self.tabela)
        else:
            return NEOBSTOJECA_SMER
        if self.konecIgre():
            return KONEC_IGRE
        # Report the win only once per game.
        if self.zmaga() and self.stanje == NI_SE_ZMAGAL:
            self.stanje = ZMAGAL
            return ZMAGA

    def pretvor_v_json_slovar(self):
        """JSON-serializable snapshot of the game; inverse of dobi_iz_json_slovarja."""
        return {
            "tocke": self.steviloTock,
            "velikost": self.velikost,
            "tabela": self.tabela,
            "stanje": self.stanje
        }

    @staticmethod
    def dobi_iz_json_slovarja(slovar):
        """Rebuild an Igra from a dict produced by pretvor_v_json_slovar."""
        return Igra(slovar["velikost"], slovar["tabela"], slovar["tocke"], slovar["stanje"])
def nova_igra(velikost):
    """Create and return a fresh game with a *velikost* x *velikost* board."""
    return Igra(velikost)
5ab0b6bf4655912ed6e9d1411e860726fb3be3a2 | Python | mengpanqingyun/pogs-python | /pogs/types/high_level.py | UTF-8 | 1,296 | 3.015625 | 3 | [] | no_license | from ctypes import c_int, c_float, c_double
from numpy import zeros, ones
class Solution(object):
def __init__(self,double_precision,m,n):
T = c_double if double_precision else c_float
self.double_precision = double_precision
self.x=zeros(n,dtype=T)
self.y=zeros(m,dtype=T)
self.mu=zeros(n,dtype=T)
self.nu=zeros(m,dtype=T)
class FunctionVector(object):
def __init__(self, length, double_precision=False):
T = c_double if double_precision else c_float
self.a = ones(length,T)
self.b = zeros(length,T)
self.c = ones(length,T)
self.d = zeros(length,T)
self.e = zeros(length,T)
self.h = zeros(length, c_int)
self.double_precision = double_precision
def length(self):
return len(self.a)
def copyfrom(self,f):
self.a[:]=f.a[:]
self.b[:]=f.b[:]
self.c[:]=f.c[:]
self.d[:]=f.d[:]
self.e[:]=f.e[:]
self.h[:]=f.h[:]
def copyto(self,f):
f.a[:]=self.a[:]
f.b[:]=self.b[:]
f.c[:]=self.c[:]
f.d[:]=self.d[:]
f.e[:]=self.e[:]
f.h[:]=self.h[:]
def to_double(self):
if self.double_precision:
return self
else:
f=FunctionVector(self.length(),double_precision=True)
self.copyto(f)
return f
def to_float(self):
if self.double_precision:
f=FunctionVector(self.length())
self.copyto(f)
return f
else:
return self
| true |
264bc8db5897e71fb6e8fa63777ab141d279f946 | Python | 0is4car/note | /summary.py | UTF-8 | 459 | 2.6875 | 3 | [] | no_license | import os
import sys
def analy(dirname):
    """Recursively print a markdown bullet link for every ``.md`` file
    under *dirname*, in the form ``* [name](dir/file.md)``.
    """
    # Renamed the os.walk loop variables: the original reused `dirname`,
    # shadowing the parameter.
    for basename, _subdirs, filenames in os.walk(dirname):
        for file in filenames:
            # splitext replaces the fragile split('.') indexing (which
            # crashed on extension-less files and mis-handled dotted names);
            # the bare `except: pass` that hid those crashes is gone.
            notename, ext = os.path.splitext(file)
            if ext == '.md':
                print('* '+'['+notename+']'+'('+basename+'/'+file+')')
def main():
    # Directory to scan is taken from the first command-line argument.
    analy(sys.argv[1])

if __name__ == "__main__":
    main()
| true |
d5987f7ddd385a688d32c2afed1fa652bf9ab120 | Python | alexandrucatanescu/neural-program-comprehension | /stimuli/Python/one_file_per_item/en/41_# math_if 5.py | UTF-8 | 189 | 3.234375 | 3 | [
"MIT"
] | permissive | predicted = 13
actual = 11
error = 2
# NOTE(review): with "or" this condition holds for every possible difference
# (any number is either < 2 or > -2), so the else branch is unreachable;
# "and" would express |predicted - actual| < error. Kept as written since
# this file appears to be an experiment stimulus.
if (predicted - actual < error) or (predicted - actual > -1*error):
    print(predicted - actual + error)
else:
    print(predicted - actual - error)
| true |
09d73185a1cb487d78afe41f21bb42766c472a92 | Python | s-robota/OpenCV-Tracking | /cap_movie.py | UTF-8 | 3,796 | 2.875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri May 3 2019
@author: robota
"""
import argparse
import cv2
import datetime
import numpy as np
import os
import sys
import time
# settings
font = cv2.FONT_HERSHEY_SIMPLEX
c_normal = (255, 255, 255)   # white overlay text (OpenCV uses BGR)
c_caution = (0, 0, 255) # BGR  -> red, used while recording
class FpsCounter():
    """Rolling frames-per-second estimate over the last few count() calls."""

    def __init__(self):
        self._interval_secs = []   # durations between consecutive count() calls
        self._history_num = 20     # how many recent intervals to keep
        self._t = time.time()

    def count(self):
        """Record the interval since the previous count() (or construction)."""
        cur_t = time.time()
        int_sec = cur_t - self._t
        self._t = cur_t
        self._interval_secs.append(int_sec)
        if len(self._interval_secs) > self._history_num:
            self._interval_secs.pop(0)

    @property
    def cur_fps(self):
        """Average FPS over the recorded history; 0.0 before any count().

        Previously np.mean([]) produced nan (plus a RuntimeWarning) when the
        FPS was read before the first frame; a plain sum/len also removes the
        numpy dependency for this trivial average.
        """
        if not self._interval_secs:
            return 0.0
        mean_sec = sum(self._interval_secs) / len(self._interval_secs)
        return 1 / mean_sec
def cur_datetime_str():
    """Current local time formatted as ``YYYYMMDD-HH:MM:SS``."""
    now = datetime.datetime.now()
    return now.strftime("%Y%m%d-%H:%M:%S")
if __name__ == '__main__':
    # Command-line interface: output directory, capture size, optional mirror.
    parser = argparse.ArgumentParser(
        prog='cam_capture.py',
        description='Capture a movie with web-cam.',
        add_help=True,
    )
    parser.add_argument('-o', '--outdir', type=str, default='./out')
    parser.add_argument('-s', '--size', help='width and height',
                        type=int, nargs=2, default=(640, 480))
    parser.add_argument('--lr_flip', default=False, action='store_true',
                        help='If true, left-right flipped video will be shown & captured.')
    args = parser.parse_args()

    # 1. parse args
    outdir = args.outdir
    outdir += '/' if outdir[-1] != '/' else ''
    print('OUTPUT DIR : ' + outdir)
    if not os.path.exists(outdir):
        os.mkdir(outdir)
        print('- {} was newly created.'.format(outdir))
    cap_w, cap_h = args.size
    cap_size = (cap_w, cap_h)
    print('CAPTURE SIZE (width, height) : {}'.format(cap_size))
    lr_flip = args.lr_flip

    # 2. capture
    # (initialize) create a VideoCapture Instance. Specify camera by the argment.
    cap = cv2.VideoCapture(0)
    is_capturing = False
    out_fpath = None
    video_writer = None # cv2.VideoWriter
    fps_counter = FpsCounter()
    # start loading camera's images
    while True:
        # Read a single frame from VideoCapture
        ret, frame = cap.read()
        if frame is None:
            raise Exception('Can not capture. Please check if the camera is connected correctly.')
        fps_counter.count()
        frame = cv2.resize(frame, cap_size)
        if lr_flip:
            # Mirror horizontally (reverse the column axis).
            frame = frame[:,::-1]
        # display some information
        frame_disp = np.copy(frame)
        cv2.putText(frame_disp, 'FPS : {:0.1f}'.format(fps_counter.cur_fps),
                    (10, 20), font, 0.5, c_normal)
        if is_capturing:
            cv2.putText(frame_disp, 'Now capturing', (10, 40), font, 0.5, c_caution)
            cv2.putText(frame_disp, 'Save Path : ' + out_fpath, (10, 60), font, 0.5, c_caution)
        else:
            cv2.putText(frame_disp, "Push 'Enter' to capture", (10, 40), font, 0.5, c_normal)
        # update
        cv2.imshow('webcam', frame_disp)
        if is_capturing:
            # The raw (annotation-free) frame is what gets recorded.
            video_writer.write(frame)
        # wait for key input for 1ms
        k = cv2.waitKey(1)
        if k == 27 or k == ord('q'): # Esc
            break
        elif k == 13: # Enter -> toggle recording on/off
            is_capturing = not is_capturing
            if is_capturing:
                out_fpath = outdir + 'cap_{}.mp4'.format(cur_datetime_str())
                video_writer = cv2.VideoWriter(out_fpath, cv2.VideoWriter_fourcc('m','p','4','v'), 30, cap_size)
                print('start capture')
            else:
                video_writer.release()
                print('stop capture')
    # Release Capture & Destroy all windows
    cap.release()
    # NOTE(review): video_writer is still None if recording never started,
    # so this release() would raise AttributeError — confirm / guard.
    video_writer.release()
    cv2.destroyAllWindows()
| true |
a62309bd515209ac2b6e742b0057677a0da521ff | Python | ndsev/zserio | /compiler/extensions/python/runtime/src/zserio/walker.py | UTF-8 | 25,089 | 3.125 | 3 | [
"BSD-3-Clause"
] | permissive | """
The module implements generic walker through given zserio object tree.
"""
import functools
import re
import typing
from zserio.exception import PythonRuntimeException
from zserio.typeinfo import TypeAttribute, MemberInfo, MemberAttribute
class WalkObserver:
    """
    Interface for observers which are called by the walker.

    All methods raise NotImplementedError; subclasses override the ones
    they care about (see DefaultWalkObserver for a no-op base).
    """

    def begin_root(self, compound: typing.Any) -> None:
        """
        Called for the root compound zserio object which is to be walked-through.

        :param compound: Root compound zserio object.
        """
        raise NotImplementedError()

    def end_root(self, compound: typing.Any) -> None:
        """
        Called at the end of just walked root compound zserio object.

        :param compound: Root compound zserio object.
        """
        raise NotImplementedError()

    def begin_array(self, array: typing.List[typing.Any], member_info: MemberInfo) -> None:
        """
        Called at the beginning of an array.

        Note that for None arrays (i.e. non-present optionals) the visit_value with None is called instead!

        :param array: Zserio array.
        :param member_info: Array member info.
        """
        raise NotImplementedError()

    def end_array(self, array: typing.List[typing.Any], member_info: MemberInfo) -> None:
        """
        Called at the end of an array.

        :param array: Zserio array.
        :param member_info: Array member info.
        """
        raise NotImplementedError()

    def begin_compound(self, compound: typing.Any, member_info: MemberInfo,
                       element_index: typing.Optional[int] = None) -> None:
        """
        Called at the beginning of an compound field object.

        Note that for None compounds (i.e. uninitialized or optionals) the visit_value method is called instead!

        :param compound: Compound zserio object.
        :param member_info: Compound member info.
        :param element_index: Element index in array or None if the compound is not in array.
        """
        raise NotImplementedError()

    def end_compound(self, compound: typing.Any, member_info: MemberInfo,
                     element_index: typing.Optional[int] = None) -> None:
        """
        Called at the end of just walked compound object.

        :param compound: Compound zserio object.
        :param member_info: Compound member info.
        :param element_index: Element index in array or None if the compound is not in array.
        """
        raise NotImplementedError()

    def visit_value(self, value: typing.Any, member_info: MemberInfo,
                    element_index: typing.Optional[int] = None) -> None:
        """
        Called when a simple (or an unset compound or array - i.e. None) value is reached.

        :param value: Simple value.
        :param member_info: Member info.
        :param element_index: Element index in array or None if the value is not in array.
        """
        raise NotImplementedError()
class WalkFilter:
    """
    Interface for filters which can influence the walking.

    Each before_* return value decides whether to descend; each after_*
    return value decides whether to continue with the next sibling.
    """

    def before_array(self, array: typing.List[typing.Any], member_info: MemberInfo) -> bool:
        """
        Called before an array.

        Note that for None arrays (i.e. non-present optionals) the before_value with None is called instead!

        :param array: Zserio array.
        :param member_info: Array member info.
        :returns: True when the walking should continue to the array.
        """
        raise NotImplementedError()

    def after_array(self, array: typing.List[typing.Any], member_info: MemberInfo) -> bool:
        """
        Called after an array.

        :param array: Zserio array.
        :param member_info: Array member info.
        :returns: True when the walking should continue to a next sibling, False to return to the parent.
        """
        raise NotImplementedError()

    def before_compound(self, compound: typing.Any, member_info: MemberInfo,
                        element_index: typing.Optional[int] = None) -> bool:
        """
        Called before a compound object.

        Note that for uninitialized compounds (i.e. None) the before_value method is called instead!

        :param compound: Compound zserio object.
        :param member_info: Compound member info.
        :param element_index: Element index in array or None if the compound is not in array.
        :returns: True when the walking should continue into the compound object, False otherwise.
        """
        raise NotImplementedError()

    def after_compound(self, compound: typing.Any, member_info: MemberInfo,
                       element_index: typing.Optional[int] = None) -> bool:
        """
        Called after a compound object.

        :param compound: Compound zserio object.
        :param member_info: Compound member info.
        :param element_index: Element index in array or None if the compound is not in array.
        :returns: True when the walking should continue to a next sibling, False to return to the parent.
        """
        raise NotImplementedError()

    def before_value(self, value: typing.Any, member_info: MemberInfo,
                     element_index: typing.Optional[int] = None) -> bool:
        """
        Called before a simple (or an unset compound or array - i.e. None) value.

        :param value: Simple value.
        :param member_info: Member info.
        :param element_index: Element index in array or None if the value is not in array.
        :returns: True when the walking should continue to the simple value, False otherwise.
        """
        raise NotImplementedError()

    def after_value(self, value: typing.Any, member_info: MemberInfo,
                    element_index: typing.Optional[int] = None) -> bool:
        """
        Called after a simple (or an unset compound or array - i.e. None) value.

        :param value: Simple value.
        :param member_info: Member info.
        :param element_index: Element index in array or None if the value is not in array.
        :returns: True when the walking should continue to a next sibling, False to return to the parent.
        """
        raise NotImplementedError()
class Walker:
    """
    Walker through zserio objects, based on generated type info (see -withTypeInfoCode).
    """

    def __init__(self, walk_observer: WalkObserver, walk_filter: typing.Optional[WalkFilter] = None) -> None:
        """
        Constructor.

        :param walk_observer: Observer to use during walking.
        :param walk_filter: Walk filter to use.
        """
        self._walk_observer = walk_observer
        self._walk_filter = walk_filter if walk_filter is not None else DefaultWalkFilter()

    def walk(self, zserio_object: typing.Any) -> None:
        """
        Walks given zserio compound object which must be generated with type_info
        (see -withTypeInfoCode options).

        :param zserio_object: Zserio object to walk.
        """
        if not hasattr(zserio_object, "type_info"):
            raise PythonRuntimeException("Walker: Type info must be enabled"
                                         " (see zserio option -withTypeInfoCode)!")

        type_info = zserio_object.type_info()
        if TypeAttribute.FIELDS not in type_info.attributes:
            raise PythonRuntimeException("Walker: Root object '" + type_info.schema_name +
                                         "' is not a compound type!")

        self._walk_observer.begin_root(zserio_object)
        self._walk_fields(zserio_object, type_info)
        self._walk_observer.end_root(zserio_object)

    def _walk_fields(self, zserio_object: typing.Any, type_info: typing.Any) -> None:
        """Walk all fields of a compound: only the active branch for
        unions/choices, every field for structures."""
        fields = type_info.attributes[TypeAttribute.FIELDS]
        if TypeAttribute.SELECTOR in type_info.attributes:
            # union or choice
            choice_tag = zserio_object.choice_tag
            if choice_tag != zserio_object.UNDEFINED_CHOICE:
                field = fields[choice_tag]
                self._walk_field(getattr(zserio_object, field.attributes[MemberAttribute.PROPERTY_NAME]),
                                 field)
            # else: uninitialized or empty branch
        else:
            # structure
            for field in fields:
                # A False result means the filter asked to return to the
                # parent — stop walking the remaining siblings.
                if not self._walk_field(getattr(zserio_object,
                                                field.attributes[MemberAttribute.PROPERTY_NAME]),
                                        field):
                    break

    def _walk_field(self, zserio_object: typing.Any, member_info: MemberInfo) -> bool:
        """Walk one field; arrays get begin/end_array callbacks around their
        elements, anything else (including None arrays) is a field value.
        Returns False when walking should not continue to the next sibling."""
        if zserio_object is not None and MemberAttribute.ARRAY_LENGTH in member_info.attributes:
            if self._walk_filter.before_array(zserio_object, member_info):
                self._walk_observer.begin_array(zserio_object, member_info)
                for index, element in enumerate(zserio_object):
                    if not self._walk_field_value(element, member_info, index):
                        break
                self._walk_observer.end_array(zserio_object, member_info)
            return self._walk_filter.after_array(zserio_object, member_info)
        else:
            return self._walk_field_value(zserio_object, member_info)

    def _walk_field_value(self, zserio_object: typing.Any, member_info: MemberInfo,
                          element_index: typing.Optional[int] = None) -> bool:
        """Walk a single value: recurse into non-None compounds, otherwise
        report it (including None) via visit_value.
        Returns False when walking should not continue to the next sibling."""
        type_info = member_info.type_info
        if zserio_object is not None and TypeAttribute.FIELDS in type_info.attributes:
            if self._walk_filter.before_compound(zserio_object, member_info, element_index):
                self._walk_observer.begin_compound(zserio_object, member_info, element_index)
                self._walk_fields(zserio_object, type_info)
                self._walk_observer.end_compound(zserio_object, member_info, element_index)
            return self._walk_filter.after_compound(zserio_object, member_info, element_index)
        else:
            if self._walk_filter.before_value(zserio_object, member_info, element_index):
                self._walk_observer.visit_value(zserio_object, member_info, element_index)
            return self._walk_filter.after_value(zserio_object, member_info, element_index)
class DefaultWalkObserver(WalkObserver):
    """
    Default walk observer which just does nothing.
    """
    # NOTE(review): the underscore prefix marking unused parameters is applied
    # inconsistently below (e.g. member_info vs _member_info); signatures are
    # kept byte-identical to avoid breaking keyword callers.

    def begin_root(self, _root: typing.Any) -> None:
        pass

    def end_root(self, _root: typing.Any) -> None:
        pass

    def begin_array(self, _array: typing.List[typing.Any], member_info: MemberInfo) -> None:
        pass

    def end_array(self, _array: typing.List[typing.Any], _member_info: MemberInfo) -> None:
        pass

    def begin_compound(self, _compound: typing.Any, member_info: MemberInfo,
                       element_index: typing.Optional[int] = None) -> None:
        pass

    def end_compound(self, _compound: typing.Any, _member_info: MemberInfo,
                     element_index: typing.Optional[int] = None) -> None:
        pass

    def visit_value(self, _value : typing.Any, member_info: MemberInfo,
                    element_index: typing.Optional[int] = None) -> None:
        pass
class DefaultWalkFilter(WalkFilter):
    """
    Default walk filter which filters nothing.
    """
    # Every hook returns True, so the walker visits the whole object tree.
    def before_array(self, _array: typing.List[typing.Any], _member_info: MemberInfo) -> bool:
        return True
    def after_array(self, _array: typing.List[typing.Any], _member_info: MemberInfo) -> bool:
        return True
    def before_compound(self, _compound: typing.Any, _member_info: MemberInfo,
                        _element_index: typing.Optional[int] = None) -> bool:
        return True
    def after_compound(self, _compound: typing.Any, _member_info: MemberInfo,
                       _element_index: typing.Optional[int] = None) -> bool:
        return True
    def before_value(self, _value: typing.Any, _member_info: MemberInfo,
                     _element_index: typing.Optional[int] = None) -> bool:
        return True
    def after_value(self, _value: typing.Any, _member_info: MemberInfo,
                    _element_index: typing.Optional[int] = None) -> bool:
        return True
class DepthWalkFilter(WalkFilter):
    """
    Walk filter which allows to walk only to the given maximum depth.
    """
    def __init__(self, max_depth: int):
        """
        Constructor.
        :param max_depth: Maximum depth to walk to.
        """
        self._max_depth = max_depth
        self._depth = 1
    def before_array(self, _array: typing.List[typing.Any], _member_info: MemberInfo) -> bool:
        return self._descend()
    def after_array(self, _array: typing.List[typing.Any], _member_info: MemberInfo) -> bool:
        return self._ascend()
    def before_compound(self, _compound: typing.Any, _member_info: MemberInfo,
                        _element_index: typing.Optional[int] = None) -> bool:
        return self._descend()
    def after_compound(self, _compound: typing.Any, _member_info: MemberInfo,
                       _element_index: typing.Optional[int] = None) -> bool:
        return self._ascend()
    def before_value(self, _value: typing.Any, _member_info: MemberInfo,
                     _element_index: typing.Optional[int] = None) -> bool:
        # Plain values do not open a nesting level; just check the current depth.
        return self._depth <= self._max_depth
    def after_value(self, _value: typing.Any, _member_info: MemberInfo,
                    _element_index: typing.Optional[int] = None) -> bool:
        return True
    def _descend(self) -> bool:
        # Enter one nesting level; allow it only while within the limit.
        within_limit = self._depth <= self._max_depth
        self._depth += 1
        return within_limit
    def _ascend(self) -> bool:
        # Leaving a nesting level never terminates the walk.
        self._depth -= 1
        return True
class RegexWalkFilter(WalkFilter):
    """
    Walk filter which allows to walk only paths matching the given regex.
    The path is constructed from field names within the root object, thus the root object
    itself is not part of the path.
    Array elements have the index appended to the path so that e.g. "compound.arrayField[0]" will match
    only the first element in the array "arrayField".
    """
    def __init__(self, path_regex: str) -> None:
        """
        Constructor.
        :param path_regex: Path regex to use for filtering.
        """
        # Stack of dotted-path components for the node currently being visited.
        self._current_path: typing.List[str] = []
        self._path_regex = re.compile(path_regex)
    def before_array(self, array: typing.List[typing.Any], member_info: MemberInfo) -> bool:
        self._current_path.append(member_info.schema_name)
        if self._path_regex.match(self._get_current_path()):
            return True  # the array itself matches
        # try to find match in each element and continue into the array only if some match is found
        # (note that array is never None)
        for i, element in enumerate(array):
            self._current_path[-1] = member_info.schema_name + f"[{i}]"
            if self._match_subtree(element, member_info):
                return True
        # restore the plain (index-free) component before reporting "no match"
        self._current_path[-1] = member_info.schema_name
        return False
    def after_array(self, _array: typing.List[typing.Any], _member_info: MemberInfo) -> bool:
        self._current_path.pop()
        return True
    def before_compound(self, compound: typing.Any, member_info: MemberInfo,
                        element_index: typing.Optional[int] = None) -> bool:
        self._append_path(member_info, element_index)
        if self._path_regex.match(self._get_current_path()):
            return True  # the compound itself matches
        return self._match_subtree(compound, member_info)
    def after_compound(self, _compound: typing.Any, member_info: MemberInfo,
                       element_index: typing.Optional[int] = None) -> bool:
        self._pop_path(member_info, element_index)
        return True
    def before_value(self, value: typing.Any, member_info: MemberInfo,
                     element_index: typing.Optional[int] = None) -> bool:
        self._append_path(member_info, element_index)
        return self._match_subtree(value, member_info)
    def after_value(self, _value: typing.Any, member_info: MemberInfo,
                    element_index: typing.Optional[int] = None) -> bool:
        self._pop_path(member_info, element_index)
        return True
    def _match_subtree(self, member: typing.Any, member_info: MemberInfo) -> bool:
        # Runs a nested walk over the member's subtree to see whether any
        # descendant path matches; simple values are matched directly.
        if member is not None and TypeAttribute.FIELDS in member_info.type_info.attributes:
            # is a not None compound, try to find match within its subtree
            subtree_regex_filter = self._SubtreeRegexFilter(self._current_path.copy(), self._path_regex)
            walker = Walker(DefaultWalkObserver(), subtree_regex_filter)
            walker.walk(member)
            return subtree_regex_filter.matches()
        else:
            # try to match a simple value or None compound
            return self._path_regex.match(self._get_current_path()) is not None
    def _get_current_path(self):
        return self._get_current_path_impl(self._current_path)
    def _append_path(self, member_info: MemberInfo, element_index: typing.Optional[int]) -> None:
        self._append_path_impl(self._current_path, member_info, element_index)
    def _pop_path(self, member_info: MemberInfo, element_index: typing.Optional[int]) -> None:
        self._pop_path_impl(self._current_path, member_info, element_index)
    # The *_impl statics below are shared with the nested _SubtreeRegexFilter
    # so both filters build paths identically.
    @staticmethod
    def _get_current_path_impl(current_path: typing.List[str]) -> str:
        return ".".join(current_path)
    @staticmethod
    def _append_path_impl(current_path: typing.List[str], member_info: MemberInfo,
                          element_index: typing.Optional[int]) -> None:
        if element_index is None:
            current_path.append(member_info.schema_name)
        else:
            current_path[-1] = member_info.schema_name + f"[{element_index}]"  # add index
    @staticmethod
    def _pop_path_impl(current_path: typing.List[str], member_info: MemberInfo,
                       element_index: typing.Optional[int]) -> None:
        if element_index is None:
            current_path.pop()
        else:
            current_path[-1] = member_info.schema_name  # just remove the index
    class _SubtreeRegexFilter(WalkFilter):
        """
        Walks whole subtree and in case of match stops walking. Used to check whether any path
        within the subtree matches given regex.
        """
        def __init__(self, current_path: typing.List[str], path_regex: typing.Pattern) -> None:
            self._current_path = current_path
            self._path_regex = path_regex
            self._matches = False
        def matches(self) -> bool:
            """
            Returns whether the subtree contains any matching value.
            :returns: True when the subtree contains a matching value, False otherwise.
            """
            return self._matches
        def before_array(self, _array: typing.List[typing.Any], member_info: MemberInfo) -> bool:
            self._current_path.append(member_info.schema_name)
            self._matches = self._path_regex.match(self._get_current_path()) is not None
            # terminate when the match is already found (note that array is never None here)
            return not self._matches
        def after_array(self, _array: typing.List[typing.Any], _member_info: MemberInfo) -> bool:
            self._current_path.pop()
            return not self._matches  # terminate when the match is already found
        def before_compound(self, _compound: typing.Any, member_info: MemberInfo,
                            element_index: typing.Optional[int] = None) -> bool:
            self._append_path(member_info, element_index)
            self._matches = self._path_regex.match(self._get_current_path()) is not None
            # terminate when the match is already found (note that compound is never None here)
            return not self._matches
        def after_compound(self, _compound: typing.Any, member_info: MemberInfo,
                           element_index: typing.Optional[int] = None) -> bool:
            self._pop_path(member_info, element_index)
            return not self._matches  # terminate when the match is already found
        def before_value(self, _value: typing.Any, member_info: MemberInfo,
                         element_index: typing.Optional[int] = None) -> bool:
            self._append_path(member_info, element_index)
            self._matches = self._path_regex.match(self._get_current_path()) is not None
            return not self._matches  # terminate when the match is already found
        def after_value(self, _value: typing.Any, member_info: MemberInfo,
                        element_index: typing.Optional[int] = None) -> bool:
            self._pop_path(member_info, element_index)
            return not self._matches  # terminate when the match is already found
        def _get_current_path(self) -> str:
            return RegexWalkFilter._get_current_path_impl(self._current_path)
        def _append_path(self, member_info: MemberInfo, element_index: typing.Optional[int]) -> None:
            RegexWalkFilter._append_path_impl(self._current_path, member_info, element_index)
        def _pop_path(self, member_info: MemberInfo, element_index: typing.Optional[int]) -> None:
            RegexWalkFilter._pop_path_impl(self._current_path, member_info, element_index)
class ArrayLengthWalkFilter(WalkFilter):
    """
    Walk filter which allows to walk only to the given maximum array length.
    """
    def __init__(self, max_array_length: int):
        """
        Constructor.
        :param max_array_length: Maximum array length to walk to.
        """
        self._max_array_length = max_array_length
    def before_array(self, _array: typing.List[typing.Any], _member_info: MemberInfo) -> bool:
        # Arrays themselves are always entered; only their elements are limited.
        return True
    def after_array(self, _array: typing.List[typing.Any], _member_info: MemberInfo) -> bool:
        return True
    def before_compound(self, _compound: typing.Any, _member_info: MemberInfo,
                        element_index: typing.Optional[int] = None) -> bool:
        return self._within_limit(element_index)
    def after_compound(self, _compound: typing.Any, _member_info: MemberInfo,
                       element_index: typing.Optional[int] = None) -> bool:
        return self._within_limit(element_index)
    def before_value(self, _value: typing.Any, _member_info: MemberInfo,
                     element_index: typing.Optional[int] = None) -> bool:
        return self._within_limit(element_index)
    def after_value(self, _value: typing.Any, _member_info: MemberInfo,
                    element_index: typing.Optional[int] = None) -> bool:
        return self._within_limit(element_index)
    def _within_limit(self, element_index: typing.Optional[int]) -> bool:
        # Non-array members (index is None) always pass; array elements pass
        # only while their index is below the configured maximum.
        if element_index is None:
            return True
        return element_index < self._max_array_length
class AndWalkFilter(WalkFilter):
    """
    Walk filter which implements composition of particular filters.
    The filters are called sequentially and logical and is applied on theirs results.
    Note that all filters are always called.
    """
    def __init__(self, walk_filters: typing.List[WalkFilter]) -> None:
        """
        Constructor.
        :param walk_filters: List of filters to use in composition.
        """
        self._walk_filters = walk_filters
    def before_array(self, array: typing.List[typing.Any], member_info: MemberInfo) -> bool:
        return self._apply_filters(lambda f: f.before_array(array, member_info))
    def after_array(self, array: typing.List[typing.Any], member_info: MemberInfo) -> bool:
        return self._apply_filters(lambda f: f.after_array(array, member_info))
    def before_compound(self, compound: typing.Any, member_info: MemberInfo,
                        element_index: typing.Optional[int] = None) -> bool:
        return self._apply_filters(lambda f: f.before_compound(compound, member_info, element_index))
    def after_compound(self, compound: typing.Any, member_info: MemberInfo,
                       element_index: typing.Optional[int] = None) -> bool:
        return self._apply_filters(lambda f: f.after_compound(compound, member_info, element_index))
    def before_value(self, value: typing.Any, member_info: MemberInfo,
                     element_index: typing.Optional[int] = None) -> bool:
        return self._apply_filters(lambda f: f.before_value(value, member_info, element_index))
    def after_value(self, value: typing.Any, member_info: MemberInfo,
                    element_index: typing.Optional[int] = None) -> bool:
        return self._apply_filters(lambda f: f.after_value(value, member_info, element_index))
    def _apply_filters(self, method):
        # Evaluate every filter eagerly first (no short-circuit, by contract:
        # "all filters are always called"), then AND the collected results.
        results = [method(walk_filter) for walk_filter in self._walk_filters]
        return all(results)
| true |
530d0f701ad20b9f15fdaf471da31fd8dc41eb47 | Python | souvikg10/spacy-fasttext | /load_fastText.py | UTF-8 | 2,165 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf8
"""Load vectors for a language trained using fastText
https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md
Compatible with: spaCy v2.0.0+
"""
from __future__ import unicode_literals
import plac
import numpy
import spacy
import random
from spacy.language import Language
# Coarse POS tag map used to register the tagger's labels.
TAG_MAP = {
    'N': {'pos': 'NOUN'},
    'V': {'pos': 'VERB'},
    'J': {'pos': 'ADJ'}
}
# Tiny Dutch toy corpus: (sentence, gold tag sequence) pairs for training.
TRAIN_DATA = [
    ("Ik zie mooie dingen", {'tags': ['N', 'V', 'J', 'N']}),
    ("Hij maakt goede muziek", {'tags': ['N','V', 'J', 'N']})
]
def main(vectors_loc=None, lang=None):
    """Load fastText word vectors into a blank spaCy pipeline and train a toy
    POS tagger on TRAIN_DATA, then save the pipeline to disk.

    :param vectors_loc: path to the .vec vector file; defaults to
        'vector/wiki.nl.vec' (the original accepted this parameter but
        ignored it and always used the hardcoded path).
    :param lang: spaCy language code for the blank pipeline; defaults to
        'nl' (the original ignored this parameter as well).
    """
    # create empty language class – this is required if you're planning to
    # save the model to disk and load it back later (models always need a
    # "lang" setting). Use 'xx' for blank multi-language class.
    nlp = spacy.blank(lang if lang is not None else 'nl')
    if vectors_loc is None:
        vectors_loc = 'vector/wiki.nl.vec'
    with open(vectors_loc, 'rb') as file_:
        # First line of a .vec file: "<row count> <dimension>".
        header = file_.readline()
        nr_row, nr_dim = header.split()
        dim = int(nr_dim)  # hoisted: used both for the vocab width and parsing
        nlp.vocab.reset_vectors(width=dim)
        for line in file_:
            line = line.rstrip().decode('utf8')
            # Split from the right so words containing spaces stay intact.
            pieces = line.rsplit(' ', dim)
            word = pieces[0]
            vector = numpy.asarray([float(v) for v in pieces[1:]], dtype='f')
            nlp.vocab.set_vector(word, vector)  # add the vectors to the vocab
    tagger = nlp.create_pipe('tagger')
    # Add the tags. This needs to be done before you start training.
    for tag, values in TAG_MAP.items():
        tagger.add_label(tag, values)
    nlp.add_pipe(tagger)
    optimizer = nlp.begin_training()
    for i in range(20):
        random.shuffle(TRAIN_DATA)
        losses = {}
        for text, annotations in TRAIN_DATA:
            nlp.update([text], [annotations], sgd=optimizer, losses=losses)
        print(losses)
    # test the trained model
    test_text = "ik wil mooie vrouwen"
    doc = nlp(test_text)
    print('Tags', [(t.text, t.tag_, t.pos_) for t in doc])
    print("Saved model to", 'nl_model_tagger')
    nlp.to_disk('/app/model')
# Script entry point: train with the default vector path and blank 'nl' pipeline.
if __name__ == '__main__':
    main()
4fddc9f28a62dc39770c077705b7be6381ad0969 | Python | kylapurcell/A01088856_1510_assignments | /A4/test_student.py | UTF-8 | 2,926 | 3.078125 | 3 | [] | no_license | from unittest import TestCase
from A4 import student
from unittest.mock import patch
import io
class TestStudent(TestCase):
    """Unit tests for A4.student.Student accessors, mutators, GPA and formatting."""
    def setUp(self):
        # Fresh fixtures per test: one fully populated student and one with
        # lowercase names and no grades (exercises formatting and empty-GPA paths).
        self.test_student = student.Student('Kyla', 'Purcell', 'A01088856', True, [80, 90, 75])
        self.test_student2 = student.Student('dahlia', 'billings', 'A01099976', False, [])
    def test_get_first_name(self):
        expected = "Kyla"
        actual = self.test_student.get_first_name()
        self.assertEqual(expected, actual)
    def test_get_last_name(self):
        expected = 'Purcell'
        actual = self.test_student.get_last_name()
        self.assertEqual(expected, actual)
    def test_get_student_number(self):
        expected = 'A01088856'
        actual = self.test_student.get_student_number()
        self.assertEqual(expected, actual)
    def test_get_status(self):
        expected = True
        actual = self.test_student.get_status()
        self.assertEqual(expected, actual)
    def test_get_grades_list(self):
        expected = [80, 90, 75]
        actual = self.test_student.get_grades_list()
        self.assertEqual(expected, actual)
    # stdout is patched so the printed report can be compared verbatim.
    @patch('sys.stdout', new_callable=io.StringIO)
    def test_print_student_info(self, mock_stdout):
        expected = 'Name: Kyla Purcell Student Number: A01088856 Status: True Grades: 80 90 75\n'
        self.test_student.print_student_info()
        self.assertEqual(mock_stdout.getvalue(), expected)
    def test_calculate_student_gpa(self):
        # Mean of [80, 90, 75].
        expected = 81.66666666666667
        actual = self.test_student.calculate_student_gpa()
        self.assertEqual(expected, actual)
    def test_calculate_student_gpa_empty(self):
        # An empty grade list is expected to yield the sentinel -1.
        expected = -1
        actual = self.test_student2.calculate_student_gpa()
        self.assertEqual(expected, actual)
    def test_set_student_grades(self):
        self.test_student.set_student_grades([70, 60, 100])
        self.assertEqual([70, 60, 100], self.test_student.get_grades_list())
    def test_set_student_first_name(self):
        self.test_student.set_student_first_name('Kylo')
        self.assertEqual('Kylo', self.test_student.get_first_name())
    def test_set_student_last_name(self):
        self.test_student.set_student_last_name('Purcel')
        self.assertEqual('Purcel', self.test_student.get_last_name())
    def test_set_student_status(self):
        self.test_student.set_student_status(False)
        self.assertEqual(False, self.test_student.get_status())
    def test_format_first_name(self):
        # Formatting is expected to capitalize the stored lowercase name.
        self.test_student2.format_first_name()
        self.assertEqual('Dahlia', self.test_student2.get_first_name())
    def test_format_last_name(self):
        self.test_student2.format_last_name()
        self.assertEqual('Billings', self.test_student2.get_last_name())
    def test_repr_(self):
        expected = 'Kyla Purcell A01088856 True 80 90 75'
        self.assertEqual(expected, self.test_student.__repr__())
| true |
214777220c89401a43433ca155b239d90e1f650a | Python | sysinit2811/self_taught | /python_dir/over/test/python_ex34.py | UTF-8 | 31 | 2.625 | 3 | [] | no_license |
# Bind the greeting text to a variable, then print it.
hi = "Hello, World!"
print(hi)
18168abafcb81aa8fc79501af6ca382b1d4add50 | Python | nogsantos/python-design-patterns | /tests/test_chain_of_responsibility.py | UTF-8 | 2,717 | 3.25 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from unittest import TestCase
from src.chain_of_responsibility.item import Item
from src.chain_of_responsibility.discount_calculator import DiscountCalculator, Budget
class TestChainOfResponsibility(TestCase):
    """Tests for Budget/Item accounting and the DiscountCalculator chain."""
    def setUp(self):
        # Fresh empty budget for every test.
        self.budget = Budget()
    def test_budget_it_should_calculate_the_total_of_items_values(self):
        self.budget.add(Item('Item #1', 100.0))
        self.budget.add(Item('Item #2', 100.0))
        self.budget.add(Item('Item #3', 100.0))
        self.assertEqual(self.budget.total, 300.0)
    def test_budget_it_should_get_a_tuple_of_items(self):
        self.budget.add(Item('Item #1', 100.0))
        self.assertEqual(type(self.budget.items), tuple)
    def test_budget_it_should_get_the_length_of_items(self):
        self.budget.add(Item('Item #1', 100.0))
        self.budget.add(Item('Item #2', 100.0))
        self.assertEqual(self.budget.items_len, 2)
    def test_budget_it_should_add_items_to_the_list(self):
        self.budget.add(Item('Item #1', 100.0))
        self.assertEqual(self.budget.items_len, 1)
        self.budget.add(Item('Item #2', 100.0))
        self.assertEqual(self.budget.items_len, 2)
    # Ten 100.0 items: expects a 100.0 discount on the 1000.0 total.
    def test_discount_it_should_calculate_the_budget_discount_when_the_budget_has_more_than_five_items(self):
        self.budget.add(Item('Item #1', 100.0))
        self.budget.add(Item('Item #2', 100.0))
        self.budget.add(Item('Item #3', 100.0))
        self.budget.add(Item('Item #4', 100.0))
        self.budget.add(Item('Item #5', 100.0))
        self.budget.add(Item('Item #6', 100.0))
        self.budget.add(Item('Item #7', 100.0))
        self.budget.add(Item('Item #8', 100.0))
        self.budget.add(Item('Item #9', 100.0))
        self.budget.add(Item('Item #10', 100.0))
        calculator = DiscountCalculator()
        discount = calculator.calculate(self.budget)
        self.assertEqual(discount, 100.0)
    # Three items totalling 550.0: expects a 38.50 discount.
    def test_discount_it_should_calculate_the_budget_discount_when_the_budget_has_less_than_five_items_but_the_value_is_more_than_five_hundred(
            self):
        self.budget.add(Item('Item #1', 100.0))
        self.budget.add(Item('Item #2', 50.0))
        self.budget.add(Item('Item #3', 400.0))
        calculator = DiscountCalculator()
        discount = calculator.calculate(self.budget)
        self.assertEqual(discount, 38.50)
    # Few items and total below the threshold: no discount applies.
    def test_discount_it_should_had_no_discount_value(self):
        self.budget.add(Item('Item #1', 100.0))
        self.budget.add(Item('Item #2', 100.0))
        self.budget.add(Item('Item #3', 300.0))
        calculator = DiscountCalculator()
        discount = calculator.calculate(self.budget)
        self.assertEqual(discount, 0.0)
| true |
37982490c1cf3bdba1aaf1d02060f98072ecb8a9 | Python | texus/codingame | /Community Puzzles/May the Triforce be with you.py | UTF-8 | 547 | 3.28125 | 3 | [] | no_license | n = int(input())
piramidWidth = 1 + 2 * (n - 1)      # base width of a single triangle
totalWidth = 2 * piramidWidth + 1   # base width of the whole triforce
# Apex row: a leading '.' protects the left margin, the star sits centred.
print('.' + ' ' * (2 * (n - 1)) + '*')
# Remaining rows of the upper triangle, right-justified to stay centred.
for row in range(1, n):
    stars = 2 * row + 1
    print(('*' * stars).rjust((totalWidth - stars) // 2 + stars))
# The two lower triangles printed side by side, one star-row per line.
for row in range(n):
    stars = 2 * row + 1
    lead = (piramidWidth - stars) // 2
    gap = 2 * lead + 1
    print(' ' * lead + '*' * stars + ' ' * gap + '*' * stars)
| true |
2610b18dd510a7da3fa14dd18316bb43763d55e0 | Python | sinyi96/DA_Team3 | /Recon.py | UTF-8 | 508 | 2.859375 | 3 | [] | no_license | import requests
# Set the target webpage
url = 'http://172.18.58.238/creative/'
r = requests.get(url)
headers = {
'User-Agent': 'Mobile'
}
# Display Output Get and Ok
print("Status code:")
print("\t *", r.status_code,headers)
# Display Website Header
h = requests.head(url)
for x in h.headers:
print("\t", x, '.', h.headers[x])
# Modify the headers user-agent
headers = {
'User-Agent': 'Mobile'
}
url2 = 'http://172.18.58.238/creative/'
rh = requests.get(url2, headers=headers)
print(rh.text)
| true |
cd44a820430475d00a2dd03d3fd2fd43e2c6f0e8 | Python | artorious/python3_dojo | /custom_modules/get_positive_number_from_user.py | UTF-8 | 799 | 4.125 | 4 | [] | no_license | #!/usr/bin/env python3
""" Prompt user for a positve number ( integer or floating-point number).
Returns value (float)
"""
def get_positive_num():
    """Keep prompting until the user supplies a positive number.

    Non-numeric entries and non-positive values each print a hint and
    re-prompt. Returns the accepted value as a float.
    """
    while True:
        try:
            value = float(input('Enter a positive number: '))
        except ValueError:
            print('Expected a number')
            continue
        if value > 0:
            return value
        print('Please enter a positive number')
# Allow running this module directly as a quick manual test.
if __name__ == '__main__':
    get_positive_num()
| true |
3680d39a8072c669802d784805c0c8aa589b78db | Python | jkmrto/EulerProject | /Funciones.py | UTF-8 | 2,351 | 3.4375 | 3 | [] | no_license | def imprime(vector):
"function_docstring"
print("Vector: {0}, Longitud: {1}.".format(vector, len(vector)))
return
def is_div_list(list_num, div):
    """Return the elements of ``list_num`` that are evenly divisible by ``div``."""
    return [num for num in list_num if num % div == 0]
def is_div(num, div):
    """Return True when ``num`` is evenly divisible by ``div``."""
    return num % div == 0
def next_prime_factor():
    # TODO: unimplemented stub -- kept only so the name stays importable.
    pass
def factores_primos(siguiente):
    """Return the prime factorisation of ``siguiente`` (with multiplicity).

    Bug fixed: the original assigned ``inicio = resultado`` (the *list*) as
    the start of the trial range, so any composite input raised TypeError on
    the second pass, and a small remaining factor produced an empty range and
    an infinite loop. This rewrite uses standard trial division.

    :param siguiente: integer to factorise (values < 2 yield an empty list).
    :returns: list of prime factors in non-decreasing order, e.g. 12 -> [2, 2, 3].
    """
    resultado = []
    divisor = 2
    # Only divisors up to sqrt(remaining value) need to be tried.
    while divisor * divisor <= siguiente:
        while siguiente % divisor == 0:
            resultado.append(divisor)
            siguiente //= divisor
        divisor += 1
    # Whatever is left (> 1) is itself prime.
    if siguiente > 1:
        resultado.append(siguiente)
    return resultado
#Dado un vector de factores primos, obtiene los posibles multiplos
#hasta un maximo
#
# def multiplos(v_factores, maximo):
#
# nada = maximo
# index = 0
# resultado = []
# print("longitud multiplos: {}".format(len(v_factores)))
#
# for mul in range(1, len(v_factores) + 1, 1): # Indica el numero de multiplicaciones
#
# for i in range(0, len(v_factores), 1):
# resultado.append(1)
# index = len(resultado) - 1
# print("nuevo, mul: {}, i: {}".format(mul,i))
#
# for x in range(0, mul, 1): # multip
# resultado[index] = resultado[index] * v_factores[x]
# print("next: {}".format(resultado))
#
#
#
# return [resultado]
# def get_count(v):
#
# resultado = []
#
# for i in range (0, len(v), 1):
# encontrado = 0
# for i in range(0, len(resultado,1))
# encontrado = 1
# resultado.append([1,1]):
# break
#
# if(encontrado == 1):
#
# else
#
# return resultado
#def is_(list_num, div_list):
# for i in div_list:
# list_num = is_div(list_num, i)
#
# return list_num | true |
183be557ef520b24979b8b5170c387015ba125ff | Python | JJAleman/python-practice | /Python/basic_operators.py | UTF-8 | 297 | 4.53125 | 5 | [] | no_license | """
1. Assign two different types to the variables "a" and "b".
2. Use basic operators to create a list that contains three instances of "a" and three instances of "b".
"""
# Modify the code below to meet the requirements outlined above
a = "juan"
b = 2
# Use list repetition + concatenation so the result is an actual list holding
# three instances of each value, as the exercise asks. (The original string
# concatenation produced the string "juan6" instead of a list.)
my_list = [a] * 3 + [b] * 3
print(my_list)
3ca0625ed86aef62909d1aafe9a2763941b6e5dc | Python | freemanhm/session-knn-ae | /generate_folders.py | UTF-8 | 1,090 | 2.765625 | 3 | [
"MIT"
] | permissive | import os
import sys
def generate_folder_structure(dataset):
    """Create the data/plot directory tree the pipeline expects for *dataset*.

    ``exist_ok=True`` makes the call idempotent: the original crashed with
    FileExistsError on the shared ``plots/results`` directory as soon as a
    second dataset was processed (or the script was re-run).

    :param dataset: dataset name used as the subfolder under ``data/``.
    """
    print("Generating Folders for: " + dataset)
    root = "data"
    base = root + "/" + dataset
    subdirs = [
        "plots/results",                          # shared across datasets
        base + "/raw",
        base + "/interim/infer",
        base + "/interim/models",
        base + "/interim/predict/base",
        base + "/interim/predict/hyperparam",
        base + "/processed/eval/all",
        base + "/processed/eval/next",
        base + "/processed/eval/base/all",
        base + "/processed/eval/base/next",
        base + "/processed/eval/hyperparam/all",
        base + "/processed/eval/hyperparam/next",
    ]
    for path in subdirs:
        os.makedirs(path, exist_ok=True)
# CLI entry point: each positional argument is a dataset name to scaffold.
if __name__ == "__main__":
    if len(sys.argv) == 1:
        print("Usage: python generate_folders.py dataset1 [dataset2 ...]")
    else:
        for dataset in sys.argv[1:]:
            generate_folder_structure(dataset)
| true |
a23d7d83048a5dc35bb352467a4e1cb7c8127c16 | Python | ruidazeng/online-judge | /Kattis/timeloop.py | UTF-8 | 64 | 2.984375 | 3 | [
"MIT"
] | permissive | for i in range(int(input())): print(str(i + 1) + " Abracadabra") | true |
401b823efd9dbc306a6df8fe257704809d797132 | Python | unnievarghese/originalpythonluminar | /pythondatastructures/dictionary/nested_dict.py | UTF-8 | 786 | 2.671875 | 3 | [] | no_license | employee={101:{'id':101,'name':'ajay','designation':'qa','sallary':15000},
102:{'id':102,'name':'vijay','designation':'developer','sallary':15000},
103:{'id':103,'name':'sumesh','designation':'coustomer care','sallary':15000},
104:{'id':104,'name':'virat','designation':'developer','sallary':15000},
105:{'id':105,'name':'vinod','designation':'accountant','sallary':15000}}
def Print(**arg):
    """Look up one attribute of one employee (keywords: id, property) and print
    either the value or a not-found message."""
    emp_id = arg.get('id')
    attribute = arg.get('property')
    # Guard clause: unknown employee id.
    if emp_id not in employee:
        print('employee not found')
        return
    record = employee[emp_id]
    if attribute in record:
        print(record[attribute])
    else:
        print('for employee', emp_id, 'employee details not found')
a=int(input())
b=input()
Print(id=a,property=b) | true |
90e7a6b412a03e69bd66f3b4655c015d2f6ae587 | Python | Gio1995/CodeUs | /codeus.py | UTF-8 | 9,415 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | from flask import Flask, session, render_template, request, redirect, url_for, g
import os
from flask_pymongo import PyMongo #Modulo per usare il database MongoDB
from flask_mail import Mail, Message #Modulo per inviare e configurare il servizio mail
import hashlib #Modulo per criptare una stringa in sha256
from flask_socketio import SocketIO, send, join_room, leave_room, emit #Moduli della libreria flask_socketio
# Flask application object.
app = Flask(__name__)
# Random secret key used to sign the session cookie.
app.secret_key = os.urandom(24)
# Socket.IO wrapper around the Flask app (eventlet async mode).
socketio = SocketIO(app, async_mode='eventlet')
# MongoDB connection string and client.
app.config['MONGO_URI'] = 'mongodb://localhost:27017/CodeUs'
mongo = PyMongo(app)
# Put your own e-mail address and password here.
# NOTE(review): placeholder credentials hardcoded in source -- load them from
# environment variables instead of committing real values.
email = 'mail'
password = 'password'
# Mail service configuration (Gmail over SSL).
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = email
app.config['MAIL_PASSWORD'] = password
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
# Mail sender object.
mail = Mail(app)
# Landing page with the login form.
# On POST: drop any previous user from the session, look the credentials up
# in MongoDB (password compared as its SHA-256 hex digest) and, on success,
# start a session and redirect to the protected area; otherwise index.html
# is rendered again.
# NOTE(review): passwords are stored as plain unsalted SHA-256 -- consider a
# salted KDF (werkzeug.security / bcrypt) instead.
@app.route("/", methods = ['GET', 'POST'])
def index():
    """Render the login page and authenticate POSTed credentials."""
    if request.method == 'POST':
        session.pop('user', None)
        u = mongo.db.users.find_one({"user" : request.form['username'], "password" : hashlib.sha256((request.form['password']).encode('utf-8')).hexdigest()})
        if u:
            session['user'] = request.form['username']
            return redirect(url_for('protected'))
    return render_template("index.html")
# Static informational page.
@app.route("/info")
def info():
    """Render info.html."""
    return render_template("info.html")
# Static contact page.
@app.route("/contact")
def contact():
    """Render contact.html."""
    return render_template("contact.html")
# User registration.
# On POST: drop any current session user, then check whether the username or
# e-mail already exists. If not, and the two password fields match, store the
# new user (password as SHA-256 hex digest), start a session, send a welcome
# e-mail and redirect to the protected area. In every other case
# register.html is rendered again (no error message is surfaced to the user).
@app.route("/register", methods = ['GET', 'POST'])
def register():
    """Handle the registration form and create the account."""
    if request.method == 'POST':
        session.pop('user', None)
        u = mongo.db.users.find_one({"user" : request.form['username']}) or mongo.db.users.find_one({"email" : request.form['email']})
        if not u:
            if request.form['password1'] == request.form['password2']:
                mongo.db.users.insert({'user' : request.form['username'], 'email' : request.form['email'], 'password' : hashlib.sha256((request.form['password1']).encode('utf-8')).hexdigest()})
                session['user'] = request.form['username']
                # Welcome e-mail (Italian body kept verbatim -- user-facing text).
                msg = Message('Hello', sender = email, recipients = [request.form['email']])
                msg.body = "Salve " + request.form['username'] + " grazie per esserti iscritto a CodeUs."
                mail.send(msg)
                return redirect(url_for("protected"))
    return render_template("register.html")
# Private dashboard: lists the projects owned by the logged-in user
# (checked via g.user); anonymous visitors are sent back to the login page.
@app.route("/protected")
def protected():
    """Render the user's dashboard with their documents."""
    if g.user:
        documents = mongo.db.doc.find({"user" : g.user})
        return render_template('protected.html', user = g.user, documents = documents)
    return redirect(url_for('index'))
# Open the in-browser editor for one of the user's projects: verifies that
# the logged-in user actually owns <name> before rendering project.html.
# Unknown projects redirect back to the dashboard; anonymous visitors to the
# login page.
@app.route("/protected/project=<name>")
def project(name):
    """Render the editor page for the project *name* owned by g.user."""
    if g.user:
        # One lookup serves both as the ownership check and as the data
        # source (the original ran the identical find_one query twice).
        doc_info = mongo.db.doc.find_one({'user': g.user, 'document': name})
        if doc_info:
            return render_template("project.html", user=g.user, document=doc_info['document'])
        return redirect(url_for('protected'))
    return redirect(url_for('index'))
# Account settings: lets the logged-in user change their password.
# On POST, if the two password fields match, the user record is rewritten
# (same username and e-mail, new SHA-256 digest) and the user returns to the
# dashboard; otherwise the form is rendered again. Anonymous users go to index.
@app.route("/protected/modifica", methods = ['GET', 'POST'])
def modifica():
    """Render/process the password-change form for the current user."""
    if g.user:
        if request.method == 'POST':
            # Current record, needed to preserve the e-mail in the update.
            temp = mongo.db.users.find_one({"user" : session['user']})
            if request.form['password1'] == request.form['password2']:
                mongo.db.users.update({'user' : session['user']}, {'user' : session['user'], 'email' : temp['email'], 'password' : hashlib.sha256((request.form['password1']).encode('utf-8')).hexdigest()})
                return redirect(url_for('protected'))
            else:
                return render_template("modifica.html", user = g.user)
        return render_template("modifica.html", user = g.user)
    return redirect(url_for('index'))
# Project creation:
# On POST, check that no project with the submitted name already exists for
# this user; if the name is free, insert the new (empty) project and redirect
# to the protected page.  On GET, or when the name is taken, render the
# creation form again.  Anonymous users are redirected to the index page.
@app.route("/protected/new_project", methods = ['GET', 'POST'])
def new_project():
    if g.user:
        if request.method == 'POST':
            u = mongo.db.doc.find_one({"user" : session['user'], 'document' : request.form['Nome']})
            if not u:
                # NOTE(review): collection.insert() is deprecated in PyMongo 3+
                # in favour of insert_one() -- confirm driver version.
                mongo.db.doc.insert({"user" : session['user'], "document" : request.form['Nome'], "content":''})
                return redirect(url_for('protected'))
        return render_template('new_project.html', user = g.user)
    return redirect(url_for('index'))
# Attach the session user (if any) to flask.g before every request.
@app.before_request
def before_request():
    # g.user stays None for anonymous visitors; the views above use it
    # as their login check.
    g.user = None
    if 'user' in session:
        g.user = session['user']
# Remove the user name from the session and go back to the index page.
@app.route("/logout")
def logout():
    session.pop('user', None)
    return redirect(url_for('index'))
# Socket.IO "message" handler: broadcast msg['data'] to the room msg['room'].
@socketio.on("message")
def text(msg):
    room = msg['room']
    send(msg['data'], room=room)
#********************* room management ***********
# Socket.IO "join" handler: the client supplies its user name and the project
# name; the project name doubles as the room name.  After join_room(), an
# entry notice is broadcast to everyone already in that room.
@socketio.on('join')
def join(data):
    username = data['username']
    room = data['project']
    join_room(room)
    send(username + ' has entered the room.', room=room)
#******************* rooms link ******************
# List all documents so a logged-in user can pick a room to join.
# NOTE(review): mongo.db.doc.find() returns every user's documents, not just
# the current user's -- confirm this exposure is intentional.
@app.route('/room_link')
def Rooms():
    if g.user:
        u = mongo.db.doc.find()
        return render_template('rooms.html', dati=u)
    return redirect(url_for('index'))
# If the user is logged in, render join_project.html with the user name and
# the chosen project name; otherwise redirect to the index page.
@app.route("/room_link/proj=<name>")
def link_proj(name):
    if g.user:
        return render_template('join_project.html', user = g.user, document = name)
    return redirect(url_for('index'))
# Server start-up:
# When this file is run as the main module, start the Socket.IO server with
# debug enabled and bound to all network interfaces.
# NOTE(review): debug=True plus host='0.0.0.0' exposes the Werkzeug debugger
# to the network -- confirm this never runs in production.
if __name__ == '__main__':
    socketio.run(app, debug=True, host = '0.0.0.0')
| true |
89f49edee69abf6009aed133d5ca0a2f4c72ff3b | Python | tul1/py-mower | /tests/unit/resources/models/test_mower_model.py | UTF-8 | 13,020 | 2.921875 | 3 | [] | no_license | import pytest
from unittest import TestCase
from mower.resources.models.directions import RelativeDirection, OrdinalDirection
from mower.resources.models.mower_model import MowerModel, MowerPosition
from mower.resources.models.lawn_model import LawnDimensions
from mower.utils.exceptions import MowerModelLoadError, MowerModelError
class TestMowerModel(TestCase):
    """MowerModel Test.

    Uses ``assertEqual`` throughout: the ``assertEquals`` alias is
    deprecated and removed in Python 3.12.
    """
    def test_from_initial_position_str(self):
        """Test building a mower model from initial position string."""
        # Given
        raw_init_pos = ' 2 2 N '
        # When
        mower_model = MowerModel.from_initial_position_str(raw_init_pos)
        # Then
        expected_position = (2, 2, OrdinalDirection.NORTH)
        self.assertEqual(expected_position, mower_model.position)
        self.assertEqual([], mower_model.directions)

    def test_from_initial_position_str_raises_on_wrong_direction(self):
        """Test that malformed initial-position strings raise MowerModelLoadError."""
        self.assertRaises(MowerModelLoadError,
                          MowerModel.from_initial_position_str,
                          ' ')
        self.assertRaises(MowerModelLoadError,
                          MowerModel.from_initial_position_str,
                          '2 d N')
        self.assertRaises(MowerModelLoadError,
                          MowerModel.from_initial_position_str,
                          'e 3 N')
        self.assertRaises(MowerModelLoadError,
                          MowerModel.from_initial_position_str,
                          '2 3 2')
        self.assertRaises(MowerModelLoadError,
                          MowerModel.from_initial_position_str,
                          'adsasdasdasd')
        self.assertRaises(MowerModelLoadError,
                          MowerModel.from_initial_position_str,
                          '2223123123')
        self.assertRaises(MowerModelLoadError,
                          MowerModel.from_initial_position_str,
                          '22222222222 2222222332 NSADS')

    def test_extend_directions_from_str(self):
        """Test extend direction to mower model from string."""
        # Given
        raw_directions = ' L FR B BRF LL FFR R '
        mower_model = MowerModel(position=(1, 1, OrdinalDirection.NORTH))
        # When
        mower_model = MowerModel.extend_directions_from_str(mower_model, raw_directions)
        # Then
        expected_directions = [RelativeDirection.LEFT, RelativeDirection.FRONT, RelativeDirection.RIGHT, RelativeDirection.BACK,
                               RelativeDirection.BACK, RelativeDirection.RIGHT, RelativeDirection.FRONT, RelativeDirection.LEFT,
                               RelativeDirection.LEFT, RelativeDirection.FRONT, RelativeDirection.FRONT, RelativeDirection.RIGHT,
                               RelativeDirection.RIGHT]
        self.assertEqual(expected_directions, mower_model.directions)

    def test_extend_directions_from_str_returns_base_direction_list_on_none_directions_input(self):
        """Test extend directions from string when empty direction but base mower has directions."""
        # Given
        raw_directions = ' '
        mower_model = MowerModel(position=(1, 1, OrdinalDirection.NORTH),
                                 directions=[RelativeDirection.BACK, RelativeDirection.FRONT])
        # When
        mower_model = MowerModel.extend_directions_from_str(mower_model, raw_directions)
        # Then
        expected_directions = [RelativeDirection.BACK, RelativeDirection.FRONT]
        self.assertEqual(expected_directions, mower_model.directions)

    def test_extend_directions_from_str_returns_empty_list_on_none_directions_input(self):
        """Test extend directions from string when empty direction."""
        # Given
        raw_directions = ' '
        mower_model = MowerModel(position=(1, 1, OrdinalDirection.NORTH))
        # When
        mower_model = MowerModel.extend_directions_from_str(mower_model, raw_directions)
        # Then
        expected_directions = []
        self.assertEqual(expected_directions, mower_model.directions)

    def test_translate_mower_position_moving_foreward(self):
        """Test translating the mower forward, including clamping at lawn limits."""
        # Mower moves 1 position
        position = MowerModel.translate_mower_position(MowerPosition(1, 1, OrdinalDirection.NORTH),
                                                       1,
                                                       RelativeDirection.FRONT,
                                                       LawnDimensions(3, 5))
        self.assertEqual((1, 2, OrdinalDirection.NORTH), position)
        # Negative step counts are expected to behave like positive ones
        position = MowerModel.translate_mower_position(MowerPosition(1, 1, OrdinalDirection.NORTH),
                                                       -1,
                                                       RelativeDirection.FRONT,
                                                       LawnDimensions(3, 5))
        self.assertEqual((1, 2, OrdinalDirection.NORTH), position)
        # Mower reaches upper limit
        position = MowerModel.translate_mower_position(MowerPosition(1, 1, OrdinalDirection.NORTH),
                                                       10,
                                                       RelativeDirection.FRONT,
                                                       LawnDimensions(3, 5))
        self.assertEqual((1, 4, OrdinalDirection.NORTH), position)
        # Mower reaches lower limit
        position = MowerModel.translate_mower_position(MowerPosition(1, 1, OrdinalDirection.SOUTH),
                                                       10,
                                                       RelativeDirection.FRONT,
                                                       LawnDimensions(3, 5))
        self.assertEqual((1, 0, OrdinalDirection.SOUTH), position)
        # Mower reaches further right limit
        position = MowerModel.translate_mower_position(MowerPosition(1, 1, OrdinalDirection.EAST),
                                                       10,
                                                       RelativeDirection.FRONT,
                                                       LawnDimensions(3, 5))
        self.assertEqual((2, 1, OrdinalDirection.EAST), position)
        # Mower reaches further left limit
        position = MowerModel.translate_mower_position(MowerPosition(1, 1, OrdinalDirection.WEST),
                                                       10,
                                                       RelativeDirection.FRONT,
                                                       LawnDimensions(3, 5))
        self.assertEqual((0, 1, OrdinalDirection.WEST), position)

    def test_translate_mower_position_moving_backward(self):
        """Test translating the mower backward, including clamping at lawn limits."""
        # Mower moves 1 position
        position = MowerModel.translate_mower_position(MowerPosition(1, 1, OrdinalDirection.NORTH),
                                                       1,
                                                       RelativeDirection.BACK,
                                                       LawnDimensions(3, 5))
        self.assertEqual((1, 0, OrdinalDirection.NORTH), position)
        # Negative step counts are expected to behave like positive ones
        position = MowerModel.translate_mower_position(MowerPosition(1, 1, OrdinalDirection.NORTH),
                                                       -1,
                                                       RelativeDirection.BACK,
                                                       LawnDimensions(3, 5))
        self.assertEqual((1, 0, OrdinalDirection.NORTH), position)
        # Mower reaches upper limit
        position = MowerModel.translate_mower_position(MowerPosition(1, 1, OrdinalDirection.NORTH),
                                                       10,
                                                       RelativeDirection.BACK,
                                                       LawnDimensions(3, 5))
        self.assertEqual((1, 0, OrdinalDirection.NORTH), position)
        # Mower reaches lower limit
        position = MowerModel.translate_mower_position(MowerPosition(1, 1, OrdinalDirection.SOUTH),
                                                       10,
                                                       RelativeDirection.BACK,
                                                       LawnDimensions(3, 5))
        self.assertEqual((1, 4, OrdinalDirection.SOUTH), position)
        # Mower reaches further right limit
        position = MowerModel.translate_mower_position(MowerPosition(1, 1, OrdinalDirection.EAST),
                                                       10,
                                                       RelativeDirection.BACK,
                                                       LawnDimensions(3, 5))
        self.assertEqual((0, 1, OrdinalDirection.EAST), position)
        # Mower reaches further left limit
        position = MowerModel.translate_mower_position(MowerPosition(1, 1, OrdinalDirection.WEST),
                                                       10,
                                                       RelativeDirection.BACK,
                                                       LawnDimensions(3, 5))
        self.assertEqual((2, 1, OrdinalDirection.WEST), position)

    def test_translate_mower_position_raises_MowerModelError(self):
        """Test when translate raises exception because of wrong direction."""
        self.assertRaises(MowerModelError,
                          MowerModel.translate_mower_position,
                          (1, 1, OrdinalDirection.NORTH),
                          10,
                          RelativeDirection.LEFT,
                          (2, 2))
        self.assertRaises(MowerModelError,
                          MowerModel.translate_mower_position,
                          (1, 1, OrdinalDirection.NORTH),
                          10,
                          RelativeDirection.RIGHT,
                          (2, 2))

    def test_rotate_mower_position_clockwise(self):
        """Test rotating the mower clockwise through all four directions."""
        # Rotate North to East
        position = MowerModel.rotate_mower_position((1, 1, OrdinalDirection.NORTH),
                                                    RelativeDirection.RIGHT)
        self.assertEqual((1, 1, OrdinalDirection.EAST), position)
        # Rotate East to South
        position = MowerModel.rotate_mower_position((1, 1, OrdinalDirection.EAST),
                                                    RelativeDirection.RIGHT)
        self.assertEqual((1, 1, OrdinalDirection.SOUTH), position)
        # Rotate South to West
        position = MowerModel.rotate_mower_position((1, 1, OrdinalDirection.SOUTH),
                                                    RelativeDirection.RIGHT)
        self.assertEqual((1, 1, OrdinalDirection.WEST), position)
        # Rotate West to North
        position = MowerModel.rotate_mower_position((1, 1, OrdinalDirection.WEST),
                                                    RelativeDirection.RIGHT)
        self.assertEqual((1, 1, OrdinalDirection.NORTH), position)

    def test_rotate_mower_position_anticlockwise(self):
        """Test rotating the mower anticlockwise through all four directions."""
        # Rotate North to West
        position = MowerModel.rotate_mower_position((1, 1, OrdinalDirection.NORTH),
                                                    RelativeDirection.LEFT)
        self.assertEqual((1, 1, OrdinalDirection.WEST), position)
        # Rotate West to South
        position = MowerModel.rotate_mower_position((1, 1, OrdinalDirection.WEST),
                                                    RelativeDirection.LEFT)
        self.assertEqual((1, 1, OrdinalDirection.SOUTH), position)
        # Rotate South to East
        position = MowerModel.rotate_mower_position((1, 1, OrdinalDirection.SOUTH),
                                                    RelativeDirection.LEFT)
        self.assertEqual((1, 1, OrdinalDirection.EAST), position)
        # Rotate East to North
        position = MowerModel.rotate_mower_position((1, 1, OrdinalDirection.EAST),
                                                    RelativeDirection.LEFT)
        self.assertEqual((1, 1, OrdinalDirection.NORTH), position)

    def test_rotate_mower_position_raises_MowerModelError(self):
        """Test when rotate raises exception because of wrong direction."""
        self.assertRaises(MowerModelError,
                          MowerModel.rotate_mower_position,
                          (1, 1, OrdinalDirection.NORTH),
                          RelativeDirection.BACK)
        self.assertRaises(MowerModelError,
                          MowerModel.rotate_mower_position,
                          (1, 1, OrdinalDirection.NORTH),
                          RelativeDirection.FRONT)
| true |
2769efec8826410abdb8b363193d61f54a86bfdd | Python | xiwei26/CS510 | /PA1/a1.py | UTF-8 | 2,132 | 2.859375 | 3 | [] | no_license | import sys
import numpy as np
import cv2
import os
# Python 2 script: crops a moving rectangular region out of a video by
# applying a per-frame affine transform, writing the result to out_file.
#####input and output file directories and names#####
if len(sys.argv) != 3:
    print "Usage: python a1.py <in_file> <out_file>"
    sys.exit()
in_file=sys.argv[1]
out_file=sys.argv[2]
# Remove a stale output file so VideoWriter starts from scratch.
if os.path.isfile(out_file):
    os.remove(out_file)
#####user input#####
start_frame = int(raw_input("Please tell me the start frame (0-based): "))
end_frame = int(raw_input("Please tell me the last frame (0-based): "))
startX = int(raw_input("what's the object's start x: "))
startY = int(raw_input("what's the object's start y: "))
height = int(raw_input("what is the object's height: "))
width = int(raw_input("what is the object's width: "))
speed_in_x= float(raw_input("what's the velosity in x axis: ")) #can be double
speed_in_y = float(raw_input("what's the velosity in y axis: ")) #can be double
output_ht = int(raw_input("what is the output video's height: "))
output_wd = int(raw_input("what is the output video's width: "))
#####read video and information#####
cap = cv2.VideoCapture(in_file)
if cap.isOpened()==False:
    print("Can not open the video")
    sys.exit()
# Reuse the input's fps and codec for the output file.
total_frame_num = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
fps = int(cap.get(cv2.cv.CV_CAP_PROP_FPS))
codec = int(cap.get(cv2.cv.CV_CAP_PROP_FOURCC))
out = cv2.VideoWriter(out_file, codec, fps, (output_wd,output_ht))
if out.isOpened() == False:
    print("Cannot open output file!")
    sys.exit()
#####processing video#####
for i in range (0, end_frame + 1):
    ret, frame = cap.read()
    if(i >= start_frame):
        # Current top-left corner of the tracked box, moving linearly
        # from (startX, startY) at the configured velocity.
        currX = int(startX + speed_in_x * (i - start_frame) ) #Better this way if speed_in_x is a double
        currY = int(startY + speed_in_y * (i - start_frame) )
        # Map three corners of the box onto the output frame corners.
        pts1 = np.float32([[currX, currY],[currX, currY+height-1],[currX+width-1, currY]])
        pts2 = np.float32([[0,0],[0,output_ht-1],[output_wd-1,0]])
        M = cv2.getAffineTransform(pts1,pts2)
        #cv2.rectangle(frame, (currX, currY), (currX + width, currY+ height), (0,0,255),2)
        dst = cv2.warpAffine(frame, M, (output_wd,output_ht))
        out.write(dst)
        #if cv2.waitKey(10) & 0xFF == ord('q'):
        #    break
cap.release()
out.release()
cv2.destroyAllWindows()
| true |
f88f66a0a10c1884d3caa88d0292f36da8cf32a8 | Python | pahaz/events-site-example | /events_site_example/apps/common_app/tests/tests.py | UTF-8 | 858 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from django.test import TestCase
from apps.common_app.utils import add_model_prefix
class TestUtils(TestCase):
    def test_add_model_prefix(self):
        # Each case: (prefix, to, expected result, failure message).
        cases = [
            (None, 'some_value', 'some_value',
             'if "prefix" is not set, then "to" should be returned'),
            ('prefix', 'some_value', 'prefix__some_value',
             'if "prefix" is set, then "prefix" should be concatenated to "to" with two underscores'),
        ]
        for prefix, to, expected, message in cases:
            self.assertEqual(add_model_prefix(prefix, to), expected, msg=message)
| true |
ff265f8d36d14ce0634717ef20c43ae2b74c866f | Python | VRER1997/leetcode_python | /easy/541 Reverse String II.py | UTF-8 | 298 | 3.296875 | 3 | [] | no_license | class Solution:
def reverseStr(self, s: str, k: int) -> str:
res = []
for i in range(0, len(s), 2*k):
res.append(s[i:i+k][::-1] + s[i+k:i+2*k])
return ''.join(res)
if __name__ == '__main__':
    # Quick manual check: k larger than the string reverses the whole string.
    solver = Solution()
    print(solver.reverseStr(s = "abcdefg", k = 9))
| true |
9618346ca146adcaf0deeb8e17a32eb269c89f39 | Python | BraulioAO/Taller_Cripto | /Cifrado_DH/dh.py | UTF-8 | 2,411 | 4.09375 | 4 | [] | no_license | #!/usr/bin/python3
# UNIVERSIDAD NACIONAL AUTÓNOMA DE MÉXICO
# FACULTAD DE INGENIERÍA
# MANUAL DE ACTIVIDADES PRÁCTICAS DE CRIPTOGRAFÍA
# 5. DH (DEFFIE - HELLMAN)
# NOMBRE DEL ALUMNO: Arrieta Ocampo Braulio Enrique
from math import log
def es_primo(p):
    """ Return True if p is prime, False otherwise.

    Numbers below 2 (0, 1 and negatives) are not prime; the original
    implementation reported them as prime because its trial-division
    loop never ran.  Trial division only needs to go up to sqrt(p):
    any composite number has a divisor no larger than its square root.
    """
    if p < 2:
        return False
    for n in range(2, int(p ** 0.5) + 1):
        if p % n == 0:  # p is divisible by n, hence composite
            return False
    return True
print("\n\t\t*°*°*° Diffie y Hellman *°*°*°")
nombre = input("\n\n\tIngrese el nombre de la víctima: ")
try: #Verifica que se ingrese un número, sino error.
p = int(input("\n\tIngrese el valor P acordado: "))
if not es_primo(p):
print("\n\t\t***El valor P no es primo***\n")
exit()
y = int(input("\n\tIngrese el valor Y acordado: "))
if y >= p:
print("\n\t\t***El valor Y debe ser menor que P ({})***\n".format(p))
exit()
kpub = int(input("\n\tIngrese la clave pública de {}: ".format(nombre)))
if kpub >= p:
print("\n\t\t***El valor de la clave pública debe ser menor que P ({})***\n".format(p))
exit()
except ValueError:
print("\n\t***El valor ingresado no es numérico***\n")
exit()
kpriv = int(log(kpub,y)%p) #Calcula posible clave privada mediante: kpriv = logY(kpub) mod p
print("\n\tPosible clave secreta de {}: {:d}".format(nombre, kpriv))
while True:
op = input("\n\t¿Desea ver la lista completa de los números posibles? Y/N: ").lower()
if op == 'y':
# Calcula mediante fuerza bruta todas las posibles claves privadas (kpriv) con las que se
# puede obtener la misma clave pública (kpub). Para esto, dado que kpriv se encuentra
# comprendida entre 1 < kpriv < p, es necesario buscar todos aquellos valores de kpriv
# que sean igual a: kpub = (y^kpriv mod p), donde 'kpub, p, y' son los valores conocidos.
#Crea lista de valores kpriv que cumplen con kpub = (y^kpriv mod p)
kpriv_posibles = list(filter(lambda i: y**i%p == kpub, range(2,p)))
print("\n\t{}\n".format(kpriv_posibles))
exit()
elif op == 'n':
print("")
exit()
print("\n\t\t*** Opción inválida, vuelva a intentar ***")
| true |
8aad6c8d82ddcdb18cfd27b9b31d53ad606adac1 | Python | milo71/md-to-html | /md-to-html.py | UTF-8 | 566 | 2.75 | 3 | [] | no_license | import sys
import markdown
import argparse
# Command-line markdown-to-HTML converter: wraps the converted body in a
# minimal HTML skeleton and writes it to the output file.
parser = argparse.ArgumentParser(description='Parse markdown to html')
parser.add_argument('-i', '--input', required=True, help='input file name')
parser.add_argument('-o', '--output', required=True, help='output file name')
args = parser.parse_args()

with open(args.input, 'r') as source:
    head = '<html>\n<head>\n</head>\n<body>\n'
    foot = '\n</body>\n</html>'
    body = markdown.markdown(source.read())
    document = head + body + foot
    with open(args.output, 'w') as target:
        target.write(document)
| true |
59337cba3ddd09f3ca90ea9f9e3c084011e6ae8d | Python | ceholden/yatsm | /yatsm/vegetation_indices.py | UTF-8 | 790 | 3.65625 | 4 | [
"MIT"
] | permissive | """ Functions for computing vegetation indices
"""
from __future__ import division
def EVI(red, nir, blue):
    """ Return the Enhanced Vegetation Index for a set of np.ndarrays

    EVI is calculated as:

    .. math::
        2.5 * \\frac{(NIR - RED)}{(NIR + C_1 * RED - C_2 * BLUE + L)}

    with the standard coefficients :math:`C_1 = 6`, :math:`C_2 = 7.5`
    and :math:`L = 1`.

    Note: bands must be given in float datatype from [0, 1]

    Args:
        red (np.ndarray): red band
        nir (np.ndarray): NIR band
        blue (np.ndarray): blue band

    Returns:
        np.ndarray: EVI

    """
    numerator = nir - red
    denominator = nir + 6 * red - 7.5 * blue + 1
    return 2.5 * numerator / denominator
| true |
7db2a2501aba087fd332e40667e31ca135fa4e97 | Python | ganddev/IS | /Beleg3.py | UTF-8 | 4,168 | 3.03125 | 3 | [] | no_license | __author__ = 'Daniel'
#!/usr/bin/python
import numpy as np
import matplotlib .pyplot as plt
## Create training data
def create_data():
    """Sample 6 noisy points of y = sin(-x) for x in [0, pi].

    Returns (xValues, yValues); the noise is Gaussian with std 0.2 and is
    drawn from NumPy's global RNG, so results vary between runs.
    """
    xValues = np.arange(0,np.pi+0.1, (np.pi / 5))
    gauss = np.random.normal(0, 0.2, 6)
    yValues = np.sin(-xValues) + gauss
    return xValues, yValues
## Map a scalar x onto the polynomial feature vector (x, x^2, ..., x^5).
def transform(x):
    return np.array([x ** power for power in range(1, 6)])
##
# Rescaling helpers
# z-score normalisation: shift by the mean, then divide by the std.
# Scale x by a given xmean and xstd.
def rescale_by(x, xmean, xstd):
    return (x - xmean) / xstd
# Compute the per-column mean and standard deviation of x and return the
# z-scored data together with both statistics (needed to scale back later).
def rescale(x):
    mean = x.mean(axis=0)
    std = x.std(axis=0)
    normalised = (x - mean) / std
    return normalised, mean, std
# Map parameters fitted on z-scored features back to the original scale.
# theta[0] is the bias term; theta[1:] are the feature weights.
def back_scale_parameter(theta, xmean, xstd):
    t = np.ones(theta.size)
    # Bias absorbs the shift introduced by centring each feature.
    t[0] = theta[0] - (theta[1:] * xmean / xstd).sum()
    t[1:] = theta[1:] / xstd
    return t
# Map parameters from the original feature scale onto z-scored features.
# NOTE(review): to be the exact inverse of back_scale_parameter the bias
# correction would be expected to divide by xstd as well -- confirm the
# intended pairing of these two helpers.
def scale_parameter(theta, xmean, xstd):
    t = np.ones(theta.size)
    t[0] = theta[0] + (theta[1:] * xmean).sum()
    t[1:] = theta[1:] * xstd
    return t
##### logistic regression helpers ####
def logistic_function(x):
    """Element-wise sigmoid: 1 / (1 + e^-x)."""
    exp_neg = np.exp(-x)
    return 1. / (1. + exp_neg)
def logistic_hypothesis(theta):
    """Return the callable h(X) = sigmoid(X . theta)."""
    def hypothesis(X):
        return logistic_function(X.dot(theta))
    return hypothesis
# Per-sample cross-entropy term for logistic regression.
# NOTE(review): this takes (h, y) while cost_function below calls its loss
# as loss(h, X, y) -- the two are incompatible as written; only
# squared_error_loss matches cost_function's calling convention. Confirm.
def cross_entropy_loss(h, y):
    return lambda theta: y * np.log(h(theta)) + (1-y) * np.log (1. - h(theta))
##### linear regression helpers ####
def linear_hypothesis(theta):
    """Return the callable h(X) = X . theta."""
    def hypothesis(X):
        return X.dot(theta)
    return hypothesis
# Per-sample squared-error term: (h_theta(x) - y)^2 / 2.
# `h` is a hypothesis factory (e.g. linear_hypothesis), so h(theta)(x)
# first binds the parameters and then evaluates the model at x.
def squared_error_loss(h, x, y):
    return lambda theta: 1./2. * (h(theta)(x) - y)**2
def sin_hypothesis(theta):
    """Degree-5 polynomial model:
    theta[0]*X^5 + theta[1]*X^4 + theta[2]*X^3 + theta[3]*X^2 + theta[4]*X + theta[5].
    """
    def hypothesis(X):
        # Horner evaluation of the same polynomial the original wrote out
        # with explicit powers.
        result = theta[0]
        for coefficient in theta[1:6]:
            result = result * X + coefficient
        return result
    return hypothesis
# Didactic code to demonstrate gradient descent; not optimized for speed.
# Returns J(theta) = (1/m) * sum of per-sample losses over the data set.
def cost_function(X, y, h, l, loss):
    # NOTE(review): the regularisation strength `l` is accepted but never
    # used here, and `loss` is called as loss(h, X, y) -- compatible with
    # squared_error_loss but not with cross_entropy_loss above. Confirm.
    m = len(y)
    return lambda theta: 1./(float(m)) * (loss(h,X,y)(theta).sum())
# Prepend the bias column x_0 = 1 to every feature vector.
def addColumnZero(x):
    bias = np.ones(len(x))
    return np.column_stack((bias, x))
# One gradient-descent step with L2 regularisation (theta[0] unregularised).
# NOTE(review): np.sum collapses the per-feature gradient vector
# X.T.dot(h - y) into a single scalar, so every parameter receives the same
# summed gradient instead of its own component -- this looks like a bug;
# confirm against the intended update rule.
def compute_new_theta(X, y, theta, alpha, hypothesis, l):
    m = len(X)
    h = hypothesis(theta)(X)
    error_sum = np.sum(X.T.dot(h - y))
    # update rule for j = 0
    theta[0] = theta[0] - alpha * (1.0 / float(m)) * error_sum
    #update rule for j != 0 (weight decay term 1 - alpha*l/m)
    theta[1:] = theta[1:]*(1-alpha*(l/float(m))) - alpha/float(m) * error_sum
    return theta
def gradient_descent(x, y, theta_, alpha, num_iters, hypothesis, l, loss):
    """Fit `hypothesis` by gradient descent on z-scored features.

    x: raw feature matrix; y: targets; theta_: initial parameters (on the
    original feature scale); alpha: learning rate; num_iters: iteration
    count; l: L2 regularisation strength; loss: per-sample loss following
    squared_error_loss's (h, x, y) convention.

    Returns (theta on the original scale, final cost on the scaled data).
    NOTE: uses xrange, i.e. this file is Python 2 code.
    """
    x_norm ,xmean, xstd = rescale(x)
    # Transfer the initial parameters onto the normalised feature scale.
    theta = scale_parameter(theta_, xmean, xstd)
    X = addColumnZero(x_norm)
    assert X[0].size == theta.size
    n = theta.size
    #history_theta = np.ones((num_iters+1, n), dtype=np.float)
    #history_cost = np.ones((num_iters+1), dtype=np.float)
    cost = cost_function(X, y, hypothesis, l, loss)
    #history_cost[0] = cost(theta)
    #history_theta[0] = back_scale_parameter(theta, xmean, xstd)
    for i in xrange(num_iters):
        theta = compute_new_theta(X, y, theta, alpha, hypothesis, l)
        #history_cost[i+1] = cost(theta)
        #history_theta[i+1] = back_scale_parameter(theta, xmean, xstd)
    #return history_theta, history_cost
    return back_scale_parameter(theta, xmean, xstd), cost(theta)
if __name__ == "__main__":
[xValues, yValues] = create_data()
z = np.array([transform(xValues[0])])
for x in xValues[1:]:
z = np.concatenate((z,np.array([transform(x)])))
thetas = np.ones(6)
[tettas, costs] = gradient_descent(z,yValues,thetas, 0.1, 1000, sin_hypothesis,0.001,squared_error_loss)
[tettas1, costs1] = gradient_descent(z,yValues,thetas, 0.01, 1000, sin_hypothesis,0.001,squared_error_loss)
[tettas2, costs2] = gradient_descent(z,yValues,thetas, 0.0001, 1000, sin_hypothesis,0.001,squared_error_loss)
[tettas3, costs3] = gradient_descent(z,yValues,thetas, 0.00001, 1000, sin_hypothesis,1,squared_error_loss)
print tettas
print tettas1
print tettas2
fig1 = plt.figure(1)
plt.scatter(xValues,yValues)
x = np.linspace(-1,3.2,100)
y = sin_hypothesis(tettas)(x)
y1 = sin_hypothesis(tettas1)(x)
y2= sin_hypothesis(tettas2)(x)
y3 = sin_hypothesis(tettas3)(x)
plt.plot(x,y)
plt.plot(x,y1)
plt.plot(x,y2)
plt.plot(x,y3)
plt.show() | true |
50a09951af0b5f43d13078845709bc73e04262fd | Python | scortier/Leetcode-Submissions | /problems/number_of_ways_of_cutting_a_pizza/solution.py | UTF-8 | 3,410 | 3.296875 | 3 | [] | no_license | # improting function tools
import functools
# definign class for Solution.py
class Solution:
    """Number of ways to cut a pizza into k pieces so that every piece
    contains at least one apple ('A'), modulo 1e9+7.

    Each cut removes the top rows or left columns of the remaining
    rectangle; the remainder is always the lower-right rectangle.
    """
    def ways(self, pizza: List[str], k: int) -> int:
        m, n = len(pizza), len(pizza[0])
        MOD = 10 ** 9 + 7
        # remainsApple[i][j] = apples in the rectangle with top-left (i, j)
        # and bottom-right (m-1, n-1): a 2-D suffix sum.
        remainsApple = [[0] * n for _ in range(m)]
        # Build the suffix sums bottom-up, right-to-left.
        for i in range(m - 1, -1, -1):
            # Apples in row i from column j to the right edge.
            rightApple = 0
            for j in range(n - 1, -1, -1):
                if pizza[i][j] == "A":
                    rightApple += 1
                # Row suffix plus the rectangle strictly below.
                remainsApple[i][j] = (remainsApple[i + 1][j] if i != m - 1 else 0) + rightApple
        # True if row i contains an apple at column j or later.
        def rightHasApple(i, j):
            if i == m - 1:
                return remainsApple[i][j] > 0
            else:
                # Subtracting the rectangle below leaves only row i's suffix.
                return remainsApple[i][j] - remainsApple[i + 1][j] > 0
        # True if column j contains an apple at row i or later.
        def belowHasApple(i, j):
            if j == n - 1:
                return remainsApple[i][j] > 0
            else:
                return remainsApple[i][j] - remainsApple[i][j + 1] > 0
        @functools.lru_cache(None)
        # dp(p, i, j): ways to cut the rectangle with top-left (i, j) into
        # p pieces, each piece containing at least one apple.
        def dp(remainPieces, i, j):
            if remainPieces == 1:
                # No further cuts: valid iff the remaining piece has an apple.
                return 1 if remainsApple[i][j] > 0 else 0
            if remainsApple[i][j] < remainPieces:
                # Not enough apples left to give one to every piece.
                return 0
            if i == m - 1 and j == n - 1:
                # Unreachable: a single cell with >= 2 pieces required is
                # rejected by the apple-count check above.
                assert(False)
            res = 0
            # Horizontal cuts: the piece handed off is rows i..new_i-1.
            # This flag tracks whether that top piece has an apple yet.
            seenAppleOnRight = False
            for new_i in range(i + 1, m):
                seenAppleOnRight = seenAppleOnRight or rightHasApple(new_i - 1, j)
                if seenAppleOnRight:
                    res += dp(remainPieces - 1, new_i, j)
                    res %= MOD
            # Vertical cuts: the piece handed off is columns j..new_j-1.
            seenAppleBelow = False
            for new_j in range(j + 1, n):
                seenAppleBelow = seenAppleBelow or belowHasApple(i, new_j - 1)
                if seenAppleBelow:
                    res += dp(remainPieces - 1, i, new_j)
                    res %= MOD
            return res
        # Answer: k pieces from the full pizza starting at (0, 0).
        return dp(k, 0, 0)
a917d2960396379e7b13ac47ccc85c6235a34057 | Python | felixz2535/Halite-ML | /trainer.py | UTF-8 | 4,469 | 2.546875 | 3 | [] | no_license | import tensorflow as tf
import os
import numpy as np
import time
import random
from tqdm import tqdm
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard
# --- run configuration ---
LOAD_TRAIN_FILES = False  # reuse previously saved X-*.npy / test_*.npy files
LOAD_PREV_MODEL = False  # resume from PREV_MODEL_NAME instead of building fresh
HALITE_THRESHOLD = 4400  # only train on games that collected at least this much halite
TRAINING_CHUNK_SIZE = 500  # number of game files loaded per training chunk
PREV_MODEL_NAME = ""
VALIDATION_GAME_COUNT = 50  # first N shuffled games are held out for validation
NAME = f"phase1-{int(time.time())}"  # run id for TensorBoard logs and saved model
EPOCHS = 1
TRAINING_DATA_DIR = 'training_data'
training_file_names = []  # filled below with paths of games passing the threshold
def balance(x, y):
    """Balance a labelled dataset across the 5 movement classes (labels 0-4).

    Groups the (x, y) pairs by label, truncates every group to the size of
    the smallest one, shuffles the combined result and returns the
    re-separated feature and label lists.  Pairs with labels outside 0-4
    are dropped, matching the original behaviour.

    Fixes: the original shadowed its own parameters with the loop
    variables (``for x, y in zip(x, y)``) and maintained five duplicated
    per-class lists; a dict of buckets replaces both.
    """
    # One bucket of [features, label] pairs per class label 0..4.
    buckets = {label: [] for label in range(5)}
    for features, label in zip(x, y):
        if label in buckets:
            buckets[label].append([features, label])
    # Truncate every class to the size of the rarest one.
    shortest = min(len(bucket) for bucket in buckets.values())
    balanced = []
    for label in range(5):
        balanced.extend(buckets[label][:shortest])
    random.shuffle(balanced)
    print(f"The shortest file was {shortest}, total balanced length is {len(balanced)}")
    xs = []
    ys = []
    for features, label in balanced:
        xs.append(features)
        ys.append(label)
    return xs, ys
# Keep only games whose final halite (encoded in the file name prefix)
# meets the threshold, then shuffle so the validation split is random.
for f in os.listdir(TRAINING_DATA_DIR):
    halite_amount = int(f.split("-")[0])
    if halite_amount >= HALITE_THRESHOLD:
        training_file_names.append(os.path.join(TRAINING_DATA_DIR, f))
print(f"After the threshold we have {len(training_file_names)} games")
random.shuffle(training_file_names)
# Build (or reload) the validation split from the first
# VALIDATION_GAME_COUNT shuffled games; each game file holds
# (frame, move-label) pairs.
if LOAD_TRAIN_FILES:
    test_x = np.load("test_x.npy")
    test_y = np.load("test_y.npy")
else:
    test_x = []
    test_y = []
    for f in tqdm(training_file_names[:VALIDATION_GAME_COUNT]):
        data = np.load(f)
        for d in data:
            test_x.append(np.array(d[0]))
            test_y.append(d[1])
    np.save("test_x.npy", test_x)
    np.save("test_y.npy", test_y)
test_x = np.array(test_x)
def chunks(l, n):
    """Yield successive n-sized slices of l (last slice may be shorter)."""
    index = 0
    while index < len(l):
        yield l[index:index + n]
        index += n
# TensorBoard logging under logs/<run name>.
tboard_log_dir = os.path.join("logs",NAME)
tensorboard = TensorBoard(log_dir = tboard_log_dir)
# Either resume a saved model or build a small 3-block CNN classifier
# over the game-frame input, with 5 outputs (one per movement class).
if LOAD_PREV_MODEL:
    model = tf.keras.models.load_model(PREV_MODEL_NAME)
else:
    model = Sequential()
    model.add(Conv2D(64, (3, 3), padding = "same", input_shape = test_x.shape[1:]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), padding = "same"))
    model.add(Conv2D(64, (3, 3), padding = "same"))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), padding = "same"))
    model.add(Conv2D(64, (3, 3), padding = "same"))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), padding = "same"))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Dense(5))
    # NOTE(review): 'sigmoid' over 5 classes with sparse categorical
    # cross-entropy is unusual -- 'softmax' would be expected; confirm.
    model.add(Activation('sigmoid'))
# NOTE(review): `lr` and `decay` are legacy kwargs (newer Keras uses
# `learning_rate` and a schedule) -- confirm the TF version in use.
opt = tf.keras.optimizers.Adam(lr = .001, decay = .001)
model.compile(loss="sparse_categorical_crossentropy",
              optimizer = opt,
              metrics = ['accuracy'])
# Train chunk-by-chunk; chunks are rebuilt and cached as X-<idx>.npy on the
# first epoch, then reloaded on later epochs.
for e in range(EPOCHS):
    training_file_chunks = chunks(training_file_names[VALIDATION_GAME_COUNT:], TRAINING_CHUNK_SIZE)
    print(f"currently working on epoch {e}")
    for idx, training_files in enumerate(training_file_chunks):
        print(f"Working on data chunk {idx+1}/{round(len(training_file_names)/TRAINING_CHUNK_SIZE, 2)} ")
        if LOAD_TRAIN_FILES or e > 0:
            X = np.load(f"X-{idx}.npy")
            y = np.load(f"y-{idx}.npy")
        else:
            X = []
            y = []
            for f in tqdm(training_files):
                data = np.load(f)
                for d in data:
                    X.append(np.array(d[0]))
                    y.append(d[1])
            X, y = balance(X, y)
            # NOTE(review): re-balancing test_x/test_y on every chunk keeps
            # shrinking and reshuffling the validation set -- confirm intended.
            test_x, test_y = balance(test_x, test_y)
            X = np.array(X)
            y = np.array(y)
            test_x = np.array(test_x)
            test_y = np.array(test_y)
            np.save(f"X-{idx}.npy", X)
            np.save(f"y-{idx}.npy", y)
        model.fit(X, y, batch_size = 32, epochs = 1, validation_data = (test_x, test_y), callbacks = [tensorboard])
    # NOTE(review): the backslash makes this a Windows-only path; os.path.join
    # would be portable -- confirm target platform.
    model.save(f"models\{NAME}")
| true |
159a56ddd48ee103085b1a4d2404e9582050d50b | Python | mmreyno/sao-paulo-crime-data | /crime-data-downloader.py | UTF-8 | 1,891 | 2.796875 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.support.ui import Select
import time
# Selenium script: drives the São Paulo SSP crime-statistics site, selects
# year/region/municipality, then downloads the monthly CSV for every police
# district.  The fixed time.sleep(5) calls wait for each postback to finish.
# set paths
path_to_chromedriver = '/usr/lib/python2.7/chromedriver'
browser = webdriver.Chrome(executable_path = path_to_chromedriver)
url = 'http://www.ssp.sp.gov.br/novaestatistica/Pesquisa.aspx'
browser.get(url)
# find the years menu
years = browser.find_element_by_xpath('//*[@id="ContentPlaceHolder1_ddlAnos"]')
allyears = years.find_elements_by_tag_name('option') #find possible years
allyears[3].click() #'4' returns 2012 results, '3' returns 2013, '2' returns 2014
time.sleep(5)
# find region menu
regions = browser.find_element_by_id('ContentPlaceHolder1_ddlRegioes')
allregions = regions.find_elements_by_tag_name('option') #find possible regions
allregions[1].click() #gives capital as region
time.sleep(5)
# find municipality menu
municip = browser.find_element_by_id('ContentPlaceHolder1_ddlMunicipios')
allmunicip = municip.find_elements_by_tag_name('option') #find possible munic
allmunicip[1].click() #gives SP as municipality
time.sleep(5)
# find the monthly crime statistics option
monthlydata = browser.find_element_by_id('ContentPlaceHolder1_btnMensal')
monthlydata.click() #chose monthly stats
time.sleep(5)
# get the districts as numeric values so we can plug them in later
districtsrequired = []
alldelegations = browser.find_element_by_id('ContentPlaceHolder1_ddlDelegacias').find_elements_by_tag_name('option') #find possible delegations
for i in alldelegations: #make a list of districts
    districtsrequired.append(i.get_attribute('value'))
# iterate through the menu and select each district
# (the dropdown is re-located each iteration because the postback
# replaces the DOM element)
for i in districtsrequired:
    delegations = Select(browser.find_element_by_id('ContentPlaceHolder1_ddlDelegacias'))
    delegations.select_by_value(str(i))
    time.sleep(5)
    browser.find_element_by_id('ContentPlaceHolder1_btnExcel').click() #download the CSV we need!!!
| true |
44039900bf4375a16dade14aa4bdb079a1955893 | Python | ajtanskanen/benefits | /fin_benefits/ben_utils.py | UTF-8 | 2,208 | 2.578125 | 3 | [
"MIT"
] | permissive |
import seaborn as sns
import matplotlib.font_manager as font_manager
import math
def get_palette_EK():
    """Return the EK brand color scheme as a seaborn palette.

    Earlier draft palettes that were kept as dead local variables
    (colors1..colors3) have been removed as unused.
    """
    colors = ['#295247', '#7cae79', '#ffcb21', '#e85c03', '#88b2eb', '#2c495a', '#fdaf89']
    return sns.color_palette(colors)
def get_style_EK():
    """Return a matplotlib/seaborn rc-style dictionary for the EK look."""
    font_stack = ['IBM Plex Sans',
                  'DejaVu Sans',
                  'Bitstream Vera Sans',
                  'Computer Modern Sans Serif',
                  'Lucida Grande',
                  'Verdana',
                  'Geneva',
                  'Lucid',
                  'Arial',
                  'Helvetica',
                  'Avant Garde',
                  'sans-serif']
    style = {}
    # colors
    style.update({'axes.facecolor': 'white',
                  'axes.edgecolor': 'black',
                  'axes.labelcolor': 'black',
                  'figure.facecolor': 'white',
                  'grid.color': '#b0b0b0',
                  'grid.linestyle': '-',
                  'text.color': 'black',
                  'xtick.color': 'black',
                  'ytick.color': 'black',
                  'patch.edgecolor': 'black'})
    # tick placement/direction and which spines are drawn
    style.update({'xtick.direction': 'out',
                  'ytick.direction': 'out',
                  'xtick.bottom': True,
                  'xtick.top': False,
                  'ytick.left': True,
                  'ytick.right': False,
                  'axes.spines.left': False,
                  'axes.spines.bottom': True,
                  'axes.spines.right': False,
                  'axes.spines.top': False})
    # misc rendering options
    style.update({'axes.grid': False,
                  'axes.axisbelow': 'line',
                  'lines.solid_capstyle': 'projecting',
                  'patch.force_edgecolor': False,
                  'image.cmap': 'viridis',
                  'font.family': ['sans-serif'],
                  'font.sans-serif': font_stack})
    return style
def print_q(a):
    '''
    pretty printer for dict: writes every non-zero entry as "key:value "
    on a single line, then ends the line
    '''
    for key, val in a.items():
        if val > 0 or val < 0:
            print('{}:{:.2f} '.format(key, val), end='')
    print('')
def compare_q_print(q, q2, omat='omat_', puoliso='puoliso_'):
    '''
    Helper function that prettyprints the differences between two dicts.

    For every key of q: if the key is also in q2 but the values differ
    (per math.isclose), print both values and their delta; otherwise
    report the key as missing from q2.  The omat/puoliso prefixes are
    kept for interface compatibility but are currently unused.

    Fixes: the original's trailing line was corrupted by a stray
    "| true |" suffix, and its "not in q" branch was unreachable
    (every key iterated is, by construction, in q).
    '''
    for key in q:
        if key in q2:
            if not math.isclose(q[key], q2[key]):
                d = q[key] - q2[key]
                print(f'{key}: {q[key]:.2f} vs {q2[key]:.2f} delta {d:.2f}')
        else:
            print(key, ' not in q2')
3e622c42e8c9e709b221172d72d897bfb8311558 | Python | haoyu-zhao/Multi-Objective-Optimization-for-Football-Team-Member-Selection | /ESP/code/mutation.py | UTF-8 | 1,696 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
This is the open source code of the paper "Multi-Objective Optimization for Football Team Member Selection"
If you have any questions, please contact the author of the paper
"""
import random
import copy
def get_GK(data):
    """Split player tags into goalkeepers (attackscore == 0) and outfield players."""
    is_gk = data['attackscore'] == 0
    GK_tag = data.loc[is_gk, 'Tag']
    player_tag = data.loc[~is_gk, 'Tag']
    return GK_tag, player_tag
def mutation(population, pm, eta_m, minmax):
populationSize = len(population)
newPopulation = copy.deepcopy(population)
x_num = len(population[0])
for i in range(populationSize):
for j in range(x_num):
r=random.random()
y=population[i][j]
if r<=pm:
ylow,yup = minmax[j]
delta1=1.0*(y-ylow)/(yup-ylow)
delta2=1.0*(yup-y)/(yup-ylow)
#delta=min(delta1, delta2)
r=random.random()
mut_pow=1.0/(eta_m+1.0)
if r<=0.5:
xy=1.0-delta1
val=2.0*r+(1.0-2.0*r)*(xy**(eta_m+1.0))
deltaq=val**mut_pow-1.0
else:
xy=1.0-delta2
val=2.0*(1.0-r)+2.0*(r-0.5)*(xy**(eta_m+1.0))
deltaq=1.0-val**mut_pow
y=y+deltaq*(yup-ylow)
y=round(y)
y=min(yup, max(y, ylow))
if y in newPopulation[i]:
continue
newPopulation[i][j]=y
temp = newPopulation[i][1:]
temp.sort()
newPopulation[i][1:] = temp
return newPopulation | true |
56ebc86946c6c5fb412aac31721b31e63c9c0ebf | Python | chianobi/nlp-final-slam | /data_collector.py | UTF-8 | 1,942 | 2.765625 | 3 | [
"MIT"
] | permissive | """
Lucino Chiafullo
Quick/dirty file that collects & pickles data
"""
import pickle
import random
from newsapi import NewsApiClient
import datetime
# Accumulators for (headline, label) tuples; filled by create_labeled_data().
bait_headlines = []
legit_headlines = []

# Date window for the News API query: the last 29 days, as 'YYYY-MM-DD' strings.
now = datetime.datetime.now()
now_str = now.strftime('%Y') + "-" + now.strftime('%m') + "-" + now.strftime('%d')
monthago = now - datetime.timedelta(days=29)
monthago_str = monthago.strftime('%Y') + "-" + monthago.strftime('%m') + "-" + monthago.strftime('%d')


def create_labeled_data():
    """Fetch up to 10 pages of headlines per source from the News API,
    label BuzzFeed titles 'bait' and Reuters/AP titles 'not_bait',
    shuffle the combined list and pickle it to headlines.p.
    """
    # NOTE(review): the API key is hard-coded here; it should be moved to an
    # environment variable or config file before sharing this script.
    news_api = NewsApiClient(api_key='063f02817dbb49528058d7372964f645')
    x = 1
    while x <= 10:
        # one 100-result page per source per iteration
        b_headlines = \
            news_api.get_everything(sources='buzzfeed', from_param=monthago_str, to=now_str,
                                    language='en',
                                    sort_by='relevancy', page_size=100, page=x)['articles']
        r_headlines = \
            news_api.get_everything(sources='reuters', from_param=monthago_str, to=now_str,
                                    language='en', sort_by='relevancy', page_size=100, page=x)['articles']
        a_headlines = \
            news_api.get_everything(sources='associated-press', from_param=monthago_str, to=now_str,
                                    language='en', sort_by='relevancy', page_size=100, page=x)['articles']
        b_titles = [article['title'] for article in b_headlines]
        # drop None titles before labelling
        b_titles = list(filter(None.__ne__, b_titles))
        b_titles = [(article, 'bait') for article in b_titles]
        bait_headlines.extend(b_titles)
        r_titles = [(article['title'], 'not_bait') for article in r_headlines]
        legit_headlines.extend(r_titles)
        a_titles = [(article['title'], 'not_bait') for article in a_headlines]
        legit_headlines.extend(a_titles)
        x += 1
    all_headlines = bait_headlines + legit_headlines
    random.shuffle(all_headlines)
    pickle.dump(all_headlines, open('headlines.p', 'wb'))


if __name__ == '__main__':
    create_labeled_data()
| true |
986bfa374e8bf0e454af9778c76251408ec77c5a | Python | madtyn/pythonHeadFirst | /pythonDesignPatterns/src/composite/menuiterator/menu.py | UTF-8 | 2,826 | 3.40625 | 3 | [] | no_license | from abc import ABCMeta, abstractmethod
from pythonDesignPatterns.src.composite.menuiterator.iterator import NullIterator, CompositeIterator, Iterator
class MenuComponent(metaclass=ABCMeta):
    """
    Composite-pattern base class: a component that may contain other
    instances of the same class.  Leaves (MenuItem) and composites (Menu)
    override the subset of operations that make sense for them.
    """
    def add(self, menuComponent):
        """
        Adds a menuComponent instance to this menuComponent
        :param menuComponent: the menuComponent instance to add
        :return: not implemented, must be overriden, should be None or a boolean
        """
        # NOTE(review): returning NotImplemented (rather than raising
        # NotImplementedError) is unusual outside binary operators — confirm
        # callers check the return value.
        return NotImplemented

    def remove(self, menuComponent):
        """
        :param menuComponent: the menuComponent instance to remove
        :return: not implemented, must be overriden, it should return the removed element
        """
        return NotImplemented

    def getChild(self, i):
        """
        Retrieves the i-th menuComponent contained within this class
        :param i: the index where the retrieved element is
        :return: the i-th child element
        """
        return NotImplemented

    @abstractmethod
    def createIterator(self):
        # every concrete component must supply an iterator over its children
        pass

    def print(self):
        # optional pretty-printing hook; overriden by concrete components
        return NotImplemented
class Menu(MenuComponent):
    """
    A concrete instantiation of MenuComponent, which can hold other MenuComponent instances
    as instance of Menu itself and MenuItem
    """
    def __init__(self, name, description):
        super().__init__()
        self.name = name                # menu title shown by print()
        self.description = description  # one-line description shown by print()
        self.menuComponents = []        # child Menu/MenuItem components

    def add(self, menuComponent):
        # append a child component (Menu or MenuItem)
        self.menuComponents.append(menuComponent)

    def remove(self, menuComponent):
        # remove a previously added child component
        self.menuComponents.remove(menuComponent)

    def getChild(self, i):
        # return the i-th child component
        return self.menuComponents[i]

    def createIterator(self):
        # composite iterator walks this menu's children depth-first
        return CompositeIterator(Iterator.createIterator(self.menuComponents))

    def print(self):
        """Print this menu's header, then recursively print every child."""
        print("\n{}".format(self.name),)
        print(", {}".format(self.description))
        print("---------------------")
        iterator = Iterator.createIterator(self.menuComponents)
        while iterator.hasNext():
            menuComponent = iterator.getNext()
            menuComponent.print()
class MenuItem(MenuComponent):
    """A leaf of the menu tree: a single dish with a price."""

    def __init__(self, name, description, vegetarian, price) -> None:
        super().__init__()
        self.name = name
        self.description = description
        self.vegetarian = vegetarian
        self.price = price

    def createIterator(self):
        # A leaf has no children, so iteration yields nothing.
        return NullIterator()

    def print(self):
        """Print this item's name, vegetarian marker, price and description."""
        print(f" {self.name}")
        if self.vegetarian:
            print("(v)")
        print(f", {self.price}")
        print(f" -- {self.description}")
#vv MenuItemCompositeV2Main
#^^ MenuItemCompositeV2Main
#^^ MenuItemCompositeV2
| true |
57805dc3b1a32cb79ef67cac869846e5b9ae3f70 | Python | srinjoychakravarty/parallel_ml | /part2.py | UTF-8 | 2,356 | 2.78125 | 3 | [] | no_license | from pandas import read_csv
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
import matplotlib
matplotlib.use('Agg') # prevents matplotlib error on discovery cluster
from matplotlib import pyplot
import numpy
import time
# Load the training data and split features (first 94 columns) from the label.
data = read_csv('train.csv')
dataset = data.values
X = dataset[:25000, 0:94] # split data into X and y and sampling from total row entries to save time
y = dataset[:25000, 94]
label_encoded_y = LabelEncoder().fit_transform(y)

# Hyper-parameter grid: only learning_rate and n_estimators vary; every other
# parameter is fixed to a single value.
model = XGBClassifier()
number_of_trees = [100, 200, 300, 400, 500]
# number_of_trees = [60, 70, 80, 90, 100]
max_depth = [1]
subsample = [1.0]
colsample_bytree = [0.18]
learning_rate = [0.0001, 0.001, 0.01, 0.1]
gamma = [0]
objective = ['binary:logistic']
reg_alpha = [0.2]
reg_lambda = [0.4]
num_threads = [8, 7, 6, 5, 4, 3, 2, 1]
param_grid = {'reg_lambda': reg_lambda,'reg_alpha': reg_alpha, 'objective': objective, 'gamma': gamma, 'colsample_bytree': colsample_bytree, 'subsample': subsample, 'max_depth': max_depth, 'n_estimators': number_of_trees, 'learning_rate': learning_rate}
kfold = StratifiedKFold(n_splits = 10, shuffle = True, random_state = 93)

# Repeat the identical grid search with a decreasing number of worker threads
# and time each run — the point of this script is the scaling comparison.
for n in num_threads:
    print(f'Number of Threads: {n}')
    start = time.time()
    grid_search = GridSearchCV(model, param_grid, scoring="neg_log_loss", n_jobs=n, cv=kfold) # grid search
    grid_result = grid_search.fit(X, label_encoded_y)
    print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    params = grid_result.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print("%f (%f) with: %r" % (mean, stdev, param))
    # NOTE(review): this reshape inside the loop is overwritten by the one
    # below; the result is unused here.
    scores = numpy.array(means).reshape(len(learning_rate), len(number_of_trees))
    elapsed = time.time() - start
    print(f'Elapsed Duration: {elapsed}')

# Plot log-loss vs n_estimators, one curve per learning rate, using the
# `means` from the final (single-thread) run.
scores = numpy.array(means).reshape(len(learning_rate), len(number_of_trees))
for i, value in enumerate(learning_rate):
    pyplot.plot(number_of_trees, scores[i], label = 'learning_rate: ' + str(value))
pyplot.legend()
pyplot.xlabel('number_of_trees')
pyplot.ylabel('Log Loss')
pyplot.savefig('number_of_trees_x_axis_vs_log_loss_y_axis')
| true |
7c6ab0e7aae8ea942fef2dcd0968506f9ab81c04 | Python | subhamrex/Coding_Practice | /Python/Practice/Third_max.py | UTF-8 | 670 | 3.5625 | 4 | [] | no_license | import sys
def thirdLargest(arr, arr_size):
    """Return the third largest of the distinct values in arr.

    Falls back to max(arr) when fewer than three elements are given.
    Assumes the values are distinct; with duplicates (or fewer than
    three distinct values) the scan leaves the result at -sys.maxsize.

    Fixes: the original contained a stray bare ``if()`` statement,
    which is a SyntaxError.
    """
    if (arr_size < 3):
        return max(arr)

    # first pass: overall maximum
    first = arr[0]
    for i in range(1, arr_size):
        if (arr[i] > first):
            first = arr[i]

    # second pass: largest value strictly below `first`
    second = -sys.maxsize
    for i in range(0, arr_size):
        if (arr[i] > second and
                arr[i] < first):
            second = arr[i]

    # third pass: largest value strictly below `second`
    third = -sys.maxsize
    for i in range(0, arr_size):
        if (arr[i] > third and
                arr[i] < second):
            third = arr[i]

    return third
# Driver Code
# NOTE(review): with the duplicate-heavy input [1,1,2] the function's
# distinct-value scans find no value strictly below 1, so it prints
# -sys.maxsize rather than a "third largest" — the demo input looks wrong.
arr = [1,1,2]
n = len(arr)
print("The Third Largest",
      "element is", thirdLargest(arr, n))
| true |
def radix_sort(li):
    """Sort a list of non-negative integers in place using LSD radix sort.

    Buckets values by each decimal digit, least significant first; after
    the pass for the highest digit of max(li) the list is fully sorted.

    Fixes: guards the empty list (max([]) would raise ValueError).  Note
    the algorithm assumes non-negative integers.
    """
    if not li:
        return
    max_num = max(li)
    it = 0  # 计算位数,也可以直接用log(max_num)向下取整
    while 10 ** it <= max_num:
        buckets = [[] for _ in range(10)]
        for var in li:
            # 987 it=1 987//10->98, 98%10->8; it=2 987//100->9, 9%10=9
            digit = (var // 10 ** it) % 10
            buckets[digit].append(var)
        # 分桶完毕 — concatenate the buckets back into li, preserving order
        li.clear()
        for buc in buckets:
            li.extend(buc)
        it += 1
import random
# smoke test: shuffle 0..999 and verify (visually) that radix_sort restores order
li = list(range(1000))
random.shuffle(li)
radix_sort(li)
print(li)
| true |
31ae452eee44a63ded92150e8325179a84cf8e3c | Python | GersonFeDutra/Python-exercises | /CursoemVideo/2020/world_2/ex040.py | UTF-8 | 370 | 3.84375 | 4 | [] | no_license | SEMESTERS: int = 2
# `medium` is the average of the semester notes; SEMESTERS is defined above.
medium: float
notes: list = []

for i in range(SEMESTERS):
    notes.append(float(input(f'What is your {i + 1}° note? ')))

medium = sum(notes) / SEMESTERS

print(f'Your medium is {medium:.1f}.')
# ANSI escape codes colour the verdict: 31 = red, 33 = yellow, 32 = green
if medium < 5:
    print('\033[31mDISAPPROVED!')
elif medium < 7:
    print('\033[33mRECOVERY!')
else:
    print('\033[32mAPPROVED!')
| true |
aecbfbcad9ba3842382663558e3abce8cfa23454 | Python | jordanblakey/algorithms-dojo | /Python/MultiProcessing/sharing_data.py | UTF-8 | 499 | 3.421875 | 3 | [] | no_license | import multiprocessing
result = []


def calc_square(numbers, result, value):
    """Write numbers[i] ** 2 into the shared array and set the shared value.

    Intended to run in a child process: `result` (multiprocessing.Array)
    and `value` (multiprocessing.Value) live in shared memory, so the
    parent sees the writes after join().
    """
    value.value = 5.67
    for idx, num in enumerate(numbers):
        result[idx] = num * num
    print(f'inside process {result}')
if __name__ == "__main__":
    numbers = [2, 3, 5]
    result = multiprocessing.Array('i', 3)   # shared int array ('i' = signed int)
    value = multiprocessing.Value('d', 0.0)  # shared double
    p = multiprocessing.Process(target=calc_square, args=(numbers, result, value))
    p.start()
    p.join()
    # the child's writes are visible here because Array/Value are shared memory
    print(result[:])
    print(value.value)
| true |
922bc6a7ff9f6f8bbc8eb8b5b838e2a081814905 | Python | Blnjjj/GENEA_2020 | /VariationalAutoEncoder/vae.py | UTF-8 | 1,602 | 2.625 | 3 | [] | no_license | import torch
import torch.nn as nn
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
HIDDEN_DIM = 40
INPUT_DIM = 45
OUTPUT_DIM = 20


class VariationalAutoEncoder(nn.Module):
    """A minimal fully-connected VAE: 45 -> 40 -> 20-dim latent -> 40 -> 45.

    The decoder output passes through tanh, so reconstructions lie in [-1, 1].
    """

    def __init__(self):
        super(VariationalAutoEncoder, self).__init__()
        self.encoder_hidden = nn.Linear(INPUT_DIM, HIDDEN_DIM)
        self.encoder_mu = nn.Linear(HIDDEN_DIM, OUTPUT_DIM)
        self.encoder_sigma = nn.Linear(HIDDEN_DIM, OUTPUT_DIM)
        self.decoder_hidden = nn.Linear(OUTPUT_DIM, HIDDEN_DIM)
        self.decoder_out = nn.Linear(HIDDEN_DIM, INPUT_DIM)

    def encode(self, x):
        """Map an input batch to the latent mean and log-variance."""
        x = self.encoder_hidden(x)
        x = torch.relu(x)
        return self.encoder_mu(x), self.encoder_sigma(x)

    def reparametrize(self, mu, sigma):
        """Sample z = mu + eps * std with eps ~ N(0, 1).

        BUGFIX: the reparameterization trick requires standard-normal
        noise; the original used torch.rand_like (uniform on [0, 1)),
        which gives biased, non-Gaussian latent samples.  Use
        torch.randn_like instead.
        """
        std = torch.exp(0.5 * sigma)  # sigma holds log-variance
        eps = torch.randn_like(std)
        return mu + eps * std

    def decode(self, z):
        """Map a latent sample back to input space, squashed by tanh."""
        z = self.decoder_hidden(z)
        z = torch.relu(z)
        z = self.decoder_out(z)
        return torch.tanh(z)

    def forward(self, x):
        """Return (reconstruction, mu, sigma) for an input batch."""
        mu, sigma = self.encode(x)
        z = self.reparametrize(mu, sigma)
        return self.decode(z), mu, sigma
class VAELoss(_Loss):
    """Reconstruction (MSE) plus KL-divergence loss for the VAE."""

    def __init__(self, size_average=None, reduce=None, reduction='mean'):
        super().__init__(size_average, reduce, reduction)

    def forward(self, outputs, labels):
        """Compute MSE(reconstruction, labels) + mean per-sample KL term."""
        reconstruction, mu, sigma = outputs
        mse = F.mse_loss(reconstruction, labels)
        kld_per_sample = -0.5 * torch.sum(1 + sigma - mu ** 2 - sigma.exp(), dim=1)
        kld = torch.mean(kld_per_sample, dim=0)
        return mse + kld
| true |
39e3286b4fe9cdccd5142199f2dc684de5069e60 | Python | Mauzey/MSc-Data-Science-and-Business-Analytics | /COMP5000/Week-4/Q3.py | UTF-8 | 833 | 3.703125 | 4 | [] | no_license | # Q3: SQL and Python
# import and setup
import sqlite3
import pandas as pd
# Connect to (and create, if absent) the SQLite database file in the working dir.
connection = sqlite3.connect('student_record.db')
cursor = connection.cursor()

# task: create a script where the user can input an sql query, which is then executed


def execute_query(query, head=False):
    """
    execute a defined query
    :param head: (bool) return dataframe head (currently unused by the body)
    :param query: (string) the query to be executed
    :return: (dataframe) query results, or None if the query was invalid
    """
    print("[INFO] Executing Query...")
    try:
        cursor.execute(query)
        result = cursor.fetchall()
        return pd.DataFrame(result)
    except sqlite3.OperationalError:
        # invalid SQL: report and fall through, implicitly returning None
        print('[ERROR] Invalid query submitted, please check for mistakes and try again')


# Prompt the user once and print the resulting DataFrame (or None on error).
user_query = input("Enter an SQL Query:\n")
print(execute_query(user_query, head=False))
| true |
f736c6866167d2238394e08a9e6180432c4e8dfa | Python | maxxiefjv/ACVPR-ExpressionInvariantFaceRecognition | /Ananthu_face_detection/smile_face_detection_ananthu.py | UTF-8 | 2,478 | 3.578125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# Face Recognition + Smile Recognition
# Importing the libraries
import cv2
# Loading the cascades
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') # Load the cascade for the face.
smile_cascade = cv2.CascadeClassifier('haarcascade_smile.xml') # Load the cascade for the smile
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml') # Load the cascade for the eye

# Defining the function that will do the detection
def detect(grey, frame): # Create a function that takes as input the image in black and white (grey) and the original image (frame), and that will return the same image with the detector rectangles.
    faces = face_cascade.detectMultiScale(grey, 1.3, 5) # Apply the detectMultiScale method from the face cascade to locate one or several faces in the image.
    for(x,y,w,h) in faces: # For each detected face:
        cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2) # Paint a rectangle around the face.
        roi_grey = grey[y:y+h, x:x+w] # Get the region of interest in the black and white image.
        roi_color = frame[y:y+h, x:x+w] # Get the region of interest in the colored image.
        eyes = eye_cascade.detectMultiScale(roi_grey, 1.1, 16) # Apply the detectMultiScale method to locate one or several eyes within the face region previously obtained.
        for(ex,ey,ew,eh) in eyes:
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
        smiles = smile_cascade.detectMultiScale(roi_grey, 1.7, 27) # Apply the detectMultiScale method to locate the smile within the face region previously obtained.
        for(sx,sy,sw,sh) in smiles:
            cv2.rectangle(roi_color,(sx,sy),(sx+sw,sy+sh),(255,0,0),2)
    return frame # Return the image with the detector rectangles.

# Doing some face recognition with the webcam
video_capture = cv2.VideoCapture(0); # 0 -Internal webcam, 1- External Webcam
while True:
    _, frame = video_capture.read() # Get the last frame.
    grey = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY) # Similar to rgb2gray in MATLAB
    canvas = detect(grey,frame)
    cv2.imshow('Video',canvas) # Display the output.
    if cv2.waitKey(1) & 0xFF == ord('q'): # Press the 'q' button in keyboard to stop running the program
        break
video_capture.release() # Turn the webcam off.
cv2.destroyAllWindows() # Destroy all the windows inside which the images were displayed.
954983df98519dada265945e954455ef11266b86 | Python | jeffknupp/link | /link/link.py | UTF-8 | 18,590 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
"""
link
~~~~~~~~~~~~
The link module helps you connect to all of the data sources you need through a
simple configuration
Sample Config to connect to mysql::
{
"dbs":{
"my_db": {
"wrapper": "MysqlDB",
"host": "mysql-master.123fakestreet.net",
"password": "<password>",
"user": "<user>",
"database": "<database_name>"
}
}
}
Sample Code::
In [3]: from link import lnk
# uses the keys from your configuration to look up and create the
# appropriate objects
In [35]: my_db = lnk.dbs.my_db
In [36]: data = my_db.select('select id from my_table')
:copyright: (c) 2013 by David Himrod
:license: Apache2, see LICENSE for more details.
"""
import os
import sys
import inspect
import json
from utils import load_json_file
from subprocess import Popen
from common import Cacheable
# To set up logging manager
from _logging_setup import LogHandler
# this gets the current directory of link
lnk_dir = os.path.split(os.path.abspath(__file__))[0]
class Callable(object):
    """Base class for objects that can be invoked to run a shell command."""

    @property
    def command(self):
        """Default shell command; subclasses are expected to override this."""
        raise NotImplementedError('You have not defined a command for this Callable Object')

    def __call__(self, command=None, wait=True):
        """Run *command* (or self.command when none is given) in a shell.

        Returns the Popen object, after waiting for it when wait is True;
        returns None when the resolved command is empty.
        """
        cmd = command if command else self.command
        if not cmd:
            return None
        proc = Popen(cmd, shell=True)
        if wait:
            proc.wait()
        return proc
class Commander(object):
    """
    Given a dictionary of commands the commander can run them very easily.

    Commands are either passed in explicitly or defaulted to the file
    names found in base_dir (each file name mapping to itself).

    Fixes: ``dict.has_key`` (removed in Python 3; ``in`` works in both
    Python 2 and 3) and the bare ``except:`` in list_base_dir, which
    also swallowed KeyboardInterrupt/SystemExit.
    """
    def __init__(self, commands=None, base_dir=''):
        self.base_dir = base_dir
        self.commands = {}
        if commands:
            self.commands = commands
        elif not commands and self.base_dir:
            # default each file in base_dir to a command of the same name
            self.commands = dict([(key, key) for key in self.list_base_dir()])
        else:
            self.commands = {}

    def set_base_dir(self, base_dir):
        """
        set the base dir which will uncache the commands it has stored
        """
        self.base_dir = base_dir

    def list_base_dir(self):
        """
        list what is in the base_dir, nothing if doesnt exist
        """
        try:
            return os.listdir(self.base_dir)
        except OSError:
            # missing/invalid directory: behave as if it were empty
            return []

    def has_command(self, name):
        """
        Returns true if this commander has a command by this name
        """
        return name in self.commands

    def run_command(self, name="__default__", base_dir='', *kargs, **kwargs):
        """
        Run the command in your command dictionary.

        The command (plus any positional args) is joined with '/' and
        prefixed with base_dir, then executed in a shell; the Popen
        object is returned after waiting.  Raises Exception when the
        name is unknown.
        """
        if not base_dir:
            base_dir = self.base_dir
        if self.commands:
            cmd = self.commands.get(name)
            if cmd:
                if not isinstance(cmd, list):
                    cmd = [cmd]
                # make a copy of it so you don't change the stored command
                else:
                    cmd = cmd[:]
                cmd.extend(map(str, kargs))
                cmd = '%s/%s' % (base_dir, "/".join(cmd))
                p = Popen(cmd, shell=True)
                p.wait()
                return p
        raise Exception("No such command %s " % name)

    def command(self, name=None):
        """
        Returns the command function that you can pass arguments into
        """
        def runner(*kargs, **kwargs):
            return self.run_command(name, *kargs, **kwargs)
        return runner
class Link(object):
    """
    Link is a singleton that keeps all configuration for the program. It is
    used by any Linked object so that there can be a shared configuration across
    the entire program. The configuration is stored in json in a file called
    .vlink.config

    NOTE: this module is Python 2 code (print statements, ``unicode``).
    """
    __link_instance = None
    __msg = None

    # Configuration locations: $LNK_DIR wins, else ~/.link.
    LNK_USER_DIR = '%s/.link' % os.getenv('HOME')
    LNK_DIR = os.getenv('LNK_DIR') or LNK_USER_DIR
    LNK_CONFIG = LNK_DIR + "/link.config"
    DEFAULT_CONFIG = {"dbs": {}, "apis": {}}

    @classmethod
    def plugins_directory(cls):
        """
        Tells you where the external wrapper plugins exist
        (creating the plugins subdirectory on first use)
        """
        if cls.LNK_DIR and os.path.exists(cls.LNK_DIR):
            plugin_dir = cls.LNK_DIR + "/plugins"
            if not os.path.exists(plugin_dir):
                os.makedirs(plugin_dir)
            return plugin_dir
        raise Exception("Problem creating plugins, Link directory does not exist")

    @classmethod
    def plugins_directory_tmp(cls):
        """
        Tells you where the tmp subdirectory of the external wrapper
        plugins directory is, creating it on first use
        """
        plugins = cls.plugins_directory()
        tmp = plugins + "/tmp"
        if not os.path.exists(tmp):
            os.makedirs(tmp)
        return tmp

    @classmethod
    def config_file(cls):
        """
        Gives you the global config based on the hierarchical lookup::

            first check $LNK_DIR/link.config
            then check ./link.config
        """
        # if there is a user global then use that
        if os.path.exists(cls.LNK_CONFIG):
            return cls.LNK_CONFIG

        # if they are in IPython and there is no user config
        # lets create the user config for them
        if "IPython" in sys.modules:
            if not os.path.exists(cls.LNK_DIR):
                print "Creating user config dir %s " % cls.LNK_DIR
                os.makedirs(cls.LNK_DIR)

            print "Creating default user config "
            new_config = open(cls.LNK_CONFIG, 'w')
            new_config.write(json.dumps(cls.DEFAULT_CONFIG))
            new_config.close()
            return cls.LNK_CONFIG

        return None

    @classmethod
    def instance(cls):
        """
        Gives you a singleton instance of the Link object
        which shares your configuration across all other Linked
        objects. This is called to create lnk and for the
        most part should not be called again
        """
        if cls.__link_instance:
            return cls.__link_instance

        cls.__link_instance = Link()
        return cls.__link_instance

    def _get_all_wrappers(self, mod_or_package):
        """
        Given a module or package name in returns all
        classes that could possibly be a wrapper in
        a dictionary
        """
        try:
            wrapper_mod = __import__(mod_or_package, fromlist=['*'])
        except ImportError as e:
            raise

        # get all classes by name and put them into a dictionary
        wrapper_classes = dict([(name, cls) for name, cls in
                                inspect.getmembers(wrapper_mod) if
                                inspect.isclass(cls)])
        return wrapper_classes

    def load_wrappers(self):
        """
        loads up all the wrappers that can be accessed right now
        (standard ones, then external directories, then external packages)
        """
        # load all the standard ones first
        self.wrappers = self._get_all_wrappers('link.wrappers')

        directories = self._config.get('external_wrapper_directories')
        self.load_wrapper_directories(directories)

        packages = self._config.get('external_wrapper_packages')
        self.load_wrapper_packages(packages)

    def load_wrapper_directories(self, directories):
        """
        Load in all of the external_wrapper_directories
        """
        if directories:
            for ext_path in directories:
                path_details = ext_path.rstrip('/').split('/')
                # the path we add to sys.path is this without the last word
                path = '/'.join(path_details[:-1])
                mod = path_details[-1]
                # TODO: Need to put an error here if this directory does not
                # exist
                if path not in sys.path:
                    sys.path.append(path)
                wrapper_classes = self._get_all_wrappers(mod)
                self.wrappers.update(wrapper_classes)

    def load_wrapper_packages(self, packages):
        """
        load in all of the external_wrapper_packages
        """
        # load in all the packages
        if packages:
            for ext_mod in packages:
                wrapper_classes = self._get_all_wrappers(ext_mod)
                self.wrappers.update(wrapper_classes)

    @property
    def _config(self):
        """
        Lazy load the config so that any errors happen then
        """
        if not self.__config_file:
            # If there is not config file then return an error.
            # TODO: Refactor the config code, it's overly confusing
            raise Exception("""No config found. Set environment variable LNK_DIR to
                            point to your link configuration directory or create a
                            #.link/link.config file in your HOME directory""")
        if not self.__config:
            self.__config = load_json_file(self.__config_file)
        return self.__config

    def fresh(self, config_file=None, namespace=None):
        """
        sets the environment with a fresh config or namespace that is not
        the defaults if config_file or namespace parameters are given
        """
        if not config_file:
            config_file = self.config_file()

        self.__config_file = config_file
        self.__config = None
        # I don't think i want to support this feature anymore
        # self._commander = self.__config.get('__cmds__')
        # self._commander = self.__config.get('__scripts__')
        self.namespace = namespace
        self.wrappers = {}

    def __init__(self, config_file=None, namespace=None):
        """
        Create a new instance of the Link. Should be done
        through the instance() method.
        """
        # this will be lazy loaded
        self.__config = None
        self.wrappers = {}
        self.fresh(config_file, namespace)

    def configure_msg(self, overrides={}, keep_existing=True, verbose=False):
        """
        Optional method to supplement or override logging configuration from a passed-in
        dictionary; OR to turn on verbose logging. If this method is not invoked,
        logging will be set up with any logging configurations found in the link config.

        This method is useful if you want to define job- or application-specific logging
        setups, so that you do not have to keep changing link.config.

        NOTE: if this is called after lnk.msg has been used, it will create a new
        LogHandler that will override the existing one.

        :param dict overrides: dictionary of logging options
        :param bool keep_existing: whether to supplement (True) or completely override
            (False) any existing logging (i.e. link.config options).
        """
        # NOTE(review): mutable default argument ({}) and the in-place
        # .update() on the dict returned by self._config.get('msg') mutate
        # shared state — confirm this is intended before changing.
        if not keep_existing:
            log_conf = overrides
        else:
            log_conf = self._config.get('msg', {})
            log_conf.update(overrides)

        self.__msg = LogHandler(log_conf, verbose)
        return self.__msg

    # The fact that Link is a singleton should ensure that only one instance of
    # LogHandler will be created (if Link is used correctly...)
    @property
    def msg(self, verbose=False):
        """
        Get (and create if doesn't already have) the LoggingHandler for link

        :param bool verbose: log to stdout (DEBUG and higher) in addition to any other
            logging setup

        NOTE(review): property getters are always invoked without
        arguments, so ``verbose`` is effectively always False here.
        """
        if not self.__msg:
            self.__msg = LogHandler(self._config.get('msg', {}), verbose)
        return self.__msg

    def __getattr__(self, name):
        """
        The lnk object will first look for its native functions to call.
        If they aren't there then it will create a wrapper for the configuration
        that is led to by "name"
        """
        # NOTE(review): the broad except hides any error raised during normal
        # attribute access and retries it as a config lookup.
        try:
            return self.__getattribute__(name)
        except Exception as e:
            return self(wrap_name=name, **self._config[name].copy())

    def config(self, config_lookup=None):
        """
        If you have a conf_key then return the
        dictionary of the configuration.  Dotted lookups
        ("dbs.my_db") walk nested dictionaries.
        """
        ret = self._config

        if config_lookup:
            try:
                for value in config_lookup.split('.'):
                    ret = ret[value]
            except KeyError:
                raise KeyError('No such configured object %s' % config_lookup)
            return ret

        return ret

    def __call__(self, wrap_name=None, *kargs, **kwargs):
        """
        Get a wrapper given the name or some arguments
        """
        wrap_config = {}
        if wrap_name:
            wrap_config = self.config(wrap_name)
            # if its just a string, make a wrapper that is preloaded with
            # the string as the command.
            if isinstance(wrap_config, str) or isinstance(wrap_config, unicode):
                return Wrapper(__cmd__=wrap_config)
            wrap_config = wrap_config.copy()

        # if they override the config then update what is in the config with the
        # parameters passed in
        if kwargs:
            wrap_config.update(kwargs)

        # if it is here we want to remove before we pass through
        wrapper = self._get_wrapper(wrap_config.pop('wrapper', None))
        return wrapper(wrap_name=wrap_name, **wrap_config)

    def _get_wrapper(self, wrapper):
        """
        calls back the function with a fully wrapped class,
        defaulting to the plain Wrapper when no type was configured
        """
        # if they tell us what type it should be then use it
        if wrapper:
            try:
                # look up the module in our wrappers dictionary
                if not self.wrappers:
                    self.load_wrappers()
                return self.wrappers[wrapper]
            except AttributeError as e:
                raise Exception('Wrapper cannot be found by the' +
                                ' link class when loading: %s ' % (wrapper))
        return Wrapper

    def install_plugin(self, file=None, install_global=False):
        """
        Install the plugin in either their user plugins directory or
        in the global plugins directory depending on what they want to do
        """
        if install_global:
            # global: the plugins directory next to this module
            cp_dir = os.path.dirname(__file__) + '/plugins'
        else:
            cp_dir = self.plugins_directory()

        import shutil
        print "installing %s into directory %s " % (file, cp_dir)
        try:
            shutil.copy(file, cp_dir)
        except:
            print "error moving files"
lnk = Link.instance()  # the module-level singleton used throughout link
class Wrapper(Callable):
    """
    The wrapper wraps a piece of the configuration.

    Unknown attribute lookups are delegated to the wrapped object when
    one exists, otherwise to a nested configuration lookup through lnk.
    """
    _wrapped = None
    cmdr = None

    def __init__(self, wrap_name=None, wrapped_object=None, **kwargs):
        super(Wrapper, self).__init__()
        self.wrap_name = wrap_name
        self._wrapped = wrapped_object

        # commanders: explicit __cmds__ config, link's own scripts dir,
        # and a ./scripts dir relative to the current working directory
        self.commander = Commander(kwargs.get("__cmds__"))
        self.lnk_script_commander = Commander(base_dir =
                                              '%s/scripts' % lnk_dir)
        self.script_commander = Commander(base_dir =
                                          '%s/scripts' % os.getcwd())
        self.cmdr = self.script_commander
        self.loaded = True
        self.cache = {}
        # store via __dict__ to avoid triggering __getattr__ machinery
        self.__dict__['__link_config__'] = kwargs

    def __getattr__(self, name):
        """
        wrap a special object if it exists
        """
        # first look if the Wrapper object itself has it
        try:
            return self.__getattribute__(name)
        except AttributeError as e:
            pass

        if self._wrapped is not None:
            # if it has a getattr then try that out otherwise go to getattribute
            # TODO: Deeply understand __getattr__ vs __getattribute__.
            # this might not be correct
            try:
                return self._wrapped.__getattr__(name)
            except AttributeError as e:
                try:
                    return self._wrapped.__getattribute__(name)
                except AttributeError as e:
                    raise AttributeError("No Such Attribute in wrapper %s" % name)

        # then it is trying to unpickle itself and there is no setstate
        # TODO: Clean this up, it's crazy and any changes cause bugs
        if name == '__setstate__':
            raise AttributeError("No such attribute found %s" % name)

        # call the wrapper to create a new one for the nested config key
        wrapper = '%s.%s' % (self.wrap_name, name)
        if self.wrap_name:
            return lnk(wrapper)

        raise AttributeError("No such attribute found %s" % name)

    def config(self):
        # the raw configuration kwargs this wrapper was created with
        return self.__link_config__
def install_ipython_completers():  # pragma: no cover
    """Register the Wrapper and Link types with IPython's tab completion
    machinery, so that wrapped-object members, config keys and commander
    commands show up as attribute completions."""
    from IPython.utils.generics import complete_object
    import inspect

    @complete_object.when_type(Wrapper)
    def complete_wrapper(obj, prev_completions):
        """
        Add in all the methods of the _wrapped object so its
        visible in iPython as well
        """
        if obj._wrapped:
            obj_members = inspect.getmembers(obj._wrapped)
            prev_completions += [c[0] for c in obj_members]

        # also complete config keys and every commander's command names
        prev_completions += [c for c in obj.config().keys()]
        prev_completions += [command for command in obj.commander.commands.keys()]
        prev_completions += [command for command in obj.script_commander.commands.keys()]
        prev_completions += [command for command in obj.lnk_script_commander.commands.keys()]
        return prev_completions

    @complete_object.when_type(Link)
    def complete_link(obj, prev_completions):
        """
        Add the top-level configuration keys as completions on lnk itself
        """
        return prev_completions + [c for c in obj.config().keys()]
# Importing IPython brings in about 200 modules, so we want to avoid it unless
# we're in IPython (when those modules are loaded anyway).
# Code attributed to Pandas, Thanks Wes
# Only hook into IPython when it is already loaded; best-effort, so any
# failure during completer registration is deliberately ignored.
if "IPython" in sys.modules:  # pragma: no cover
    try:
        install_ipython_completers()
    except Exception:
        pass
| true |
277e526ded91446f8965124e163fd481e28f4239 | Python | btil-99/skin-lesion-segmentation | /download_data.py | UTF-8 | 2,415 | 3.109375 | 3 | [] | no_license | from tqdm import tqdm
import requests
import os
import patoolib
import zipfile
# Download data from a specified URL
def download_data(url, directory_name):
    """Stream-download *url* to the local path *directory_name* with a progress bar.

    The download is chunked (1 KiB blocks) so arbitrarily large files can be
    fetched without holding them in memory.

    NOTE(review): the HTTP status code is never checked (no raise_for_status),
    so a 404 body would be written to disk as-is — confirm this is acceptable.
    """
    response = requests.get(url, stream=True) # HTTP GET Request for url
    # Servers that omit Content-Length yield total 0; the progress bar then
    # runs without a known total and the final size check below is skipped.
    total_size_in_bytes = int(response.headers.get('content-length', 0))
    block_size = 1024 # 1 Kilobyte
    progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
    with open(directory_name, 'wb') as file:
        for data in response.iter_content(block_size):
            # File too large for singular download so stream the download in chunks
            progress_bar.update(len(data))
            file.write(data)
    progress_bar.close()
    # Finally, close the progress bar
    # Error check: fewer bytes received than advertised means a truncated download.
    if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
        print("ERROR, something went wrong")
# Unpack zipped files
def unzip_file(folder):
    """Extract the archive at *folder* (.rar or .zip) and delete the archive.

    .zip archives are extracted into the hard-coded 'ISIC18Dataset' directory;
    .rar archives are handed to patoolib (which presumably extracts into the
    current directory — TODO confirm). Silently does nothing if *folder*
    does not exist.
    """
    # Check whether file exists
    if os.path.exists(folder):
        # Check file type and unzip with appropriate methods
        if folder.endswith('.rar'):
            patoolib.extract_archive(folder)
        elif folder.endswith('.zip'):
            with zipfile.ZipFile(folder, 'r') as zip_ref:
                # display progress bar
                for content in tqdm(zip_ref.infolist(), desc="Extracting ISIC data "):
                    zip_ref.extract(content, 'ISIC18Dataset')
        # The archive itself is removed after extraction to save disk space.
        os.remove(folder)
# Download ISIC data from link and unzip. Unzip PH2 data if exists.
def download_and_unzip():
    """Fetch the ISIC 2018 lesion images and ground-truth masks, then unpack them.

    The download is skipped when either the zip files or the previously
    extracted/preprocessed directories ('ISICNumpy', 'ISIC18Dataset') already
    exist. The PH2 archive is only unpacked, never downloaded (it must be
    placed in the working directory manually).
    """
    zip_location_lesion_imgs = 'isic-image_data.zip'
    zip_location_mask_imgs = 'isic-mask_data.zip'
    isic_lesion_images_url = 'https://isic-challenge-data.s3.amazonaws.com/2018/ISIC2018_Task1-2_Training_Input.zip'
    isic_gt_masks_url = 'https://isic-challenge-data.s3.amazonaws.com/2018/ISIC2018_Task1_Training_GroundTruth.zip'
    # Check whether ISIC data has been downloaded, only download if it hasn't previoualy been downloaded
    if not os.path.exists(zip_location_lesion_imgs) and not os.path.exists('ISICNumpy') and not os.path.exists('ISIC18Dataset'):
        download_data(isic_lesion_images_url, zip_location_lesion_imgs)
        download_data(isic_gt_masks_url, zip_location_mask_imgs)
        unzip_file(zip_location_lesion_imgs)
        unzip_file(zip_location_mask_imgs)
    # unzip_file is a no-op when the rar file is absent, so this is safe.
    unzip_file('PH2Dataset.rar')
| true |
0b2f0c52efc96abbb09d4d8e047e3078560593a6 | Python | Gani32231/Python-Caluclator | /Caluclatematrx.py | UTF-8 | 600 | 3.0625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri May 29 18:33:41 2020
@author: wwwds
"""
import numpy as np
import tkinter as tk
import tkinter.ttk as ttk
# Bug fix: tkinter's root-window class is `Tk`, not `TK` — `tk.TK()` raises
# AttributeError before the GUI can even start.
win = tk.Tk()
win.title("2x3 Matrix Solver")
def multiply():
    """Read two matrix rows and three more rows from the entry widgets,
    form a 2xN and an Nx3-style matrix, and print their product.

    NOTE(review): entry1..entry5 are presumably tkinter Entry widgets created
    elsewhere in this module — confirm they exist before this callback fires.
    NOTE(review): mapping int over the raw string converts each CHARACTER, so
    only single-digit entries typed without separators work — verify intended.
    """
    a = entry1.get()
    a = list(map(int,a))
    b = entry2.get()
    b = list(map(int,b))
    c = entry3.get()
    c = list(map(int,c))
    d = entry4.get()
    d = list(map(int,d))
    e = entry5.get()
    e = list(map(int,e))
    # X stacks the first two rows, Y the remaining three; np.dot requires
    # X's column count to equal Y's row count (i.e. len(a) == 3).
    X = np.array((a,b))
    Y = np.array((c,d,e))
    Z=np.dot(X,Y)
    print(Z)
label1 = tk.Label(win,text="Enter first row matrix") | true |
2ccb2da7bdff3d5b385c9a6ed570547226b2fcfc | Python | ebasuke226/ebaGit | /04.GuessBitcoin/GuessBitcoin.py | UTF-8 | 2,380 | 3.125 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from pandas import DataFrame
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
import math
# 終値情報を持つcsvから価格を得ます
def importData():
    """Load the CoinDesk export and return the closing prices.

    Returns a 2-D numpy array of shape (n_samples, 1) holding the
    'Close Price' column of the CSV file.
    """
    frame = pd.read_csv('coindesk-bpi-USD-close_data-2018-03-14_2018-03-21.csv')
    closes = frame[['Close Price']]
    return np.array(closes)
data = importData()
# Look back up to 15 hours of history for each sample.
days_ago = 15
X = np.zeros((len(data), days_ago))
# Shift the price series column by column so each row holds the recent history.
for i in range(0, days_ago):
    X[i:len(data), i] = data[0:len(data) - i, 0]
# Column 0 is the current price (target); the remaining columns are features.
Y =X[:,0]
X = X[:,1:]
# How many hours ahead to predict.
predict_time = 1
# For training, rows 50~2800 could be used to avoid biased data; the whole
# series is also acceptable (kept commented out by the original author).
#train_x = X[50:2800,:]
#train_y = Y[50:2800]
# Everything else is used as test data.
test_x = X[0:len(X)-predict_time,:]
test_y = Y[0:len(Y)-predict_time]
# Fit an ordinary least-squares linear regression model.
# NOTE(review): the model is fit on ALL rows and then evaluated on a subset
# of the same rows, so the test set overlaps the training data — confirm
# this is intentional for this exercise.
model = linear_model.LinearRegression()
model.fit(X,Y)
# Predict on the held-out rows.
pred_y = model.predict(test_x)
# Collect predictions next to the actual values for inspection.
result = pd.DataFrame(pred_y)
result.columns = ['pred_y']
result['test_y'] = test_y
# Plot
plt.plot(range(0,len(result)), test_y, label='Actual price', color='blue')
plt.plot(range(0,len(result)), pred_y, label='Predicted price', color='red')
plt.xlabel('Hours')
plt.ylabel('Price ($)')
plt.title('Bitcoin Price')
plt.grid(True)
plt.legend()
plt.show()
# Zoomed-in plot (kept disabled by the original author).
#plt.plot(range(0,len(result[700:800])), test_y[700:800], label='Actual price', color='blue', marker = 'o')
#plt.plot(range(0,len(result[700:800])), pred_y[700:800], label='Predicted price', color='red', marker ='x')
#plt.xlabel('Hours')
#plt.ylabel('Price ($)')
#plt.title('Bitcoin Price')
#plt.grid(True)
#plt.legend()
#plt.show()
# The printed labels below are Japanese for: regression coefficients,
# intercept (error), and coefficient of determination (R^2).
print('回帰係数')
print(model.coef_)
print('切片 (誤差)')
print(model.intercept_)
print('決定係数')
print(model.score(X, Y))
| true |
403856449d5bfb707081b8fdb63c30edea353b15 | Python | xiliuhk/CryptoMobileGUI | /crypt.py | UTF-8 | 2,288 | 2.890625 | 3 | [] | no_license | __author__ = 'x37liu'
import os
import sys
import argparse
from wrapper import Cipher
def main():
    """Parse the command line (task, key, count, direction, bearer, data,
    optional I/O mode and output path) and dispatch to parseArgs.

    The description shown by --help is read verbatim from the local
    'helpFile' cheat sheet, which must exist next to the script.
    """
    # prepare cmd cheat sheet
    helpFile = open('helpFile', 'r')
    helpStr = helpFile.read()
    helpFile.close()
    # read args
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description = helpStr)
    parser.add_argument('task',
                        choices = ['ip', 'ci', 'de'],
                        help = 'Task to complete')
    parser.add_argument('key', type = str, help = 'AES key')
    parser.add_argument('count', type = str, help = 'Count')
    parser.add_argument('direct', type = str, help = 'Direction')
    parser.add_argument('bearer', type = int, help = 'Bearer')
    parser.add_argument('data', type = str, help = 'Stream')
    # mode: two letters, input/output each either 's' (string/stdout) or 'f' (file).
    parser.add_argument('mode', nargs = '?', default='ss',
                        choices = ['ss', 'fs', 'sf', 'ff'],
                        help = 'Data I/O options')
    parser.add_argument('output', nargs = '?', type = str, help = 'Output path')
    args = parser.parse_args()
    parseArgs(args)
def output(data, path = ""):
    """Write *data* to stdout (when *path* is empty) or to the file at *path*.

    Parameters
    ----------
    data : str
        Text to emit.
    path : str
        Destination file path; the empty string means standard output.
    """
    if path == "":
        sys.stdout.write(data)
        return
    # Context manager guarantees the handle is closed even if write() raises;
    # the original opened/closed manually and leaked the handle on error
    # (it also shadowed the name `file`).
    with open(path, 'w') as out_file:
        out_file.write(data)
def parseArgs(args):
    """Run the requested cipher task and route its output.

    Input comes from a file when mode is 'ff'/'fs', otherwise directly from
    the `data` argument. Output goes to stdout for 'ss'/'fs', to `args.output`
    when given, otherwise to a path prompted from the user.

    NOTE(review): `raw_input` below is Python 2 only; under Python 3 that
    branch raises NameError — confirm which interpreter this targets.
    """
    # count is supplied on the command line in hexadecimal.
    count = int(args.count, 16)
    if args.mode == 'ff' or args.mode == 'fs':
        if not os.path.exists(args.data):
            sys.stdout.write('Invalid Data!\n')
            return
        file = open(args.data, 'r')
        data = file.read()
        file.close()
    else:
        data = args.data
    cipher = Cipher()
    out = ""
    # Dispatch on the task: 'ip' integrity protection, 'ci' encrypt,
    # anything else ('de') decrypt.
    if args.task == 'ip':
        out = cipher.IP(args.key, count, args.direct, args.bearer, data)
    elif args.task == 'ci':
        out = cipher.encrypt(args.key, count, args.direct, args.bearer, data)
    else:
        out = cipher.decrypt(args.key, count, args.direct, args.bearer, data)
    if args.mode == 'ss' or args.mode == 'fs':
        output(out, "")
    elif args.output:
        output(out, args.output)
    else:
        outPath = raw_input('Please specify output path:')
        output(out, outPath)
main() | true |
dd4836eb343d13a2042146a4df086af1333b1ca3 | Python | mrmundt/pyomo | /pyomo/contrib/mpc/data/get_cuid.py | UTF-8 | 4,346 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | # ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright (c) 2008-2022
# National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
from pyomo.core.base.componentuid import ComponentUID
from pyomo.util.slices import slice_component_along_sets
from pyomo.core.base.indexed_component_slice import IndexedComponent_slice
from pyomo.dae.flatten import get_slice_for_set
def get_indexed_cuid(var, sets=None, dereference=None, context=None):
    """
    Attempts to convert the provided "var" object into a CUID with
    with wildcards.

    Arguments
    ---------
    var:
        Object to process
    sets: Tuple of sets
        Sets to use if slicing a vardata object
    dereference: None or int
        Number of times we may access referent attribute to recover a
        "base component" from a reference.
    context: Block
        Block with respect to which slices and CUIDs will be generated

    Returns
    -------
    ComponentUID
        A CUID with wildcards for the indexing set(s) of ``var``.
    """
    # TODO: Does this function have a good name?
    # Should this function be generalized beyond a single indexing set?
    if isinstance(var, ComponentUID):
        return var
    elif isinstance(var, (str, IndexedComponent_slice)):
        # TODO: Raise error if string and context is None
        return ComponentUID(var, context=context)
    # At this point we are assuming var is a Pyomo Var or VarData object.
    # Is allowing dereference to be an integer worth the confusion it might
    # add?
    if dereference is None:
        # Does this branch make sense? If given an unattached component,
        # we dereference, otherwise we don't dereference.
        remaining_dereferences = int(var.parent_block() is None)
    else:
        remaining_dereferences = int(dereference)
    if var.is_indexed():
        if var.is_reference() and remaining_dereferences:
            remaining_dereferences -= 1
            referent = var.referent
            if isinstance(referent, IndexedComponent_slice):
                return ComponentUID(referent, context=context)
            else:
                # If dereference is None, we propagate None, dereferencing
                # until we either reach a component attached to a block
                # or reach a non-reference component.
                dereference = (
                    dereference if dereference is None else remaining_dereferences
                )
                # NOTE: Calling this function recursively
                return get_indexed_cuid(referent, sets, dereference=dereference)
        else:
            # Assume that var is indexed only by time
            # TODO: Should we call slice_component_along_sets here as well?
            # To cover the case of b[t0].var, where var is indexed
            # by a set we care about, and we also care about time...
            # But then maybe we should slice only the sets we care about...
            # Don't want to do anything with these sets unless we're
            # presented with a vardata...
            #
            # Should we call flatten.slice_component_along_sets? Then we
            # might need to return/yield multiple components here...
            # I like making this a "simple" function. The caller can call
            # slice_component_along_set on their input data if they expect
            # to have components indexed by multiple sets.
            #
            # TODO: Assert that we're only indexed by the specified set(s)?
            # (If these sets are provided, of course...)
            index = tuple(get_slice_for_set(s) for s in var.index_set().subsets())
            return ComponentUID(var[index], context=context)
    else:
        if sets is None:
            raise ValueError(
                "A ComponentData %s was provided but no set. We need to know\n"
                "what set this component should be indexed by." % var.name
            )
        slice_ = slice_component_along_sets(var, sets)
        return ComponentUID(slice_, context=context)
| true |
8030df2da810446a4ad2b2640e49a0e2c9c20a5f | Python | wsdxl/excises_python | /excise_unittest/练习代码/hander_request.py | UTF-8 | 1,781 | 2.640625 | 3 | [] | no_license | """
============================
Author : XiaoLei.Du
Time : 2020/4/11 15:42
E-mail : 506615839@qq.com
File : hander_request.py
============================
"""
import requests
class HandleRequest:
    """Send HTTP requests for interfaces that use token-based authentication."""

    def send(self, url, method, data=None, json=None, params=None, headers=None):
        """Dispatch one HTTP request.

        :param url: target URL
        :param method: HTTP verb, case-insensitive: 'post', 'patch' or 'get'
        :param data: form-encoded body (POST/PATCH)
        :param json: JSON body (POST/PATCH)
        :param params: query-string parameters (GET)
        :param headers: request headers
        :return: the requests.Response, or None for an unknown method
        """
        # Normalise the verb so callers may pass 'POST', 'Post', etc.
        method = method.lower()
        if method == 'post':
            return requests.post(url=url, json=json, data=data, headers=headers)
        elif method == 'patch':
            # Bug fix: the original called requests.post here, so every PATCH
            # request was silently sent as a POST.
            return requests.patch(url=url, json=json, data=data, headers=headers)
        elif method == 'get':
            # Bug fix: the original also called requests.post for GET.
            return requests.get(url=url, params=params, headers=headers)
class HandeSession:
    """Send HTTP requests for interfaces that use session (cookie) authentication."""

    def __init__(self):
        # One shared session so cookies persist across calls.
        self.se = requests.session()

    def send(self, url, method, data=None, json=None, params=None, headers=None):
        """Dispatch one HTTP request on the shared session.

        :param url: target URL
        :param method: HTTP verb, case-insensitive: 'post', 'patch' or 'get'
        :param data: form-encoded body (POST/PATCH)
        :param json: JSON body (POST/PATCH)
        :param params: query-string parameters (GET)
        :param headers: request headers
        :return: the requests.Response, or None for an unknown method
        """
        # Normalise the verb so callers may pass 'POST', 'Post', etc.
        method = method.lower()
        if method == 'post':
            return self.se.post(url=url, json=json, data=data, headers=headers)
        elif method == 'patch':
            # Bug fix: the original sent PATCH requests with session.post.
            return self.se.patch(url=url, json=json, data=data, headers=headers)
        elif method == 'get':
            # Bug fix: the original sent GET requests with session.post.
            return self.se.get(url=url, params=params, headers=headers)
if __name__ == '__main__':
    # Smoke test: log in against the demo API and print the JSON response.
    login_url = 'http://api.lemonban.com/futureloan/member/login'
    # Login payload (demo account credentials).
    data = {'mobile_phone': '13641878150',
            'pwd': '12345678'
            }
    headers = {"X-Lemonban-Media-Type": "lemonban.v2", "Content-Type": "application/json"}
    http=HandleRequest()
    res=http.send(url=login_url,method='POST',json=data,headers=headers)
    print(res.json())
4b17c1c972f79e5efc9438f2f0993ecf0ee572af | Python | xServo/Tic-Tac-Toe | /tictactoe.py | UTF-8 | 1,416 | 4 | 4 | [] | no_license | #Tic-Tac-Toe
import os
row1 = [" ", " ", " "]
row2 = [" ", " ", " "]
row3 = [" ", " ", " "]
turns = 1
def board():
    """Clear the terminal and redraw the 3x3 grid from the row1/row2/row3 globals."""
    os.system("clear")
    grid = (row1, row2, row3)
    for position, cells in enumerate(grid):
        print(" | | ")
        print(f" {cells[0]} | {cells[1]} | {cells[2]} ")
        # A heavy separator follows the first two rows only.
        if position < 2:
            print("___|___|___")
    print(" | | ")
print("Welcome to Tic-Tac-Toe.\nWhen the game is finished type \"done\"")
# Main game loop: player 1 places "x", player 2 places "o"; typing "done"
# for a row resets the board and starts a new round.
while True:
    # --- player 1 (marks "x") ---
    board()
    # Bug fix: this prompt said "Player 2" even though it is player 1's turn.
    row = input("\nPlayer 1:\nPick your row. ")
    if row == "done":
        row1 = [" ", " ", " "]
        row2 = [" ", " ", " "]
        row3 = [" ", " ", " "]
        input("Game over.\nPress enter to play again.")
        # Bug fix: restart the round instead of falling through and asking
        # for a column after the board was just reset.
        continue
    column = input("Pick your column. ")
    if row == "1":
        row1[int(column) - 1] = "x"
    elif row == "2":
        row2[int(column) - 1] = "x"
    elif row == "3":
        row3[int(column) - 1] = "x"
    # --- player 2 (marks "o") ---
    board()
    row = input("\nPlayer 2:\nPick your row. ")
    if row == "done":
        row1 = [" ", " ", " "]
        row2 = [" ", " ", " "]
        row3 = [" ", " ", " "]
        input("Game over.\nPress enter to play again.")
        continue
    os.system("clear")
    column = input("Pick your column. ")
    if row == "1":
        row1[int(column) - 1] = "o"
    elif row == "2":
        row2[int(column) - 1] = "o"
    elif row == "3":
        row3[int(column) - 1] = "o"
    # NOTE(review): this prompt ran after every single round in the original;
    # kept for compatibility, but it probably belongs on a win/draw check.
    input("Game over.\nPress enter to play again.")
| true |
f7a03878b17b8dfe452423f022ccae027881c55e | Python | liubing1545/tensorflow-learn | /tensor.py | UTF-8 | 858 | 3.125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 9 10:53:59 2018
@author: liu.bing
"""
import tensorflow as tf
import numpy as np
# create 100 phony x,y data points in numpy
x_data = np.random.rand(100).astype("float32")
y_data = x_data * 0.1 + 0.3
# try to find values for w and b that compute y_data = W * x_data + b
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = W * x_data + b
# minimize the mean squared errors
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
# before starting, initialize the variables
# NOTE(review): this is TensorFlow 1.x graph-mode API (Session,
# initialize_all_variables); it will not run under TF 2.x — confirm the
# pinned TensorFlow version before reuse.
init = tf.initialize_all_variables()
# launch the graph.
sess = tf.Session()
sess.run(init)
# Fit the line: 201 gradient steps, logging W and b every 20 steps;
# they should converge toward 0.1 and 0.3.
for step in range(201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(W), sess.run(b))
| true |
# Pascal's triangle: every row is derived from the one above it and printed
# with left padding so the triangle appears centred.
rows = int(input("how many rows do u want in da pascal triangle ?"))
numbersList = []
for r in range(1, rows + 1):
    previous = numbersList[-1] if numbersList else []
    # Edge cells are 1; interior cells are the sum of the two cells above.
    rowList = [
        1 if c == 0 or c == r - 1 else previous[c - 1] + previous[c]
        for c in range(r)
    ]
    numbersList.append(rowList)
    print(" " * (rows - r) + "".join(str(value) + " " for value in rowList))
70cb62e865caffef0a538ed5619e902013fdc421 | Python | b-eyselein/rose | /sp_validation.py | UTF-8 | 1,010 | 3.21875 | 3 | [] | no_license | from base.field import Field, Cell
def __compare_cells__(user_cell: Cell, sample_cell: Cell, mind_colors: bool) -> bool:
    """Compare two cells: by exact colour when *mind_colors*, otherwise only
    by whether each cell is marked at all."""
    if mind_colors:
        return user_cell.color == sample_cell.color
    return user_cell.is_marked() == sample_cell.is_marked()
def __fields_equal__(user_field: Field, sample_field: Field, mind_colors: bool) -> bool:
    """True when both fields have identical dimensions and every pair of
    corresponding cells compares equal under __compare_cells__."""
    same_size = (user_field.height == sample_field.height
                 and user_field.width == sample_field.width)
    if not same_size:
        return False
    return all(
        __compare_cells__(
            user_field.get_by_coordinates(row, col),
            sample_field.get_by_coordinates(row, col),
            mind_colors,
        )
        for row in range(user_field.height)
        for col in range(user_field.width)
    )
def validate(user_field: Field, sample_field: Field, mind_colors: bool = False) -> bool:
    """Public entry point: report whether the user's field matches the sample.

    With mind_colors=False (the default) only the marked/unmarked pattern is
    compared; with mind_colors=True the cell colours must match exactly.
    """
    return __fields_equal__(user_field, sample_field, mind_colors)
| true |
b6b08d58d64007b1382b5433716b710db30a824b | Python | est271/geomstats | /geomstats/geometry/riemannian_metric.py | UTF-8 | 12,290 | 3.078125 | 3 | [
"MIT"
] | permissive | """Riemannian and pseudo-Riemannian metrics."""
import math
import autograd
import geomstats.backend as gs
from geomstats.geometry.connection import Connection
# Numerical tolerances and iteration limits shared by this module's algorithms.
EPSILON = 1e-4
N_CENTERS = 10
TOLERANCE = 1e-5
N_REPETITIONS = 20
N_MAX_ITERATIONS = 50000
N_STEPS = 10
def loss(y_pred, y_true, metric):
    """Compute loss function between prediction and ground truth.

    Loss function given by a Riemannian metric,
    expressed as the squared geodesic distance between the prediction
    and the ground truth.

    Parameters
    ----------
    y_pred : array-like, shape=[n_samples, dimension]
        Predicted point(s) on the manifold.
    y_true : array-like, shape=[n_samples, dimension]
        Ground-truth point(s) on the manifold.
    metric : RiemannianMetric
        Metric used to measure the squared geodesic distance.

    Returns
    -------
    loss
        Squared geodesic distance between prediction and ground truth.
    """
    # The local name intentionally shadows the function name; kept as-is.
    loss = metric.squared_dist(y_pred, y_true)
    return loss
def grad(y_pred, y_true, metric):
    """Closed-form for the gradient of the loss function.

    Parameters
    ----------
    y_pred : array-like, shape=[n_samples, dimension]
        Predicted point(s) on the manifold.
    y_true : array-like, shape=[n_samples, dimension]
        Ground-truth point(s) on the manifold.
    metric : RiemannianMetric
        Metric providing the Riemannian log map and inner-product matrix.

    Returns
    -------
    grad : array-like, shape=[n_samples, dimension]
        Gradient of the squared geodesic distance with respect to y_pred.
    """
    tangent_vec = metric.log(base_point=y_pred, point=y_true)
    grad_vec = - 2. * tangent_vec
    inner_prod_mat = metric.inner_product_matrix(base_point=y_pred)
    # Contract the Euclidean gradient with the (transposed) metric matrix.
    grad = gs.einsum('ni,nij->ni',
                     grad_vec,
                     gs.transpose(inner_prod_mat, axes=(0, 2, 1)))
    return grad
class RiemannianMetric(Connection):
    """Class for Riemannian and pseudo-Riemannian metrics.

    Parameters
    ----------
    dimension : int or math.inf
        Dimension of the manifold; must be positive.
    signature : tuple, optional
        Signature of the metric (for pseudo-Riemannian metrics).
    """
    def __init__(self, dimension, signature=None):
        # NOTE(review): asserts are stripped under `python -O`; kept as-is
        # to preserve the existing (AssertionError) failure mode.
        assert isinstance(dimension, int) or dimension == math.inf
        assert dimension > 0
        super().__init__(dimension=dimension)
        self.signature = signature

    def inner_product_matrix(self, base_point=None):
        """Inner product matrix at the tangent space at a base point.

        Abstract here: concrete metrics must override this method.

        Parameters
        ----------
        base_point : array-like, shape=[n_samples, dimension], optional
        """
        raise NotImplementedError(
            'The computation of the inner product matrix'
            ' is not implemented.')

    def inner_product_inverse_matrix(self, base_point=None):
        """Inverse (cometric) of the inner product matrix at a base point.

        Parameters
        ----------
        base_point : array-like, shape=[n_samples, dimension], optional
        """
        metric_matrix = self.inner_product_matrix(base_point)
        cometric_matrix = gs.linalg.inv(metric_matrix)
        return cometric_matrix

    def inner_product_derivative_matrix(self, base_point=None):
        """Compute derivative of the inner prod matrix at base point.

        Uses autograd's jacobian of inner_product_matrix.

        Parameters
        ----------
        base_point : array-like, shape=[n_samples, dimension], optional
        """
        metric_derivative = autograd.jacobian(self.inner_product_matrix)
        return metric_derivative(base_point)

    def christoffels(self, base_point):
        """Compute Christoffel symbols associated with the connection.

        Parameters
        ----------
        base_point: array-like, shape=[n_samples, dimension]

        Returns
        -------
        christoffels: array-like,
            shape=[n_samples, dimension, dimension, dimension]
        """
        cometric_mat_at_point = self.inner_product_inverse_matrix(base_point)
        metric_derivative_at_point = self.inner_product_derivative_matrix(
            base_point)
        # Standard coordinate formula:
        # Gamma^i_{kl} = 1/2 g^{im} (d_l g_{mk} + d_k g_{ml} - d_m g_{kl}).
        term_1 = gs.einsum('nim,nmkl->nikl',
                           cometric_mat_at_point,
                           metric_derivative_at_point)
        term_2 = gs.einsum('nim,nmlk->nilk',
                           cometric_mat_at_point,
                           metric_derivative_at_point)
        term_3 = - gs.einsum('nim,nklm->nikl',
                             cometric_mat_at_point,
                             metric_derivative_at_point)
        christoffels = 0.5 * (term_1 + term_2 + term_3)
        return christoffels

    def inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None):
        """Inner product between two tangent vectors at a base point.

        Both inputs and the metric matrix are broadcast against each other:
        a singleton batch on either side is expanded to match the other.

        Parameters
        ----------
        tangent_vec_a: array-like, shape=[n_samples, dimension]
            or shape=[1, dimension]
        tangent_vec_b: array-like, shape=[n_samples, dimension]
            or shape=[1, dimension]
        base_point: array-like, shape=[n_samples, dimension]
            or shape=[1, dimension]

        Returns
        -------
        inner_product : array-like, shape=[n_samples, 1]
            (the result is expanded to 2-D with a trailing axis below)
        """
        tangent_vec_a = gs.to_ndarray(tangent_vec_a, to_ndim=2)
        tangent_vec_b = gs.to_ndarray(tangent_vec_b, to_ndim=2)
        n_tangent_vec_a = gs.shape(tangent_vec_a)[0]
        n_tangent_vec_b = gs.shape(tangent_vec_b)[0]
        inner_prod_mat = self.inner_product_matrix(base_point)
        inner_prod_mat = gs.to_ndarray(inner_prod_mat, to_ndim=3)
        n_mats = gs.shape(inner_prod_mat)[0]
        # Pick the einsum signature matching which side carries the batch.
        if n_tangent_vec_a != n_mats:
            if n_tangent_vec_a == 1:
                tangent_vec_a = gs.squeeze(tangent_vec_a, axis=0)
                einsum_str_a = 'j,njk->nk'
            elif n_mats == 1:
                inner_prod_mat = gs.squeeze(inner_prod_mat, axis=0)
                einsum_str_a = 'nj,jk->nk'
            else:
                raise ValueError('Shape mismatch for einsum.')
        else:
            einsum_str_a = 'nj,njk->nk'
        aux = gs.einsum(einsum_str_a, tangent_vec_a, inner_prod_mat)
        n_auxs, _ = gs.shape(aux)
        # Same broadcasting dance for the contraction with the second vector.
        if n_tangent_vec_b != n_auxs:
            if n_auxs == 1:
                aux = gs.squeeze(aux, axis=0)
                einsum_str_b = 'k,nk->n'
            elif n_tangent_vec_b == 1:
                tangent_vec_b = gs.squeeze(tangent_vec_b, axis=0)
                einsum_str_b = 'nk,k->n'
            else:
                raise ValueError('Shape mismatch for einsum.')
        else:
            einsum_str_b = 'nk,nk->n'
        inner_prod = gs.einsum(einsum_str_b, aux, tangent_vec_b)
        inner_prod = gs.to_ndarray(inner_prod, to_ndim=2, axis=1)
        assert gs.ndim(inner_prod) == 2, inner_prod.shape
        return inner_prod

    def squared_norm(self, vector, base_point=None):
        """Compute the square of the norm of a vector.

        Squared norm of a vector associated to the inner product
        at the tangent space at a base point.

        Parameters
        ----------
        vector : array-like, shape=[n_samples, dimension]
        base_point : array-like, shape=[n_samples, dimension]

        Returns
        -------
        sq_norm : array-like, shape=[n_samples,]
        """
        sq_norm = self.inner_product(vector, vector, base_point)
        return sq_norm

    def norm(self, vector, base_point=None):
        """Compute norm of a vector.

        Norm of a vector associated to the inner product
        at the tangent space at a base point.

        Note: This only works for positive-definite
        Riemannian metrics and inner products.

        Parameters
        ----------
        vector : array-like, shape=[n_samples, dimension]
        base_point : array-like, shape=[n_samples, dimension]

        Returns
        -------
        norm : array-like, shape=[n_samples,]
        """
        sq_norm = self.squared_norm(vector, base_point)
        norm = gs.sqrt(sq_norm)
        return norm

    def geodesic(self, initial_point,
                 end_point=None, initial_tangent_vec=None,
                 point_type='vector'):
        """Geodesic as function of t.

        Geodesic curve defined by either:
        - an initial point and an initial tangent vector, or
        - an initial point and an end point.

        The geodesic is returned as a function parameterized by t.

        Parameters
        ----------
        initial_point : array-like, shape=[n_samples, dimension]
        end_point : array-like, shape=[n_samples, dimension], optional
        initial_tangent_vec : array-like, shape=[n_samples, dimension],
            optional
        point_type : str, optional

        Returns
        -------
        path : callable
        """
        point_ndim = 1
        if point_type == 'matrix':
            point_ndim = 2
        initial_point = gs.to_ndarray(initial_point,
                                      to_ndim=point_ndim + 1)
        if end_point is None and initial_tangent_vec is None:
            raise ValueError('Specify an end point or an initial tangent '
                             'vector to define the geodesic.')
        if end_point is not None:
            end_point = gs.to_ndarray(end_point,
                                      to_ndim=point_ndim + 1)
            # Shoot from the initial point toward the end point via the log map.
            shooting_tangent_vec = self.log(point=end_point,
                                            base_point=initial_point)
            if initial_tangent_vec is not None:
                assert gs.allclose(shooting_tangent_vec, initial_tangent_vec)
            initial_tangent_vec = shooting_tangent_vec
        initial_tangent_vec = gs.array(initial_tangent_vec)
        initial_tangent_vec = gs.to_ndarray(initial_tangent_vec,
                                            to_ndim=point_ndim + 1)

        def path(t):
            """Generate a function parameterizing the geodesic.

            Closes over initial_point / initial_tangent_vec / point_type.

            Parameters
            ----------
            t : parameter value of the geodesic

            Returns
            -------
            point_at_time_t : callable
            """
            t = gs.cast(t, gs.float32)
            t = gs.to_ndarray(t, to_ndim=1)
            t = gs.to_ndarray(t, to_ndim=2, axis=1)
            new_initial_point = gs.to_ndarray(
                initial_point,
                to_ndim=point_ndim + 1)
            new_initial_tangent_vec = gs.to_ndarray(
                initial_tangent_vec,
                to_ndim=point_ndim + 1)
            # Scale the tangent vector by each time value, then exponentiate.
            if point_type == 'vector':
                tangent_vecs = gs.einsum('il,nk->ik',
                                         t,
                                         new_initial_tangent_vec)
            elif point_type == 'matrix':
                tangent_vecs = gs.einsum('il,nkm->ikm',
                                         t,
                                         new_initial_tangent_vec)
            point_at_time_t = self.exp(tangent_vec=tangent_vecs,
                                       base_point=new_initial_point)
            return point_at_time_t
        return path

    def squared_dist(self, point_a, point_b):
        """Squared geodesic distance between two points.

        Parameters
        ----------
        point_a : array-like, shape=[n_samples, dimension]
        point_b : array-like, shape=[n_samples, dimension]

        Returns
        -------
        sq_dist : array-like, shape=[n_samples,]
        """
        log = self.log(point=point_b, base_point=point_a)
        sq_dist = self.squared_norm(vector=log, base_point=point_a)
        return sq_dist

    def dist(self, point_a, point_b):
        """Geodesic distance between two points.

        Note: It only works for positive definite
        Riemannian metrics.

        Parameters
        ----------
        point_a : array-like, shape=[n_samples, dimension]
        point_b : array-like, shape=[n_samples, dimension]

        Returns
        -------
        dist : array-like, shape=[n_samples,]
        """
        sq_dist = self.squared_dist(point_a, point_b)
        dist = gs.sqrt(sq_dist)
        return dist

    def diameter(self, points):
        """Give the distance between two farthest points.

        Distance between the two points that are farthest away from each other
        in points. Runs in O(n^2) pairwise distance evaluations.

        Parameters
        ----------
        points

        Returns
        -------
        diameter
        """
        diameter = 0.0
        n_points = points.shape[0]
        for i in range(n_points - 1):
            # Distances from point i to every later point (upper triangle).
            dist_to_neighbors = self.dist(points[i, :], points[i + 1:, :])
            dist_to_farthest_neighbor = gs.amax(dist_to_neighbors)
            diameter = gs.maximum(diameter, dist_to_farthest_neighbor)
        return diameter

    def closest_neighbor_index(self, point, neighbors):
        """Closest neighbor of point among neighbors.

        Parameters
        ----------
        point
        neighbors

        Returns
        -------
        closest_neighbor_index
        """
        dist = self.dist(point, neighbors)
        closest_neighbor_index = gs.argmin(dist)
        return closest_neighbor_index
| true |
c025aab5799d9f72cfd586472200cf9072a45d3f | Python | dkarlss/video-scripts | /buildpage.py | UTF-8 | 1,328 | 3.15625 | 3 | [] | no_license | #
# This script creates a html page with links using HTML5 video tags to
# all files given as arguments.
#
import sys
from string import Template
def getEpisodeName(path):
    """Return the final element of a '/'-separated path (the episode file name)."""
    return getPathElement(path, 0)
def getPathElement(path, num):
    """Return the path element *num* positions from the end of *path*.

    None input yields None; asking for an element beyond the start of the
    path yields the string "unknown".
    """
    if path is None:
        return None
    parts = path.split('/')
    index = len(parts) - 1 - num
    return parts[index] if index >= 0 else "unknown"
def writeHtmlFile(file, uri, name):
    """Write a stand-alone page at *file* embedding *uri* in an HTML5 video tag."""
    template = Template("<video src=\"$uri_addr\" controls=\"controls\"> your browser does not support the video tag </video>")
    markup = template.substitute(uri_addr=uri, uri_name=name)
    with open(file, 'w') as out:
        out.write(markup)
def writeLink(file, uri, name):
    """Append an anchor tag pointing at *uri* with the label *name* to *file*."""
    file.write(f'<a href="{uri}">{name}</a>')
def writeIndexHtmlFile(filelist):
    """Build index.html plus one video page per line of *filelist*.

    Each line of *filelist* is a video path; a per-episode HTML page is
    written next to index.html and linked from it.
    """
    with open('index.html', 'w') as f:
        with open(filelist) as f2:
            for path in f2:
                # Bug fix: strip the trailing newline from the raw line; the
                # original passed it through into the <video src=...> URI.
                path = path.strip()
                if not path:
                    continue  # robustness: ignore blank lines
                ename = getEpisodeName(path).strip()
                html_fname = ename + '.html'
                writeLink(f, html_fname, ename)
                writeHtmlFile(html_fname, path, ename)
                f.write("<BR>\n")
        f.write("</body></html>")
# CLI entry: the first argument is a text file listing one video path per line.
writeIndexHtmlFile(sys.argv[1])
| true |
f515b0ec5758cda4630509b8e10bf56620aea139 | Python | trevorw22/Sentiment-Analysis-Scripts | /nltk/stemming.py | UTF-8 | 1,077 | 4.375 | 4 | [] | no_license | # Stemming is a form of preprocessing, we take the root word. ex. ridding is not rid (or rode)
# We can have several variations of words with the same meaning
# Word net is a newer program that can do the same thing as stemming but better
# The actual meaning of a word is unchanged because the root words are the same. See example below:
# I was taking a ride in the car.
# I was riding in the car.
# These mean the same thing, but the first one takes up alot more space in a database.
# Natural Language Processing has an algorithm that's been around since 1979 called the Porter Stemmer
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
# A single PorterStemmer instance is reused for every word below.
ps = PorterStemmer()
example_words = ["python", "pythoner", "pythoning", "pythoned", "pythonly"]
# for w in example_words:
#     print(ps.stem(w))

# Output:
# python
# python
# python
# python
# pythonli

new_text = "It is very important to be pythonly while you are pythoning with python. All pythoners have pythoned poorly at least once."
# Tokenize the sentence, then print the Porter stem of every token
# (punctuation tokens pass through unchanged).
words = word_tokenize(new_text)
for w in words:
    print(ps.stem(w))
# Demonstrates checking for the None sentinel with `is`.
walet = None
# This branch fires because walet has not been assigned a number yet.
if walet is None:
    print('Shetaii')
walet = 98
print(walet)
| true |
7ef73839d159a50f4c408e7173ecfe20a467def5 | Python | CBermingham/Photonic_Force | /psd_graph.py | UTF-8 | 611 | 2.828125 | 3 | [] | no_license | import matplotlib.pyplot as plt
# Columns of the 'psddata' file: frequency, measured PSD, fitted PSD.
# NOTE(review): the name `time` shadows the stdlib module of the same name
# within this script; harmless here but worth renaming if extended.
time = []
psd = []
fit = []
# NOTE(review): mode 'rU' (universal newlines) is deprecated in Python 3 —
# confirm the target interpreter before changing it.
f = open('psddata', 'rU')
lines=f.readlines()
f.close()
for l in lines:
    b = l.split()
    time.append(float(b[0]))
    psd.append(float(b[1]))
    fit.append(float(b[2]))
# Experimental points as a scatter plot, the fit as a line, both on log-log axes.
plt.scatter(time, psd, color='c', s=1, label='experiment')
plt.plot(time, fit, color='purple', label='fit')
plt.xlabel("Frequency (Hz)")
plt.ylabel("Power spectral density (m$^2$Hz$^{-1}$)")
plt.xscale('log')
plt.yscale('log')
plt.ylim(ymax=1E-18)
plt.legend()
# Re-create the legend to restyle its frame without a visible border.
leg = plt.legend()
leg.get_frame().set_edgecolor('white')
plt.savefig('psd_figure.pdf')
plt.show()
dd70f6a83bcce7466d1086d58041e39f22719a2a | Python | DamocValentin/PythonLearning | /PasswordGenerator.py | UTF-8 | 722 | 4.375 | 4 | [] | no_license | # Write a programme, which generates a random password for the user.
# Ask the user how long they want their password to be,
# and how many letters and numbers they want in their password.
# Have a mix of upper and lowercase letters, as well as numbers and symbols.
# The password should be a minimum of 6 characters long.
import random
import string
def __main__():
    """Prompt for a password length (minimum 6) and print a random password.

    Characters are drawn from upper/lowercase letters, digits and
    punctuation. The original used the `random` module, which is not
    suitable for security-sensitive values; `secrets.choice` provides a
    cryptographically strong source.
    """
    import secrets  # local import: security-grade RNG for password material

    while True:
        password_length = int(input("How many characters do you want in your password? "))
        if password_length >= 6:
            break
        print("Too short")
    characters = string.ascii_letters + string.digits + string.punctuation
    print(''.join(secrets.choice(characters) for _ in range(password_length)))
__main__()
| true |
76977e29416ca1e5a1edc4f46d346c96def86b1b | Python | zafodB/aoc2020 | /aoc06.2.py | UTF-8 | 521 | 3.25 | 3 | [] | no_license | with open('6.1.input.txt', 'r', encoding='utf8') as input_file:
input_lines = input_file.readlines()
questions_sum = 0
group = set()
new_group = True
for line in input_lines:
if line == "\n":
this_group_questions = len(group)
questions_sum += this_group_questions
new_group = True
continue
if new_group:
group = set(list(line.strip()))
new_group = False
else:
group = group.intersection(set(list(line.strip())))
print(questions_sum)
| true |
04eaf8b81b8af8c025c2f610310fc62e3fddf398 | Python | azguen/My_100_Days_Of_Python | /58-60-twitter-api/my-code/Twitter_API/ask_user.py | UTF-8 | 218 | 3.328125 | 3 | [
"MIT"
def ask_user():
    """Ask the operator whether to save; return True only for a "yes" answer.

    Re-prompts until the (case-insensitive) answer is "yes" or "no".
    """
    print("Do you want to save?")
    while True:
        answer = input("Please enter yes or no: ").lower()
        if answer in {"yes", "no"}:
            return answer == "yes"
ask_user() | true |
7fd9f8b6a38a2e5b156ab89da27d66e21c71a2cf | Python | houfire/iotedgedev | /iotedgedev/solution.py | UTF-8 | 1,213 | 2.796875 | 3 | [
"MIT"
] | permissive | import os
import zipfile
class Solution:
    """Creates a new Azure IoT Edge solution by unpacking the bundled template."""

    def __init__(self, output, utility):
        # Collaborators injected by the caller: `output` renders messages,
        # `utility` supplies filesystem helpers (is_dir_empty).
        self.output = output
        self.utility = utility

    def create(self, name):
        """Unpack the solution template into directory *name* ('.' = cwd).

        Aborts with a prompt if the target directory is not empty.

        NOTE(review): if the try below fails, template_zip is left unbound
        and the later ZipFile call raises NameError — confirm intended.
        NOTE(review): the ZipFile is never closed explicitly; a `with`
        block would be safer — TODO.
        """
        if name == ".":
            dir_path = os.getcwd()
        else:
            dir_path = os.path.join(os.getcwd(), name)
        if not self.utility.is_dir_empty(dir_path):
            self.output.prompt("Directory is not empty. Run 'iotedgedev azure' or clean the directory.")
            return
        self.output.header("CREATING AZURE IOT EDGE SOLUTION: {0}".format(name))
        try:
            # The template ships alongside this module.
            template_zip = os.path.join(os.path.split(
                __file__)[0], "template", "template.zip")
        except Exception as ex:
            self.output.error("Error while trying to load template.zip")
            self.output.error(str(ex))
        # An empty name makes extractall target the current directory.
        if name == ".":
            name = ""
        zipf = zipfile.ZipFile(template_zip)
        zipf.extractall(name)
        # The template ships .env as .env.tmp so the archive tooling keeps it.
        os.rename(os.path.join(name, ".env.tmp"), os.path.join(name, ".env"))
        self.output.footer("Azure IoT Edge Solution Created")
        if name != "":
            self.output.info("Execute 'cd {0}' to navigate to your new solution.".format(name))
| true |
5531e802e6e0131bfab313bbb6fe0f400f8fc8d2 | Python | HuichuanLI/Recommand-Algorithme | /FM/deepfm/deepfm_movielens_sample.py | UTF-8 | 1,684 | 2.9375 | 3 | [] | no_license | import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from deepctr.models import DeepFM
from deepctr.inputs import SparseFeat,get_feature_names
# Load the data (MovieLens sample; depends on third-party deepctr package)
data = pd.read_csv("movielens_sample.txt")
sparse_features = ["movie_id", "user_id", "gender", "age", "occupation", "zip"]
target = ['rating']
# Label-encode each categorical feature into contiguous integer ids
for feature in sparse_features:
    lbe = LabelEncoder()
    data[feature] = lbe.fit_transform(data[feature])
# Record the number of distinct values (vocabulary size) for each feature
fixlen_feature_columns = [SparseFeat(feature, data[feature].nunique()) for feature in sparse_features]
print(fixlen_feature_columns)
linear_feature_columns = fixlen_feature_columns
dnn_feature_columns = fixlen_feature_columns
feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
# Split the dataset into train and test sets
train, test = train_test_split(data, test_size=0.2)
train_model_input = {name:train[name].values for name in feature_names}
test_model_input = {name:test[name].values for name in feature_names}
# Train a DeepFM regression model on the ratings
model = DeepFM(linear_feature_columns, dnn_feature_columns, task='regression')
model.compile("adam", "mse", metrics=['mse'], )
history = model.fit(train_model_input, train[target].values, batch_size=256, epochs=1, verbose=True, validation_split=0.2, )
# Predict on the held-out split
pred_ans = model.predict(test_model_input, batch_size=256)
# Report RMSE (square root of the rounded MSE)
mse = round(mean_squared_error(test[target].values, pred_ans), 4)
rmse = mse ** 0.5
print("test RMSE", rmse)
716f00e47675cf8dfc270ec58df9a4701e42b2d3 | Python | qiliu08/frequency-refinement-defense | /helper_functions.py | UTF-8 | 4,408 | 2.734375 | 3 | [] | no_license |
import numpy as np
from PIL import Image
import os
import copy
import cv2
import torch
def save_input_image(modified_im, im_name, folder_name='result_images', save_flag=True):
    """
    Discretizes 0-255 (real) image from 0-1 normalized image.

    Takes the first element of a batch (assumes modified_im is shaped
    (batch, C, H, W) with values normalized via mean/std 0.5 -- TODO confirm),
    maps it back to uint8 BGR, optionally saves it, and returns the array.
    """
    modified_copy = copy.deepcopy(modified_im)[0]  # copy so the caller's tensor is untouched
    modified_copy = ((modified_copy * 0.5) + 0.5)  # undo the 0.5/0.5 normalization
    modified_copy = modified_copy * 255
    # Box constraint: clamp into the valid 8-bit range
    modified_copy[modified_copy > 255] = 255
    modified_copy[modified_copy < 0] = 0
    modified_copy = modified_copy.transpose(1, 2, 0)  # CHW -> HWC for OpenCV
    modified_copy = modified_copy.astype('uint8')
    modified_copy = cv2.cvtColor(modified_copy, cv2.COLOR_RGB2BGR)  # OpenCV writes BGR
    if save_flag:
        save_image(modified_copy, im_name, folder_name)
    return modified_copy
def save_prediction_image(pred_out, im_name, folder_name='result_images'):
    """
    Saves the prediction of a segmentation model as a real image.

    Scales the 0-1 prediction map to 0-255, binarizes it at mid-gray,
    and writes it to disk via save_image().
    """
    binary_pred = copy.deepcopy(pred_out)
    binary_pred = binary_pred * 255
    # Binarize: anything above mid-gray becomes foreground (255).
    binary_pred[binary_pred > 127] = 255
    binary_pred[binary_pred <= 127] = 0
    save_image(binary_pred, im_name, folder_name)
def save_image_difference(org_image, perturbed_image, im_name, folder_name='result_images'):
    """
    Finds the absolute difference between two images in terms of grayscale palette.

    Both inputs are converted through save_input_image (without writing files),
    the per-pixel difference is min-max normalized, enhanced, and saved.
    """
    # Convert both normalized tensors to 0-255 BGR arrays (no files written).
    first = save_input_image(org_image, '', '', save_flag=False)
    second = save_input_image(perturbed_image, '', '', save_flag=False)
    # Per-pixel absolute difference.
    diff = np.abs(first - second)
    # Average perturbation magnitude (kept for parity with the original).
    avg = np.mean(diff)
    # Collapse the color channels into one intensity map.
    diff = np.sum(diff, axis=2)
    # Min-max normalize into [0, 1].
    diff_max = np.max(diff)
    diff_min = np.min(diff)
    diff = np.clip((diff - diff_min) / (diff_max - diff_min), 0, 1)
    # Enhance x 120, modify this according to your needs.
    diff = diff * 120
    diff = diff.astype('uint8')
    save_image(diff, im_name, folder_name)
def save_image(im_as_arr, im_name, folder_name):
    """Write an image array to <folder_name>/<im_name>.png, creating the folder if needed."""
    if not os.path.exists(folder_name):
        os.makedirs(folder_name)
    target_path = '{}/{}.png'.format(folder_name, str(im_name))
    # pred_img = Image.fromarray(im_as_arr)
    cv2.imwrite(target_path, im_as_arr)
def load_model(path_to_model):
    """
    Loads a pytorch model from disk.
    """
    # CPU-only variant kept from the original for reference:
    # model = torch.load(path_to_model, map_location=lambda storage, loc: storage)
    return torch.load(path_to_model)
def get_JS(SR, GT, threshold=0.5):
    """Jaccard similarity (IoU) between a prediction map and a ground-truth mask.

    SR : numpy array of prediction scores.
    GT : numpy array; foreground is wherever GT equals its maximum value.
    threshold : score cut-off separating foreground from background.

    Returns the intersection-over-union as a float in [0, 1].
    """
    SR = torch.from_numpy(SR)
    GT = torch.from_numpy(GT)
    # Threshold into fresh boolean masks instead of writing 0/1 back into
    # the tensors: torch.from_numpy shares memory with the caller's numpy
    # arrays, so the original in-place assignments silently mutated the
    # caller's data.
    SR = SR > threshold
    GT = GT == torch.max(GT)
    # Count positions in both masks (intersection) and in either (union).
    Inter = torch.sum((SR.int() + GT.int()) == 2)
    Union = torch.sum((SR.int() + GT.int()) >= 1)
    # Epsilon keeps the division safe when both masks are empty.
    JS = float(Inter) / (float(Union) + 1e-6)
    return JS
def get_PX_accuracy(SR, GT, threshold=0.5):
    """Pixel accuracy between a prediction map and a ground-truth mask.

    SR : numpy array of prediction scores.
    GT : numpy array; foreground is wherever GT equals its maximum value.
    threshold : score cut-off separating foreground from background.

    Returns the fraction of positions where the thresholded prediction
    agrees with the ground truth.
    """
    SR = torch.from_numpy(SR)
    GT = torch.from_numpy(GT)
    SR = SR > threshold
    GT = GT == torch.max(GT)
    corr = torch.sum(SR == GT)
    # numel() generalizes the original size(0)*size(1)*size(2)*size(3)
    # product, which hard-required a 4-D tensor; behavior is identical
    # for 4-D inputs and now also works for any rank.
    acc = float(corr) / float(SR.numel())
    return acc
def calculate_mask_similarity(mask1, mask2):
    """
    Calculates IoU and pixel accuracy between two masks.

    mask1 : numpy array of prediction scores, thresholded at 0.5 here.
    mask2 : numpy array of {0, 1} ground-truth values shaped (N, C, H, W);
            the pixel-accuracy denominator uses H * W, i.e. it assumes a
            single image with a single channel -- TODO confirm callers.

    Returns (iou, pixel_accuracy).
    """
    # Threshold into a new array: the original wrote 0/1 into mask1 in
    # place, mutating the caller's data.
    binary = (mask1 > 0.5).astype(mask1.dtype)
    # Calculate IoU
    intersection = binary * mask2
    union = binary + mask2
    # Clip overlap counts (2) down to 1 so the union is a flat mask
    union[union > 1] = 1
    iou = np.sum(intersection) / np.sum(union)
    # Calculate pixel accuracy over the H x W plane
    correct_pixels = binary == mask2
    pixel_acc = np.sum(correct_pixels) / (correct_pixels.shape[2] * correct_pixels.shape[3])
    return (iou, pixel_acc)
def calculate_image_distance(im1, im2):
    """
    Calculates L2 and L_inf distance between two images (torch tensors).
    """
    # Euclidean distance over all elements.
    l2_dist = torch.dist(im1, im2, p=2).item()
    # L-inf: reduce along dim 2 first (values, not indices), then globally.
    abs_diff = torch.abs(im1 - im2)
    reduced = torch.max(abs_diff, dim=2)[0]
    linf_dist = torch.max(reduced).item()
    return l2_dist, linf_dist
| true |
ad6143e8059c0c6fa6b8d480f47100c147182b10 | Python | here0009/LeetCode | /Python/1478_AllocateMailboxes.py | UTF-8 | 2,992 | 3.859375 | 4 | [] | no_license | """
Given the array houses and an integer k. where houses[i] is the location of the ith house along a street, your task is to allocate k mailboxes in the street.
Return the minimum total distance between each house and its nearest mailbox.
The answer is guaranteed to fit in a 32-bit signed integer.
Example 1:
Input: houses = [1,4,8,10,20], k = 3
Output: 5
Explanation: Allocate mailboxes in position 3, 9 and 20.
Minimum total distance from each houses to nearest mailboxes is |3-1| + |4-3| + |9-8| + |10-9| + |20-20| = 5
Example 2:
Input: houses = [2,3,5,12,18], k = 2
Output: 9
Explanation: Allocate mailboxes in position 3 and 14.
Minimum total distance from each houses to nearest mailboxes is |2-3| + |3-3| + |5-3| + |12-14| + |18-14| = 9.
Example 3:
Input: houses = [7,4,6,1], k = 1
Output: 8
Example 4:
Input: houses = [3,6,14,10], k = 4
Output: 0
Constraints:
n == houses.length
1 <= n <= 100
1 <= houses[i] <= 10^4
1 <= k <= n
Array houses contain unique integers.
"""
from functools import lru_cache
class Solution:
    """First variant: top-down DP over (mailboxes left, first unserved house)."""
    def minDistance(self, houses, k: int) -> int:
        @lru_cache(None)
        def dp(k, i):
            # dp(k, i): minimum total distance to serve houses[i:] with k mailboxes.
            if k == 0 and i == length:
                return 0
            if k == 0 or i == length:
                # Ran out of mailboxes with houses left, or vice versa.
                return float('inf')
            res = float('inf')
            for j in range(i, length):
                res = min(res, costs[i][j] + dp(k-1, j+1)) ## Try to put a mailbox among house[i:j]
            return res
        length = len(houses)
        houses = sorted(houses)
        # costs[i][j]: total distance of houses i..j to the median house,
        # the optimal single-mailbox position for that contiguous group.
        costs = [[0]*length for _ in range(length)]
        for i in range(length):
            for j in range(i+1, length):
                median = houses[(i+j)//2]
                for t in range(i, j+1):
                    costs[i][j] += abs(houses[t] - median)
                costs[j][i] = costs[i][j]
        return dp(k, 0)
from functools import lru_cache
class Solution:
    """Second variant: same DP with base cases phrased via remaining houses."""
    def minDistance(self, houses, k: int) -> int:
        positions = sorted(houses)
        n = len(positions)
        # cost[i][j]: total distance of houses i..j served by one mailbox
        # placed at the median house position.
        cost = [[0] * n for _ in range(n)]
        for i in range(n):
            for j in range(i + 1, n):
                mid = positions[(i + j) // 2]
                total = sum(abs(positions[t] - mid) for t in range(i, j + 1))
                cost[i][j] = total
                cost[j][i] = total

        @lru_cache(None)
        def best(remaining, start):
            # Serve positions[start:] using exactly `remaining` mailboxes.
            if remaining == n - start:
                return 0
            if remaining > n - start or remaining == 0:
                return float('inf')
            return min(
                cost[start][end] + best(remaining - 1, end + 1)
                for end in range(start, n)
            )

        return best(k, 0)
# Quick manual checks against the examples from the problem statement.
S = Solution()
houses = [1,4,8,10,20]
k = 3
print(S.minDistance(houses, k))  # expected 5
houses = [2,3,5,12,18]
k = 2
print(S.minDistance(houses, k))  # expected 9
houses = [7,4,6,1]
k = 1
print(S.minDistance(houses, k))  # expected 8
houses = [3,6,14,10]
k = 4
print(S.minDistance(houses, k))  # expected 0
133c1e06527e9621b9bd80524c2ad148d7d6e8df | Python | marcvifi10/Curso-Python | /Python 2/20 - Expresiones regulares/69 - Expresiones regulares I. Vídeo 69.py | UTF-8 | 951 | 4 | 4 | [] | no_license | import re
cadena="Vamos a aprender expresiones regulares"
# re.search returns a Match object when found, None otherwise.
print(re.search("aprender",cadena))
print(re.search("aprenderrr",cadena))
textoBuscar = "aprender"
# Look for textoBuscar inside cadena
if re.search(textoBuscar, cadena) is not None:
    print("He encontrado el texto")
else:
    print("No he encontrado el texto")
textoEncontrado = re.search(textoBuscar,cadena)
# Character index where the match starts
print(textoEncontrado.start())
# Character index where the match ends
print(textoEncontrado.end())
# Both values at once: the (start, end) span of the match
print(textoEncontrado.span())
# Returns every matched string in a list;
# if the text occurs twice it is listed twice
print(re.findall(textoBuscar,cadena))
# Count how many times textoBuscar occurs inside cadena
print(len(re.findall(textoBuscar,cadena)))
0da6016f3bdc93f61cec5bbced2f1f75ce4d18e1 | Python | TheHistorian/DanRTS | /Window.py | UTF-8 | 1,111 | 3.171875 | 3 | [] | no_license | import pygame, sys
class Window:
    """Pygame window wrapper: owns the display surface, a draw list, and input."""
    def __init__(self, title, height):
        self.title = title
        self.height = height
        self.width = int(height * (16.0 / 9.0)) # 16:9 aspect ratio
        self.screen = pygame.display.set_mode((self.width, self.height), 0, 32)
        pygame.display.set_caption(title)
        # Full-window rect used to clear the screen every frame.
        self.background = pygame.rect.Rect(0, 0, self.width, self.height)
        # Drawables; each needs .image, .location, .dirty, and .move_rel().
        self.images = []
    def add(self, image):
        # Register a drawable and force it to be drawn on the next frame.
        self.images.append(image)
        image.dirty = True
    def draw(self):
        # Clear to black, blit every dirty image, then flip the display.
        pygame.draw.rect(self.screen, (0, 0, 0), self.background)
        for image in self.images:
            if image.dirty:
                self.screen.blit(image.image, image.location)
        pygame.display.update()
    def events(self):
        # Window close or Esc quits the whole program.
        for event in pygame.event.get():
            if(event.type == pygame.QUIT
               or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE)):
                pygame.quit()
                sys.exit()
        # WASD moves the first registered image 5 px per call
        # (assumes at least one image was added -- IndexError otherwise).
        keys = pygame.key.get_pressed()
        if keys[pygame.K_w]:
            self.images[0].move_rel(0, -5)
        if keys[pygame.K_s]:
            self.images[0].move_rel(0, 5)
        if keys[pygame.K_a]:
            self.images[0].move_rel(-5, 0)
        if keys[pygame.K_d]:
            self.images[0].move_rel(5, 0)
| true |
a0f537231c07304393411e054f80ee53e3f0aeaf | Python | superfk/opeCV-demo | /main.py | UTF-8 | 8,533 | 2.703125 | 3 | [] | no_license | import numpy as np
import cv2 as cv
import glob
import matplotlib.pyplot as plt
from PIL import Image
from baslerCam import Cam
def basic_numpy():
    """Demo of numpy basics: list vs array, zeros of several ranks, 2-D slicing."""
    new_list = [1, 2, 3]
    print(type(new_list))
    np_arr = np.array(new_list)
    print(np_arr)
    print(type(np_arr))
    print(np.max(new_list))
    zeros_1D = np.zeros(10)
    print(zeros_1D)
    zeros_2D = np.zeros((10,5))
    print(zeros_2D)
    zeros_3D = np.zeros((3,10,5))
    print(zeros_3D)
    # Random 10x10 matrix of 0/1 values.
    mat = np.random.randint(0,2,100).reshape((10,10))
    print(mat)
    sub = mat[:,2]  # third column
    print(sub)
    sub = mat[0,:]  # first row
    print(sub)
    sub = mat[0:2,1:3]  # 2x2 sub-block
    print(sub)
def read_write_image():
    """Demo: load an image with PIL (RGB) and OpenCV (BGR), show, convert, save."""
    pic = Image.open('data/00-puppy.jpg')
    pic_arr = np.asarray(pic)  # PIL yields an RGB array
    print(pic_arr.shape)
    # plt.imshow(pic_arr)
    # plt.show()
    # sub_img = pic_arr[400:1000, 500:1500, 1]
    # plt.imshow(sub_img)
    # plt.show()
    img = cv.imread('data/00-puppy.jpg')  # OpenCV loads as BGR
    pic_arr = cv.resize(pic_arr, (300, 300))
    cv.imshow('Puppy_rbg', pic_arr)
    cv.waitKey(0)
    img = cv.resize(img, (0, 0),None,0.2,0.2)  # scale to 20%
    cv.imshow('Puppy_bgr', img)
    cv.waitKey(0)
    cv.destroyAllWindows()
    # NOTE(review): comment/code mismatch in the original -- the call below
    # converts BGR to *grayscale*, not RGB to BGR.
    new_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    print(new_img.shape)
    cv.imshow('Puppy_bgr_new', new_img)
    cv.waitKey(0)
    cv.imwrite('my_new_picture.jpg',new_img)
def calibration_realtime():
    """Live chessboard preview from the Basler camera; Enter saves a frame, Esc quits.

    Saved frames (data\calibImg\img_N.jpg) feed the offline calibration() below.
    """
    col = 9   # inner corners per chessboard row
    row = 6   # inner corners per chessboard column
    # termination criteria
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 0.1)
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((row*col,3), np.float32)
    objp[:,:2] = np.mgrid[0:col,0:row].T.reshape(-1,2)
    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d point in real world space
    imgpoints = [] # 2d points in image plane.
    # images = glob.glob('*.jpg')
    cam = Cam()
    cam.open()
    counts = 0  # number of frames saved so far
    while True:
        img = cam.snap()
        img_proc = cv.resize(img, (0, 0),None,0.4,0.4)  # 40% preview size
        gray = cv.cvtColor(img_proc, cv.COLOR_BGR2GRAY)
        # Find the chess board corners
        ret, corners = cv.findChessboardCorners(gray, (col,row), None)
        # If found, add object points, image points (after refining them)
        if ret == True:
            # objpoints.append(objp)
            corners2 = cv.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria)
            # imgpoints.append(corners)
            # Draw and display the corners
            cv.drawChessboardCorners(img_proc, (col,row), corners2, ret)
        # gray = cv.resize(gray, (0, 0),None,0.2,0.2)
        # cv.imshow('img', gray)
        cv.imshow('img', img_proc)
        k = cv.waitKey(50)
        if k==27: # Esc key to stop
            break
        elif k==13: # Enter: save the full-resolution frame for calibration
            cv.imwrite(f'data\calibImg\img_{counts}.jpg',img)
            counts += 1
            continue
        else:
            pass
    cv.destroyAllWindows()
    cam.close()
def calibration():
    """Calibrate the camera from saved chessboard images.

    Returns (mtx, dist): the camera matrix and distortion coefficients
    from cv.calibrateCamera.
    """
    col = 9       # inner corners per chessboard row
    row = 6       # inner corners per chessboard column
    interval = 11 # mm between adjacent corners on the physical board
    # termination criteria
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 0.1)
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((row*col,3), np.float32)
    objp[:,:2] = np.mgrid[0:col,0:row].T.reshape(-1,2) * interval
    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d point in real world space
    imgpoints = [] # 2d points in image plane.
    images = glob.glob('data/calibImg/*.jpg')
    for fname in images:
        print(f"fname: {fname}")
        img = cv.imread(fname)
        img = cv.resize(img, (0, 0),None,0.4,0.4)  # same 40% scale as capture preview
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        # cv.imshow('gray', gray)
        # cv.waitKey(500)
        # Find the chess board corners
        ret, corners = cv.findChessboardCorners(gray, (col,row), None)
        # If found, add object points, image points (after refining them)
        if ret == True:
            objpoints.append(objp)
            corners2 = cv.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria)
            imgpoints.append(corners)
            # Draw and display the corners
            cv.drawChessboardCorners(img, (col,row), corners2, ret)
            cv.imshow('img', img)
            cv.waitKey(500)
    cv.destroyAllWindows()
    ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    print(f"ret: {ret}")
    print(f"mtx: {mtx}")
    print(f"dist: {dist}")
    print(f"rvecs: {rvecs}")
    print(f"tvecs: {tvecs}")
    return mtx, dist
def undistort(mtx, dist):
    """Snap one frame, undistort it with cv.undistort, and save calibresult.png."""
    cam = Cam()
    cam.open()
    img = cam.snap()
    img = cv.resize(img, (0, 0),None,0.4,0.4)
    # img = cv.imread('left12.jpg')
    h, w = img.shape[:2]
    newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
    # undistort
    dst = cv.undistort(img, mtx, dist, None, newcameramtx)
    # crop the image to the valid region of interest
    x, y, w, h = roi
    dst = dst[y:y+h, x:x+w]
    cv.imwrite('calibresult.png', dst)
    cam.close()
def remap(mtx, dist):
    """Snap one frame and undistort it via precomputed remap tables; save calibresult.png."""
    print(f"mtx: {mtx}")
    cam = Cam()
    cam.open()
    img = cam.snap()
    img = cv.resize(img, (0, 0),None,0.4,0.4)
    # img = cv.imread('left12.jpg')
    h, w = img.shape[:2]
    newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
    print(f"newcameramtx: {newcameramtx}")
    # undistort via x/y remap lookup tables
    mapx, mapy = cv.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w,h), 5)
    dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)
    # crop the image to the valid region of interest
    x, y, w, h = roi
    dst = dst[y:y+h, x:x+w]
    cv.imwrite('calibresult.png', dst)
    cam.close()
def resolution(mtx, dist):
    """Estimate the spatial resolution (mm per pixel) from a live chessboard shot.

    Undistorts one camera frame, detects the chessboard corners, and averages
    mm/pixel over horizontally adjacent corner pairs. Returns the average.
    """
    cam = Cam()
    cam.open()
    img = cam.snap()
    cam.close()
    img = cv.resize(img, (0, 0),None,0.4,0.4)
    # img = cv.imread('left12.jpg')
    h, w = img.shape[:2]
    newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
    print(f"newcameramtx: {newcameramtx}")
    # undistort via remap tables
    mapx, mapy = cv.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w,h), 5)
    dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR)
    # crop the image to the valid region of interest
    x, y, w, h = roi
    dst = dst[y:y+h, x:x+w]
    col = 9       # inner corners per chessboard row
    row = 6       # inner corners per chessboard column
    interval = 11 # mm between adjacent corners
    # termination criteria
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 0.1)
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((row*col,3), np.float32)
    objp[:,:2] = np.mgrid[0:col,0:row].T.reshape(-1,2) * interval
    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d point in real world space
    imgpoints = [] # 2d points in image plane.
    gray = cv.cvtColor(dst, cv.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv.findChessboardCorners(gray, (col,row), None)
    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        corners2 = cv.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria)
        imgpoints.append(corners)
        # Draw and display the corners
        cv.drawChessboardCorners(dst, (col,row), corners2, ret)
        cv.imshow('img', dst)
        cv.waitKey()
    cv.destroyAllWindows()
    # Walk corner pairs; for consecutive corners in the same board row
    # (distObj[0] > 0), compute mm per pixel from the pixel distance.
    count = 0
    lastObj = None
    lastImg = None
    mmPerPxs = []
    for objPoint, imgPoint in zip(objpoints[0], imgpoints[0]):
        if count == 0:
            lastObj = objPoint
            lastImg = imgPoint
        else:
            distObj = objPoint -lastObj
            distImg = imgPoint[0] -lastImg[0]
            if distObj[0] > 0:
                # The 0.4 factor compensates for the earlier 40% resize,
                # mapping back to full-sensor pixels -- TODO confirm.
                mmPerPx = distObj[0] / np.sqrt(distImg[0]*distImg[0] + distImg[1]*distImg[1]) * 0.4
                mmPerPxs.append(mmPerPx)
                print(f"distObj: {distObj}")
                print(f"distImg: {distImg}")
                print(f"mm per px: {mmPerPx}")
            lastObj = objPoint
            lastImg = imgPoint
        count += 1
    avgRes = np.average(mmPerPxs)
    stdRes = np.std(mmPerPxs)
    print("")
    print(f"mm per px list: {mmPerPxs}")
    print("")
    print(f"resolution average: {avgRes}")
    print("")
    print(f"resolution std: {stdRes}")
    return avgRes
if __name__=='__main__':
    # Calibrate from the saved chessboard images, then measure mm/pixel live.
    mtx, dist = calibration()
    resolution(mtx, dist)
| true |
49b80548762e0414fa66bd66bd12f5010cdcef26 | Python | saltchang/caten-music | /caten_music/models/users_profile.py | UTF-8 | 783 | 2.5625 | 3 | [] | no_license | # models/users_profile.py
from flask import current_app
from .base import db
from caten_music import helper
class UserProfile(db.Model):
    """Per-user profile row, linked one-to-one with the users table."""

    # SQL table name
    __tablename__ = "users_profile"

    # Column definitions
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'),
                        nullable=False)
    about_me = db.Column(db.Text)
    phone_number = db.Column(db.String(20))
    sex = db.Column(db.String(8))

    # Stage this row in the session without committing (special uses)
    def flush(self):
        db.session.add(self)
        # Bug fix: flush() lives on the session, not on the Flask-SQLAlchemy
        # extension object; the original `db.flush()` raised AttributeError.
        db.session.flush()

    # Persist this row to the database
    def save(self):
        db.session.add(self)
        db.session.commit()

    # Object representation
    def __repr__(self):
        return '<ID: %s>' % (self.id)
| true |
6961d0c6fc66e4b4536c8d3e70b0c67a18c88563 | Python | katepisova/Python-1 | /task-5.py | UTF-8 | 430 | 2.890625 | 3 | [] | no_license | import sys, cmath
# Require the output filename as the first command-line argument.
try:
    outfilename = sys.argv[1]
except IndexError:
    # Bug fix: the bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only a missing argument should trigger usage.
    print("Usage:", sys.argv[0], "outfile")
    sys.exit(1)
ofile = open(outfilename, 'w')
def myfunc(y):
    """Return y**5 * e**(-y) for y >= 0 (as a real float), else 0.0.

    Bug fix: cmath.exp returns a complex number, and the '%g' formatting
    in the output loop below raised TypeError on complex values; take the
    real part (the imaginary part is 0 for real y).
    """
    if y >= 0.0:
        return (y ** 5 * cmath.exp(-y)).real
    else:
        return 0.0
# Remaining argv entries come in (x, y) pairs; write "(x, f(y))" per pair.
i = 2
while i < len(sys.argv):  # idiom fix: len() instead of sys.argv.__len__()
    x = float(sys.argv[i])
    y = float(sys.argv[i + 1])
    fy = myfunc(y)
    ofile.write('(%g, %g)\n' % (x, fy))
    i += 2
ofile.close()
| true |
15df56df7a2c07b3c3a040315b6b2fc953f587ce | Python | ynvtlmr/Fundamentals | /day_03/cats_and_birds/game.py | UTF-8 | 488 | 3.96875 | 4 | [] | no_license | from cat import Cat
from bag import Bag
from bird import Bird
# Instantiate one of each animal/container class and report its attributes.
# create a cat
print("Creating an instance of a cat")
cat1 = Cat("Felix")
print(f"The cat's name is {cat1.name}.")
# create a bag
print("Creating an instance of a bag")
bag1 = Bag(10)
print(f"The bag's size is {bag1.size}.")
# create a bird
print("Creating an instance of a bird")
bird1 = Bird("Foghorn Leghorn", 5)
print(f"The bird's name is {bird1.name}, and its size is {bird1.size}")
11a2389763379204bf4309de9efc7e0f062b9bf2 | Python | marceluphd/IL-LORA1272 | /Sample-Java/01-command/ap8-reciver.py | UTF-8 | 889 | 2.921875 | 3 | [] | no_license | import serial
# Python 2 script: configures a LoRa module over a serial port by sending
# raw command frames and printing the hex-encoded responses.
#ser = serial.Serial ("/dev/ttyAMA0") #Open named port
ser = serial.Serial ("/dev/tty.usbserial")
ser.baudrate = 115200 #Set baud rate to 115200
#data = ser.read(10) #Read ten characters from serial port to data
#ser.write("you send:")
#ser.write(data) #Send back the received data
# NOTE(review): the 0x80/0xc1 opcodes and response lengths are
# module-specific; confirm against the LoRa module's datasheet.
ser.write(serial.to_bytes([0x80,0x00,0x00]))
data = ser.read(6)
#print(data)
print data.encode('hex')
ser.write(serial.to_bytes([0xc1,0x01,0x00]))
data = ser.read(5)
print data.encode('hex')
ser.write(serial.to_bytes([0xc1,0x02,0x00]))
data = ser.read(12)
print data.encode('hex')
ser.write(serial.to_bytes([0xc1,0x03,0x05,0x03,0xe4,0xc0,0x00,0x03]))
data = ser.read(5)
print data.encode('hex')
ser.write(serial.to_bytes([0xc1,0x06,0x00]))
data = ser.read(7)
print ("reciver:")
print data.encode('hex')
ser.close()
| true |
3031d69377eaa691741201af218b822dc0ba3646 | Python | CE4301-ACI-IEE/proyecto-1 | /tools/create_mem_kernel_mif.py | UTF-8 | 1,354 | 2.78125 | 3 | [] | no_license | # Date: 04/17/18
print( "Starting..." )
import os
# Output word width (bits) and number of memory words in the .mif file.
OUTPUT_BITS = 16
DEPTH = 9
# convert signed decimal to hex
def tohex( val, nbits ):
    """Return *val* as a two's-complement hex string (no '0x' prefix) in *nbits* bits."""
    wrapped = (val + (1 << nbits)) % (1 << nbits)
    return format(wrapped, 'x')
# Convert the chosen kernel matrix into a mem_kernel.mif file
# kernel blur (box blur)
#kernel_matrix = [ [1,1,1], [1,1,1], [1,1,1] ]
# kernel sharpen
kernel_matrix = [ [0,-1,0], [-1,5,-1], [0,-1,0] ]
# kernel edge detection
#kernel_matrix = [ [-1,-1,-1], [-1,-8,-1], [-1,-1,-1] ]
# create mem_kernel.mif file
# NOTE(review): '__file__' is a quoted string, so realpath resolves a file
# literally named "__file__" relative to the cwd, not this script's path.
this_file_dir = os.path.dirname(os.path.realpath('__file__')) # directory of this file
output_file_dir = "../cpu_pipelined/hdl/mem_kernel.mif" # relative output location
path = os.path.join( this_file_dir, output_file_dir ) # Create a relative path
f = open( path, "w+" )
# .mif header: word width, depth, and radix declarations.
f.write( "WIDTH=%d;\n" % OUTPUT_BITS )
f.write( "DEPTH=%d;\n\n" % DEPTH )
f.write( "ADDRESS_RADIX=HEX;\n" )
f.write( "DATA_RADIX=HEX;\n\n" )
f.write( "CONTENT BEGIN\n" )
# One address per kernel coefficient, row-major, as 4-digit hex words.
d = kernel_matrix
aux_address = 0
for i in range( len(d) ):
    for j in range( len(d[i]) ):
        val = d[i][j];
        f.write( "\t%d\t:\t" % aux_address )
        f.write( "%s;\n" % tohex( val, OUTPUT_BITS ).zfill(4) )
        aux_address += 1
f.write( "END;" )
f.close() # close file
print( "Finish.OK" )
print( "See 'cpu_pipelined/hdl/mem_kernel.mif'." )
# finish the program
759f68552910b1c1602699078a67c69cd7944036 | Python | Aasthaengg/IBMdataset | /Python_codes/p03041/s384935089.py | UTF-8 | 93 | 2.71875 | 3 | [] | no_license | A=list(map(int,input().split()))
# A (read on the previous line) holds the integers [N, K] from line one.
S=input()
K=A[1]
s=S[K-1]  # K-th character, 1-indexed
s=s.lower()
print(S[:K-1]+s+S[K:])  # S with that one character lowercased
3c45501a109ce1f870eaeb20ee0c89a9152aa806 | Python | batmanuel-sandbox/pipe_supertask | /python/lsst/pipe/supertask/graph.py | UTF-8 | 3,297 | 2.578125 | 3 | [] | no_license | #
# LSST Data Management System
# Copyright 2017-2018 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
"""Module defining quantum graph classes and related methods.
There could be different representations of the quantum graph depending
on the client needs. Presently this module contains graph implementation
which is based on requirements of command-line environment. In the future
we could add other implementations and methods to convert between those
representations.
"""
from __future__ import absolute_import, division, print_function
# "exported" names
__all__ = ["QuantumGraphNodes", "QuantumGraph"]
# -------------------------------
# Imports of standard modules --
# -------------------------------
# -----------------------------
# Imports for other modules --
# -----------------------------
# ----------------------------------
# Local non-exported definitions --
# ----------------------------------
# ------------------------
# Exported definitions --
# ------------------------
class QuantumGraphNodes(object):
    """A group of quantum-graph nodes that share a single task.

    Each node of a quantum graph pairs a `SuperTask` with one `Quantum`.
    In an edge-free representation (sufficient for the command-line
    executor, which can deduce edges from each quantum's inputs and
    outputs), the graph reduces to, per task, the task definition plus
    the list of quanta to run -- which is exactly what this class holds.

    Attributes
    ----------
    taskDef : :py:class:`TaskDef`
        Task definition for this set of nodes.
    quanta : `list` of :py:class:`lsst.daf.butler.Quantum`
        List of quanta corresponding to the task.
    """
    def __init__(self, taskDef, quanta):
        self.taskDef = taskDef
        self.quanta = quanta
class QuantumGraph(list):
    """An ordered sequence of :py:class:`QuantumGraphNodes` objects.

    The ordering typically mirrors the task order of the pipeline that
    produced the graph (this depends on the constructing code).

    Parameters
    ----------
    iterable : iterable of :py:class:`QuantumGraphNodes`, optional
        Initial sequence of per-task nodes.
    """
    def __init__(self, iterable=None):
        super(QuantumGraph, self).__init__(iterable if iterable else [])
| true |
2e9aee301e3645d5d2e5d06c12c6d8d14083d4d8 | Python | podhmo/htmlpp | /htmlpp/lexer.py | UTF-8 | 1,730 | 2.75 | 3 | [] | no_license | # -*- coding:utf-8 -*-
from .utils import parse_attrs, create_html_tag_regex
class Token(object):
    # Marker base class for lexer tokens (Open / OpenClose / Close).
    pass
class Open(Token):
    """Token for an opening tag: <@name attrs>."""
    def __init__(self, name, attrs):
        self.name = name
        self.attrs = attrs
    def __repr__(self):
        return '<@%s at %s>' % (self.name, hex(id(self)))
class OpenClose(Token):
    """Token for a self-closing tag: <@name attrs/>."""
    def __init__(self, name, attrs):
        self.name = name
        self.attrs = attrs
    def __repr__(self):
        return '<@%s at %s/>' % (self.name, hex(id(self)))
class Close(Token):
    """Token for a closing tag: </@name>."""
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return '</@%s at %s>' % (self.name, hex(id(self)))
class Lexer(object):
    """Splits a markup string into plain-text chunks and tag tokens.

    Tags use a configurable prefix (default "@"); tag matching is delegated
    to the regex built by create_html_tag_regex, attribute parsing to
    parse_attrs (both from .utils).
    """
    def __init__(self, prefix="@", parse_attrs=parse_attrs):
        self.parse_attrs = parse_attrs
        # Compiled regex matching prefixed tags.
        self.scanner = create_html_tag_regex(prefix=prefix)
    def dispatch(self, m):
        # Map a regex match to the right token type.
        # Group layout (from create_html_tag_regex): gs[0] truthy for a
        # closing slash, gs[1] = tag name, gs[2] = raw attribute string,
        # gs[-1] truthy for a self-closing tag -- TODO confirm in utils.
        gs = m.groups()
        if gs[0]:
            return Close(gs[1])
        elif gs[-1]:
            return OpenClose(gs[1], self.parse_attrs(gs[2]))
        else:
            return Open(gs[1], self.parse_attrs(gs[2]))
    def add(self, buf, x):
        # Append only non-empty chunks/tokens.
        if x:
            buf.append(x)
    def __call__(self, body):
        # Scan body left to right, emitting interleaved text and tokens.
        buf = []
        body = body.strip()
        search = self.scanner.scanner(body).search
        m = None
        end = 0
        while True:
            prev_m, m = m, search()
            if m is None:
                if prev_m is None:
                    # No tags at all: the whole body is one text chunk.
                    self.add(buf, body)
                else:
                    # Trailing text after the final tag.
                    self.add(buf, prev_m.string[prev_m.end():])
                return buf
            # Text between the previous match and this one.
            self.add(buf, m.string[end:m.start()])
            self.add(buf, self.dispatch(m))
            end = m.end()
| true |
1ddc5d030ec5e5085d14154b21e96c2f77e3ae05 | Python | Sai-Sumanth-D/MyProjects | /Guess Location Game.py | UTF-8 | 526 | 4.15625 | 4 | [] | no_license | #USING A WHILE LOOP FOR BUILDING A VERY SIMPLE GUESSING GAME.
# Simple guessing game: up to guess_limit attempts to name the city.
answer_location = "HYDERABAD"
guess_count = 0
guess_limit = 3
# Loop while attempts remain.
while guess_count < guess_limit:
    guess =input('Where is Charminar located in India? : ')
    # Count this attempt.
    guess_count += 1
    if guess.upper() == answer_location :
        print ('YOUR ANSWER WAS CORRECT')
        break
    else :
        print('SORRY YOU FAILED TO ANSWER!!')
| true |
0a9bdfdc9abf985d9b4fd89635bc4e3e8e23435a | Python | Vovenberg/AnekBot_telegram | /dao/topPostsDao.py | UTF-8 | 1,816 | 3 | 3 | [] | no_license | import sqlite3
from dto.post import Post
class TopPostsDao:
    """SQLite-backed DAO for the `post` table (id, text, likes)."""

    def __init__(self):
        # Opens (and creates, if needed) the database file in the cwd.
        self.con = sqlite3.connect('topPosts.db')
        self.cursor = self.con.cursor()
        self.cursor.execute("CREATE TABLE IF NOT EXISTS post (id INT PRIMARY KEY, text TEXT, likes INT)")

    def create_single(self, post):
        """Insert one post; best-effort (operational errors are swallowed)."""
        try:
            # Security/correctness fix: parameterized query. The original
            # f-string interpolation broke on quotes in post.text and was
            # open to SQL injection.
            executed = self.cursor.execute(
                "INSERT INTO post(id,text,likes) VALUES (?,?,?)",
                (post.id, post.text, post.likes))
            self.con.commit()
            return executed.fetchall()
        except sqlite3.OperationalError:
            # Keep the original best-effort behavior for operational errors.
            pass

    def select_single(self, id):
        """Return the posts (list) with the given id; empty list if none."""
        rows = self.cursor.execute('SELECT * FROM post where id = ?', (id,)).fetchall()
        return convertToPostArray(rows)

    def select_random_single(self):
        """Return a one-element list holding a random post (empty if no rows)."""
        return convertToPostArray(self.cursor.execute('SELECT * FROM post ORDER BY RANDOM() LIMIT 1').fetchall())

    def select_all(self):
        """Return every stored post."""
        return convertToPostArray(self.cursor.execute('SELECT * FROM post').fetchall())

    def selectId_all(self):
        """Return the ids of all stored posts.

        Bug fix: the original passed 1-column rows to convertToPostArray,
        which indexes columns 1 and 2 and therefore always raised
        IndexError when any row existed.
        """
        return [row[0] for row in self.cursor.execute('SELECT id FROM post').fetchall()]

    def select_few(self, ids):
        """Return the posts matching each id in *ids* (order preserved)."""
        results = []
        for id in ids:
            results.extend(self.select_single(id))
        return results

    def create_few(self, posts):
        """Insert each post in *posts* (best-effort, like create_single)."""
        for post in posts:
            self.create_single(post)

    def post_not_exist(self, id):
        """True when no post with this id is stored.

        Bug fix: select_single returns a (possibly empty) list, never None,
        so the original `is None` check always answered False.
        """
        return not self.select_single(id)

    def count_posts(self):
        """Number of stored posts."""
        return len(self.select_all())

    def close(self):
        """Close the underlying database connection."""
        self.con.close()
def convertToPostArray(resultArray):
    """Map raw (id, text, likes) result rows to Post objects."""
    return [Post(row[0], row[1], row[2]) for row in resultArray]
| true |
45beed09e5392355ab8ecd4a510a2be32933f55f | Python | Nick-SDB/XXQG_bot | /wechat_ftqq.py | UTF-8 | 1,018 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- encoding=utf8 -*-
import datetime
import json
import requests
def sendWechat(text='', desp=''):
    """Send a WeChat notification through the ServerChan (sc.ftqq.com) relay.

    text : message title; nothing is sent when it is blank.
    desp : message body; the current timestamp is always appended.
    """
    # NOTE(review): hard-coded credential; move this key to configuration.
    sc_key = 'SCU87767Tb88d67f50de1408fdc46c93967c23a715e5f18618ecd9'
    if not text.strip():
        print('Text of message is empty!')
        return
    now_time = str(datetime.datetime.now())
    desp = '[{0}]'.format(now_time) if not desp else '{0} [{1}]'.format(desp, now_time)
    try:
        # Fix: pass text/desp via `params` so requests URL-encodes them.
        # The original string-built URL broke on spaces, '&', '#', and
        # non-ASCII characters in the message.
        resp = requests.get(
            'https://sc.ftqq.com/{}.send'.format(sc_key),
            params={'text': text, 'desp': desp}
        )
        resp_json = json.loads(resp.text)
        if resp_json.get('errno') == 0:
            print('Message sent successfully [text: {}, desp: \n{}]'.format(text, desp))
        else:
            # Fix: the original used logging-style print('...%s', value),
            # which printed a literal "%s" followed by the value.
            print('Fail to send message, reason: %s' % resp.text)
    except requests.exceptions.RequestException as req_error:
        print('Request error: %s' % req_error)
    except Exception as e:
        print('Fail to send message [text: {}, desp: {}]: {}'.format(text, desp, e))
| true |
025f6fff72e1c9462f82e805e30d8d049eb35990 | Python | abhiramsingh0/karplus_strong_algorithm | /ks.py | UTF-8 | 699 | 2.953125 | 3 | [] | no_license | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.io.wavfile import write
from playsound import playsound as ps
def KS_1(x, N):
    """Tile the initial buffer *x* until the output is exactly N samples long.

    This is the degenerate (no-decay) Karplus-Strong step: the output is
    identical copies of the buffer concatenated, truncated to N samples.
    """
    y = x
    while len(y) < N:
        # keep appending until we reach or exceed the required length
        y = np.append(y, x)
    # Trim the excess. Bug fix: the original sliced y[0:N+1] and returned
    # N+1 samples despite the documented "N-sample output" (off-by-one).
    return y[:N]
Fs = 16000 # 16 KHz sampling rate
b = np.random.randn(50)  # random initial excitation buffer
plt.stem(b);
y = KS_1(b, Fs * 1)  # one second of output samples
# we can look at a few periods:
plt.stem(y[0:500]);
# Scale to full int16 range, write a wav file, then play it.
# NOTE(review): the file is written at 44100 Hz although Fs is 16000,
# so playback is pitched up -- confirm the intended rate.
scaled = np.int16(y/np.max(np.abs(y)) * 32767)
write('test.wav', 44100, scaled)
ps('test.wav')
| true |
1836b7e50186d65e8f16c47060175b6ebfb837c9 | Python | Kang-Hoon/Python_reviews | /190426_lec/190426_3_거북아for반복그림그리자.py | UTF-8 | 311 | 3.6875 | 4 | [] | no_license | # 스크립트 모드입니다.
# Turtle graphics demo: draw four filled circles in different colors.
import turtle
t1 = turtle.Turtle()
t1.shape("turtle")
# NOTE(review): six entries (one empty) but only the first four are used.
color_list = ["yellow", "red", "blue", "Pink", "green", "" ]
n = 0
for n in range(4):
    t1.fillcolor(color_list[n])
    t1.begin_fill()
    t1.circle(100)
    t1.end_fill()
    t1.fd(100)  # move forward before drawing the next circle
| true |
85687bff08f738590426ebf71d70916e536e0ede | Python | kondrashov-do/hackerrank | /10_Days_of_Statistics/Day_0/Weighted_Mean.py | UTF-8 | 358 | 3.046875 | 3 | [] | no_license | #import numpy as np
n = int(input())
x = list(map(int, input().split()))
w = list(map(int, input().split()))
#print(np.average(x, weights=w))
"""
elementsSum = 0
weightsSum = 0
for i in range(n):
elementsSum += x[i]*w[i]
weightsSum += w[i]
print('{0:.1f}'.format(elementsSum/weightsSum))
"""
print(round((sum([i*j for i,j in zip(x, w)]))/sum(w), 1))
| true |