blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bc425304220cb8afee1bde8aa4411ab6539ebc38 | Python | arpithappu/python | /assignment1/q2.py | UTF-8 | 125 | 3.859375 | 4 | [] | no_license | n=int(input("enter a number:"))
# Sum the harmonic series 1/1 + 1/2 + ... + 1/n for the n read above.
# FIX: the accumulator was called `sum`, shadowing the builtin sum();
# renamed to `total`.  Printed output is unchanged.
total = 0
if n > 0:
    for i in range(1, n + 1):
        total += 1 / i
# Prints "result is0" for non-positive n, matching the original behaviour.
print(f"result is{total}")
698a80bbc03176ef88c880403a83f205f6da142f | Python | christinekost/python_course | /fibonacci.py | UTF-8 | 180 | 3.46875 | 3 | [] | no_license | a, b = 1, 1
# First Fibonacci number greater than 10**7.
# (a, b start at 1, 1 on the line above; each step advances one term.)
while b < 10**7:
    a, b = b, a + b
print(b)
# First Fibonacci number with at least 1000 digits (Project Euler 25 style).
a, b = 1, 1
while len(str(b)) < 1000:
    a, b = b, a + b
print(b)
# Same result computed numerically: first Fibonacci number >= 10**999.
a, b = 1, 1
while b < 10**999:
    a, b = b, a + b
print(b)
cf9870d990bfe9b036f78cab7b8e4b548f9ea8c2 | Python | RoPP/pytest-flake8dir | /tests/test_pytest_flake8dir.py | UTF-8 | 2,608 | 2.671875 | 3 | [
"ISC"
] | permissive | import flake8
import pytest
def test_make_py_files_single(flake8dir):
    """A single generated module should yield exactly one E221 warning."""
    flake8dir.make_py_files(
        example="""
        x  = 1
        """
    )
    result = flake8dir.run_flake8()
    # E221 = multiple spaces before operator; the column (1:2) matches
    # the two spaces after `x` in the fixture above.
    assert result.out_lines == [
        "./example.py:1:2: E221 multiple spaces before operator"
    ]
    assert result.exit_code == 1
def test_make_py_files_double(flake8dir):
    """Two generated modules each produce their own E221 warning."""
    flake8dir.make_py_files(
        example1="""
        x  = 1
        """,
        example2="""
        y  = 2
        """,
    )
    result = flake8dir.run_flake8()
    # flake8 output order across files is not guaranteed, hence the set.
    assert set(result.out_lines) == {
        "./example1.py:1:2: E221 multiple spaces before operator",
        "./example2.py:1:2: E221 multiple spaces before operator",
    }
def test_make_py_files_no_positional_args(flake8dir):
    """make_py_files() rejects positional arguments with a TypeError."""
    with pytest.raises(TypeError) as excinfo:
        flake8dir.make_py_files(
            1,
            example="""
            x  = 1
            """,
        )
    assert "make_py_files takes no positional arguments" in str(excinfo.value)
def test_make_py_files_requires_at_least_one_kwarg(flake8dir):
    """Calling make_py_files() with no files at all is a TypeError."""
    with pytest.raises(TypeError) as excinfo:
        flake8dir.make_py_files()
    assert "make_py_files requires at least one keyword argument" in str(excinfo.value)
def test_make_example_py(flake8dir):
    """make_example_py() is shorthand that writes example.py."""
    flake8dir.make_example_py(
        """
        x  = 1
        """
    )
    result = flake8dir.run_flake8()
    assert result.out_lines == [
        "./example.py:1:2: E221 multiple spaces before operator"
    ]
def test_make_setup_cfg(flake8dir):
    """A setup.cfg ignoring E221 suppresses the warning entirely."""
    flake8dir.make_setup_cfg(
        """
        [flake8]
        ignore = E221
        """
    )
    flake8dir.make_py_files(
        example="""
        x  = 1
        """
    )
    result = flake8dir.run_flake8()
    assert result.out_lines == []
def test_make_file(flake8dir):
    """make_file() writes a file under an arbitrary name."""
    flake8dir.make_file(
        "myexample.py",
        """
        x  = 1
        """,
    )
    result = flake8dir.run_flake8()
    assert result.out_lines == [
        "./myexample.py:1:2: E221 multiple spaces before operator"
    ]
def test_extra_args(flake8dir):
    """extra_args are forwarded to flake8; --ignore silences E221."""
    flake8dir.make_py_files(
        example="""
        x  = 1
        """
    )
    result = flake8dir.run_flake8(extra_args=["--ignore", "E221"])
    assert result.out_lines == []
def test_extra_args_version(flake8dir):
    """--version output starts with the installed flake8 version string."""
    result = flake8dir.run_flake8(extra_args=["--version"])
    assert result.out.startswith(flake8.__version__ + " ")
def test_separate_tmpdir(flake8dir):
    """Files are created in the fixture's own dir, not pytest's tmpdir."""
    flake8dir.make_py_files(
        example="""
        x  = 1
        """
    )
    assert not tmpdir.join("example.py").check()
| true |
03b111f19e27c12e1ea4171e2a4427f431a9e492 | Python | muralisc/pycrawler | /server.py | UTF-8 | 709 | 2.75 | 3 | [] | no_license | #!/bin/python
import crawler
from flask import Flask, jsonify, request
import logging
import json

# Module-wide logging setup; DEBUG level so crawler activity is visible.
log_level = logging.DEBUG
logging.basicConfig(level=log_level)
logger = logging.getLogger(__name__)

# Flask application serving the /crawl endpoint defined below.
app = Flask(__name__)
@app.route('/crawl', methods = ['POST'])
def hello_world():
    """Crawl `url` to `depth` (POST form fields) and return the site map as JSON.

    On crawler failure, logs the traceback and returns a JSON error object
    with a 500 status instead of leaking a raw exception page.
    """
    url = request.form['url']
    depth = request.form['depth']
    # Lazy %-args: the message is only formatted when INFO is enabled.
    logger.info("Recieved request for url: %s depth: %s", url, depth)
    c = crawler.Crawler(url, int(depth))
    try:
        site_map = c.crawl()
    except Exception:
        # FIX: was a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt.
        logger.error("Error while crawling", exc_info=True)
        # FIX: json.dumps of a str literal double-encoded the payload
        # (clients received a JSON *string* containing JSON).  jsonify
        # sends a real JSON object with the correct Content-Type, and a
        # 500 status signals the failure.
        return jsonify({"message": "Server error"}), 500
    return jsonify(dict(site_map))
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| true |
254a043fe09885d3d52db3ab3b81bdf5b79a5cb6 | Python | nandodelezo/python | /practica6/p6e5.py | UTF-8 | 991 | 4.59375 | 5 | [] | no_license | ##Escribe un programa que te pida números cada vez más grandes y que se guarden en una lista.
##Para acabar de escribir los números, escribe un número que no sea mayor que el anterior. El programa termina escribiendo la lista de números:
##Escribe un número: 6
##Escribe un número mayor que 6: 10
##Escribe un número mayor que 10: 12
##Escribe un número mayor que 12: 25
##Escribe un número mayor que 25: 9
##Los números que has escrito son: 6, 10, 12, 25 (Comentario si os fijáis ya no se imprime la lista tal cual, hay que imprimir uno por uno los valores de la lista,
##haced esto así a partir de ahora)
# Read strictly increasing numbers until a non-greater one is entered, then
# print them separated by commas, with a period after the last value.
listado = []
num1 = int(input("Introduce un número: "))
listado.append(num1)
num2 = int(input("Ahora otro mayor que %d: " % (num1)))
while (num2 > num1):
    listado.append(num2)
    num1 = num2
    num2 = int(input("Introduce otro aún mayor "))
print("Los numeros son:")
for i in listado:
    # BUG FIX: the original tested `if (listado[-1]):` — the *truthiness* of
    # the last value, which is always true for these inputs — so every number
    # was printed with ". ".  Compare the current item to the last element
    # instead; values are strictly increasing, so equality identifies the
    # final element unambiguously.
    if i == listado[-1]:
        print(i, end=". ")
    else:
        print(i, end=", ")
| true |
d60ab20ee7751ef5dfc0397da91e682c8def9601 | Python | sucre03/python | /io/github/sucre/junior/study-20160623.py | UTF-8 | 810 | 4 | 4 | [] | no_license | #20160623
### Difference between Python 2 and 3: in 2, print is a statement (no
### parentheses); in 3 it is a function and requires them.
print('hello,world')
## Each comma passed to print() inserts a single space between the arguments.
print('hello world','do you like scala','yes but i like python better')
print('The quick brown fox', 'jumps over', 'the lazy dog')
print(333)
print(111+222)
name=input('please enter your name:')
print('hello',name)
print('1024*768=',1024*768)
a=-100
# Python's syntax is simple: indent consistently with 4 spaces, and a ':'
# introduces a code block.
if a>=0:
    print(a)
else:
    print(-a)
print('I\' m ok')
print('I\'m learning\npython')
print('\\\n\\')
print(123)
print(456.789)
print('\'Hello,world\'')
print('\'Hello,\\\'Adam\\\'\'')
print('r\'\'\'Hello,')
print('Lisa!\'\'\'')
# // is floor division, % is the remainder (modulo).
print(7//2)
print(7%2)
# Python also allows r'' raw strings: characters inside '' are not escaped.
4ca45155c02b6172ced2a30d10da64ce55db2f61 | Python | tooringanalytics/libpycontainerize | /src/pycontainerize/containerize.py | UTF-8 | 5,616 | 2.609375 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python
'''
Containerize
Create a docker-compose file from a web service specification.
'''
import argparse
import os
import sys
import six
from pycontainerize.constants import DEFAULT_OUTPUT_DIR
from pycontainerize.constants import DEFAULT_PROJECTS_DIR
from pycontainerize.constants import DEFAULT_TEMPLATES_DIR
from pycontainerize.errors import ProjectNotFound
from pycontainerize.errors import TemplatesDirNotFound
from pycontainerize.project import Project
from pycontainerize.renderer import Renderer
'''
Templates are rendered in 4 types of contexts:
1. Project context
The context consists of one object called 'project', which contains
the project-specific parameters.
2. Domain context
The context consists of one object called 'domain', which contains
the domain-specfific parameters.
4. Service context
The context consists of:
- 'service' object containing the service's parameters,
- 'domain' object, containing the encapsulating domain parameters
- 'project' object containing the encapsulating project's parameters
3. Application (App) context
The context consists of:
- 'app' object containing the application's parameters,
- 'domain' object, containing the encapsulating domain parameters
- 'project' object containing the encapsulating project's parameters
Project Layout:
- Project Root
|
+- domains
| +- <domain.name>
| |
| +- services
| | +- <service_name>
| | |
| | +- serviceConfig.json
| |
| +- apps
| | +- <app_name>
| | |
| | +- appConfig.json
| +- certs: SSL/TLS Certificates for Nginx
|
+- projectConfig.json
+- networksConfig.json
Template Directory layout:
- templates
+- djangoapp: Django app templates
|
+- project: Project config file template
|
+- services: nginx config templates
Output Directory Layout:
- Project Root
+- apps
| +- <app_name>
+- services
|
+- docker-compose.yml
'''
class Containerizer(object):
    ''' Main application class '''

    def __init__(self):
        # Build the CLI parser once; parse_args() happens in main().
        self.parser = self.init_argparse()

    def init_argparse(self):
        """Build and return the argparse parser for the CLI."""
        parser = argparse.ArgumentParser(
            description='Containerize a project spec'
        )
        parser.add_argument('project_name',
                            help='Name of the project')
        parser.add_argument(
            '-d',
            '--projects-dir',
            help='Projects directory (default: %s)' % DEFAULT_PROJECTS_DIR,
        )
        parser.add_argument(
            '-t',
            '--templates-dir',
            help='Templates directory (default: %s)' % DEFAULT_TEMPLATES_DIR,
        )
        parser.add_argument(
            '-o',
            '--output-dir',
            help='Output directory (default: %s)' % DEFAULT_OUTPUT_DIR,
        )
        return parser

    def load_project(self,
                     project_name,
                     project_dir=DEFAULT_PROJECTS_DIR,
                     output_dir=DEFAULT_OUTPUT_DIR):
        """Load and return the Project found at project_dir/project_name.

        NOTE(review): `output_dir` is accepted but never used here —
        confirm whether it can be dropped from callers.
        """
        # Load the project from JSON
        project_path = os.path.join(project_dir, project_name)
        project = Project.load(project_path)
        return project

    def render_project(self,
                       project_dir,
                       templates_dir,
                       output_dir):
        ''' Render the templates for this project '''
        # Load the project from JSON
        project = Project.load(project_dir)
        # Create the Jinja2 Renderer
        renderer = Renderer(templates_dir)
        # Now render the templates for the project
        project.render(renderer, output_dir)

    def execute(self, args):
        """Validate CLI paths, create the output dir, and render the project.

        Raises TemplatesDirNotFound / ProjectNotFound for missing inputs.
        """
        project = args.project_name
        # Verify the templates dir exists
        if args.templates_dir:
            templates_dir = args.templates_dir
        else:
            templates_dir = DEFAULT_TEMPLATES_DIR
        if not os.path.exists(templates_dir) or \
                not os.path.isdir(templates_dir):
            raise TemplatesDirNotFound(
                'Invalid Templates Directory %s' % templates_dir
            )
        # Verify the project path exists
        if args.projects_dir:
            projects_dir = args.projects_dir
        else:
            projects_dir = DEFAULT_PROJECTS_DIR
        project_path = os.path.join(projects_dir, project)
        if not os.path.exists(project_path) or not os.path.isdir(project_path):
            raise ProjectNotFound('Invalid Project path %s' % project_path)
        # Ensure the output path exists
        if args.output_dir:
            output_dir = args.output_dir
        else:
            output_dir = DEFAULT_OUTPUT_DIR
        output_dir = os.path.join(output_dir, project)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # Now create the project from configuration files
        # and templates.
        self.render_project(project_path,
                            templates_dir,
                            output_dir)

    def main(self):
        """Parse CLI args and run; print the error, then re-raise it
        with its original traceback (six.reraise keeps py2/py3 compat)."""
        args = self.parser.parse_args()
        try:
            self.execute(args)
        except Exception as err:
            print(err)
            six.reraise(type(err), err, sys.exc_info()[2])
def main():
    """Console entry point: build a Containerizer and run it."""
    Containerizer().main()


if __name__ == "__main__":
    main()
| true |
d8e3424312935ee7658293feb3b010b818f9f9e6 | Python | arifcahya/introduction | /KaushalSoni 21BCON368 4rd file.py | UTF-8 | 435 | 3.84375 | 4 | [] | no_license | # This program is used to identify the profit and loss on the particular item
# Read cost and selling prices for n items, then report the overall
# profit or loss.
n = int(input("Enter no. of Items:"))
total_cost = 0
total_sale = 0
for _ in range(0, n):
    c = int(input("Enter Cost Price of Item:"))
    s = int(input("Enter Selling Price of Item:"))
    total_cost += c
    total_sale += s
print('Total Cost Price is:', total_cost)
print('Total Selling Price is:', total_sale)
difference = total_sale - total_cost
if difference > 0:
    print('profit is:')
    print(difference)
elif difference < 0:
    print('Loss is:')
    print(-difference)
else:
    # BUG FIX: breaking even (sale == cost) used to be reported as
    # "Loss is: 0"; report it explicitly instead.
    print('No profit, no loss:')
    print(0)
| true |
cfe0fec8e78ff3aafce92bc5d7d267a5ef8704b2 | Python | marijnkoolen/fuzzy-search | /fuzzy_search/phrase/phrase.py | UTF-8 | 9,868 | 2.71875 | 3 | [
"MIT"
] | permissive | import re
from collections import defaultdict, Counter
from typing import Dict, List, Set, Union
from fuzzy_search.tokenization.string import SkipGram, text2skipgrams
from fuzzy_search.tokenization.token import Token
from fuzzy_search.tokenization.token import Tokenizer
def is_valid_label(label: Union[str, List[str]]) -> bool:
    """Test whether label has a valid value.

    A valid label is either a single string or a list whose elements are
    all strings (an empty list is accepted, matching the original loop).

    :param label: a phrase label (either a string or a list of strings)
    :type label: Union[str, List[str]]
    :return: whether the label is valid
    :rtype: bool
    """
    if isinstance(label, list):
        # Idiomatic replacement for the manual for/return-False loop.
        return all(isinstance(item, str) for item in label)
    return isinstance(label, str)
class Phrase(object):
    """A search phrase indexed by character skipgrams for fuzzy matching.

    Precomputes skipgram indexes (exact-case and lowercase), early/late
    skipgram sets and pairwise skipgram distances so matchers can test
    candidate strings cheaply.
    """

    def __init__(self, phrase: Union[str, Dict[str, str]], ngram_size: int = 2, skip_size: int = 2,
                 early_threshold: int = 3, late_threshold: int = 3, within_range_threshold: int = 3,
                 ignorecase: bool = False, tokens: List[Token] = None, tokenizer: Tokenizer = None):
        # Accept a bare string or a config dict with at least a "phrase" key.
        if isinstance(phrase, str):
            phrase = {"phrase": phrase}
        self.name = phrase["phrase"]
        self.phrase_string = self.name if not ignorecase else self.name.lower()
        self.exact_string = re.escape(self.phrase_string)
        # NOTE(review): attribute name has a typo ("extact"); kept because
        # external code may reference it.
        self.extact_word_boundary_string = re.compile(rf"\b{self.exact_string}\b")
        self.label = None
        # max_offset/max_end of -1 mean "no positional restriction".
        self.max_offset: int = -1
        self.max_end: int = -1
        self.label_set: Set[str] = set()
        self.label_list: List[str] = []
        # NOTE: properties and metadata both alias the input dict (no copy).
        self.properties = phrase
        self.ngram_size = ngram_size
        self.skip_size = skip_size
        self.early_threshold = early_threshold
        # late_threshold is converted from "chars from the end" to an
        # absolute offset within the phrase.
        self.late_threshold = len(self.name) - late_threshold - ngram_size
        self.within_range_threshold = within_range_threshold
        self.ignorecase = ignorecase
        self.skipgrams = [skipgram for skipgram in text2skipgrams(self.phrase_string,
                                                                  ngram_size=ngram_size, skip_size=skip_size)]
        self.skipgram_set = set([skipgram.string for skipgram in self. skipgrams])
        self.skipgram_index: Dict[str, List[SkipGram]] = defaultdict(list)
        self.skipgram_index_lower: Dict[str, List[SkipGram]] = defaultdict(list)
        self.skipgram_freq = Counter([skipgram.string for skipgram in self.skipgrams])
        # Skipgrams near the start/end of the phrase, used to anchor matches.
        self.early_skipgram_index = {skipgram.string: skipgram for skipgram in
                                     self.skipgrams if skipgram.offset < early_threshold}
        self.late_skipgram_index = {skipgram.string: skipgram for skipgram in
                                    self.skipgrams if skipgram.offset + skipgram.length > self.late_threshold}
        # add lowercase version to allow both matching with and without ignorecase
        self.skipgrams_lower = [skipgram for skipgram in text2skipgrams(self.phrase_string.lower(),
                                                                        ngram_size=ngram_size, skip_size=skip_size)]
        self.early_skipgram_index_lower = {skipgram.string: skipgram for skipgram in self.skipgrams_lower
                                           if skipgram.offset < early_threshold}
        self.late_skipgram_index_lower = {skipgram.string: skipgram for skipgram in self.skipgrams_lower
                                          if skipgram.offset + skipgram.length > self.late_threshold}
        # print(self.late_skipgram_index_lower.keys())
        self.skipgram_freq_lower = Counter([skipgram.string for skipgram in self.skipgrams_lower])
        self.num_skipgrams = len(self.skipgrams)
        self.skipgram_distance = {}
        self.metadata: dict = phrase
        # Naive word split; replaced below when a tokenizer is supplied.
        self.words: List[str] = [word for word in re.split(r"\W+", self.phrase_string) if word != ""]
        self.word_set: Set[str] = set(self.words)
        self.tokens: List[Token] = tokens
        self.token_index = defaultdict(list)
        self.first_word = None if len(self.words) == 0 else self.words[0]
        self.last_word = None if len(self.words) == 0 else self.words[-1]
        self.num_words = len(self.words)
        if "label" in phrase:
            self.set_label(phrase["label"])
        # NOTE(review): add_metadata also handles "label"/"max_offset", so a
        # label key is processed twice here (harmless but redundant).
        if len(phrase.keys()) > 1:
            self.add_metadata(phrase)
        self._index_skipgrams()
        self._set_within_range()
        if tokens is None and tokenizer is not None:
            self.tokens = tokenizer.tokenize(self.phrase_string)
            self.words = [token.string for token in self.tokens]
            for ti, token in enumerate(self.tokens):
                # token.n is presumably the normalized token string — the
                # index maps it to positions in the token list.
                self.token_index[token.n].append(ti)

    def __repr__(self):
        return f"Phrase({self.phrase_string}, {self.label})"

    def __len__(self):
        # Length of a phrase is its character length, not its word count.
        return len(self.phrase_string)

    # internal methods

    def _index_skipgrams(self) -> None:
        """Turn the phrase into a list of skipgrams and index them with their offset(s) as values."""
        for skipgram in self.skipgrams:
            self.skipgram_index[skipgram.string] += [skipgram]
        for skipgram in self.skipgrams_lower:
            self.skipgram_index_lower[skipgram.string] += [skipgram]

    def _set_within_range(self):
        """Precompute, for every ordered pair of skipgrams whose offsets are
        at most within_range_threshold apart, the minimum offset distance."""
        self.skipgram_distance = {}
        for index1 in range(0, len(self.skipgrams)-1):
            skipgram1 = self.skipgrams[index1]
            for index2 in range(index1+1, len(self.skipgrams)):
                skipgram2 = self.skipgrams[index2]
                if skipgram2.offset - skipgram1.offset > self.within_range_threshold:
                    continue
                if (skipgram1, skipgram2) not in self.skipgram_distance:
                    self.skipgram_distance[(skipgram1, skipgram2)] = skipgram2.offset - skipgram1.offset
                elif self.skipgram_distance[(skipgram1, skipgram2)] > skipgram2.offset - skipgram1.offset:
                    self.skipgram_distance[(skipgram1, skipgram2)] = skipgram2.offset - skipgram1.offset

    # external methods

    def set_label(self, label: Union[str, List[str]]) -> None:
        """Set the label(s) of a phrase. Labels must be string and can be a single string or a list.

        :param label: the label(s) of a phrase
        :type label: Union[str, List[str]]
        """
        if not is_valid_label(label):
            raise ValueError("phrase label must be a single string or a list of strings:", label)
        self.label = label
        # Keep both set (fast membership) and list (ordered) views in sync.
        if isinstance(label, str):
            self.label_set = {label}
            self.label_list = [label]
        else:
            self.label_set = set(label)
            self.label_list = label

    def has_label(self, label_string: str) -> bool:
        """Check if a given label belongs to at least one phrase in the phrase model.

        :param label_string: a label string
        :type label_string: str
        :return: a boolean whether the label is part of the phrase model
        :rtype: bool
        """
        if isinstance(self.label, list):
            return label_string in self.label
        else:
            return label_string == self.label

    def add_metadata(self, metadata_dict: Dict[str, any]) -> None:
        """Add key/value pairs as metadata for this phrase.

        "label" and "max_offset" keys are additionally routed through their
        dedicated setters so derived attributes stay consistent.

        :param metadata_dict: a dictionary of key/value pairs as metadata
        :type metadata_dict: Dict[str, any]
        :return: None
        :rtype: None
        """
        for key in metadata_dict:
            self.metadata[key] = metadata_dict[key]
            if key == "label":
                self.set_label(metadata_dict[key])
            elif key == "max_offset":
                self.add_max_offset(metadata_dict["max_offset"])

    def add_max_offset(self, max_offset: int) -> None:
        """Add a maximum offset for matching a phrase in a text.

        :param max_offset: the maximum offset to allow a phrase to match
        :type max_offset: int
        """
        if not isinstance(max_offset, int):
            raise TypeError("max_offset must be a positive integer")
        if max_offset < 0:
            raise ValueError("max_offset must be positive")
        self.max_offset = max_offset
        # The latest character position at which a match may end.
        self.max_end = self.max_offset + len(self.phrase_string)

    def has_skipgram(self, skipgram: str) -> bool:
        """For a given skipgram, return boolean whether it is in the index

        :param skipgram: an skipgram string
        :type skipgram: str
        :return: A boolean whether skipgram is in the index
        :rtype: bool"""
        return skipgram in self.skipgram_index.keys()

    def skipgram_offsets(self, skipgram_string: str) -> Union[None, List[int]]:
        """For a given skipgram return the list of offsets at which it appears.

        :param skipgram_string: an skipgram string
        :type skipgram_string: str
        :return: A list of string offsets at which the skipgram appears
        :rtype: Union[None, List[int]]"""
        if not self.has_skipgram(skipgram_string):
            return None
        return [skipgram.offset for skipgram in self.skipgram_index[skipgram_string]]

    def within_range(self, skipgram1, skipgram2):
        """Return True when both skipgram strings occur in the phrase and
        their precomputed minimum distance is within the threshold.

        NOTE(review): skipgram_distance is keyed by SkipGram objects while
        has_skipgram takes strings — confirm callers pass the right types."""
        if not self.has_skipgram(skipgram1) or not self.has_skipgram(skipgram2):
            return False
        elif (skipgram1, skipgram2) not in self.skipgram_distance:
            return False
        elif self.skipgram_distance[(skipgram1, skipgram2)] > self.within_range_threshold:
            return False
        else:
            return True

    def is_early_skipgram(self, skipgram: str) -> bool:
        """For a given skipgram, return boolean whether it appears early in the phrase.

        :param skipgram: an skipgram string
        :type skipgram: str
        :return: A boolean whether skipgram appears early in the phrase
        :rtype: bool"""
        return skipgram in self.early_skipgram_index
| true |
1c6112a86918f09080933471ac507c2384a4b176 | Python | songye38/Computing-Form-and-Shape | /Session2/src/session2_example7.py | UTF-8 | 478 | 3.015625 | 3 | [] | no_license | #session2 - 6
#nesting loops to create many curves with random values
import rhinoscriptsyntax as rs
import random
#we can establish 10 curves
for c in range(3):
#we have to create list to append point
listOfPoints = []
for q in range(50):
v1 = random.uniform(-100,100)
v2 = random.uniform(-100,100)
v3 = random.uniform(-100,100)
listOfPoints.append([v1,v2,v3])
#outside of inner for loop
rs.AddInterpCurve(listOfPoints,3)
| true |
29bd23f1c053fc6ceaa24b036f76aee1c2e44263 | Python | junyechen/PAT-Advanced-Level-Practice | /1058 A+B in Hogwarts.py | UTF-8 | 1,304 | 4.03125 | 4 | [] | no_license | """
If you are a fan of Harry Potter, you would know the world of magic has its own currency system -- as Hagrid explained it to Harry, "Seventeen silver Sickles to a Galleon and twenty-nine Knuts to a Sickle, it's easy enough." Your job is to write a program to compute A+B where A and B are given in the standard form of Galleon.Sickle.Knut (Galleon is an integer in [0,107], Sickle is an integer in [0, 17), and Knut is an integer in [0, 29)).
Input Specification:
Each input file contains one test case which occupies a line with A and B in the standard form, separated by one space.
Output Specification:
For each test case you should output the sum of A and B in one line, with the same format as the input.
Sample Input:
3.2.1 10.16.27
Sample Output:
14.1.28
"""
#############################################
"""
本题非常简单,一次通过
python无需考虑数位溢出问题
"""
#############################################
# Add two Hogwarts amounts given as Galleon.Sickle.Knut.
# 17 Sickles = 1 Galleon, 29 Knuts = 1 Sickle.
A, B = input().split()
galleons_a, sickles_a, knuts_a = map(int, A.split('.'))
galleons_b, sickles_b, knuts_b = map(int, B.split('.'))
# Convert both amounts to a single knut total, add, then factor back out —
# equivalent to the digit-by-digit carry but with no special cases.
total_knuts = (((galleons_a + galleons_b) * 17 +
                sickles_a + sickles_b) * 29 +
               knuts_a + knuts_b)
galleons, remainder = divmod(total_knuts, 17 * 29)
sickles, knuts = divmod(remainder, 29)
print('%d.%d.%d' % (galleons, sickles, knuts))
| true |
3a11cdedfd58ab4fd5e49c9c59388f2a797a6521 | Python | sachinraut11/PyCharm | /method of override.py | UTF-8 | 554 | 4.53125 | 5 | [] | no_license | # Python program to demonstrate
# method overriding
# Defining parent class
class Parent():
    """Base class in the method-overriding demo; Child overrides show()."""

    def __init__(self):
        """Tag the instance with the parent's identifying text."""
        self.value = "Inside Parent"

    def show(self):
        """Display which class this instance reports itself as."""
        print(self.value)
# Defining child class
class Child(Parent):
    """Subclass that overrides both Parent's value and its show() method."""

    def __init__(self):
        """Tag the instance with the child's identifying text."""
        self.value = "Inside Child"

    def show(self):
        """Display which class this instance reports itself as."""
        print(self.value)
# Driver's code: dynamic dispatch selects show() from each object's actual
# class, so obj2.show() runs Child's override, not Parent's method.
obj1 = Parent()
obj2 = Child()
obj1.show()
obj2.show()
| true |
b9e39b64d0f457ffa25233d9f271068f7d09b442 | Python | conorgpower/DataMining_Lab1 | /sqlData.py | UTF-8 | 2,661 | 3.5 | 4 | [] | no_license | import pandas as pd
import numpy as np
import pymysql

# STEP 6
# Q. Now your mysql installation contains BSCY4 database that contains 1 table,
# AVOCADO. Use pymysql module to import contents of the table via pandas.
# A.
# NOTE(review): credentials are hard-coded (root, empty password) — fine for
# a lab exercise, not for anything shared.
connection = pymysql.connect(host='127.0.0.1', user='root', password ='', db='BSCY4')
df = pd.read_sql('SELECT * FROM AVOCADO', con =connection)
# NOTE(review): this is an alias, not a copy — cleanData and df are the SAME
# DataFrame, so every "cleanData[...] = ..." below also mutates df.  The
# later steps depend on this aliasing (see the year replacements).
cleanData = df

# STEP 7
# Q. Cleanse the content of the field "region".
cleanData['region'] = df['region'].str.replace(' ','')
cleanData['region'] = df['region'].str.replace('-','')
# Q. What can you say about the regions represented?
# A.
print(df['region'].unique())
# The regions represented are very poorly categorised. Some values are for states and
# others are for regions within these states. Therefore there are overlapping values.
# Other values simply indicate direction with no context e.g. 'West'.One uniform
# naming convention should be implemented.
# Q. How many different regions there are?
# A. 54 regions after cleaning.
print("Total regions after cleaning: ", len(cleanData['region'].unique()))
# Q. Are there problems with this variable, if yes, what are the problems and how many?
# A. Yes.
print(df['region'].unique())
# There are multiple errors in this column:
# (1) 'Baltimore-Washington' should not contain a hyphen.
# (2) ' Denver' should not have a space at the begining
# (3) ' Denver ' should not have a space at the begining or the end

# STEP 8
# Q. What years are represented?
# A. 2015 2016 2017 2018 are the years represented.
print(df['year'].unique())
# Q. Describe any errors that you see in data.
# A. Some 2017 values are represented as 17, some 2018 values are represented as 18.
# Q. How many rows are affected?
# A. 3208 rows were affected.
# Rows whose year fails to parse as a 4-digit year become NaT; counting the
# NaT values counts the bad rows.
error = pd.to_datetime(df['year'], errors='coerce', format='%Y')
print ("Total errors: ", sum(error.isna()))
# Q. Cleanse the content of the field "year".
# NOTE(review): the second line re-reads df['year'] — this only preserves the
# 17->2017 fix because cleanData aliases df; with a real .copy() the first
# replacement would be silently discarded.
cleanData['year'] = df['year'].replace(17, 2017)
cleanData['year'] = df['year'].replace(18, 2018)
# '18': '2018'

# STEP 9
# Q. Describe any errors that you see.
# A. Conventional is present with a capital and with out a capital.
# Because th CSV table contained conventional with no capital I will
# assume that that is correct.
# Q. How many rows are affected?
# A. 169 rows are effected.
print (len(df['type']) - sum(df['type'] =='conventional'))
# Q. What avocado type are represented?
print (df['type'].unique())
# A. 'conventional' is the only type represented.
# Q. Cleanse the content of the field "type".
cleanData['type'] = df['type'].str.lower()
751e767f489023cab5f939e61f7862192267a821 | Python | cjsmith33/CIS554-Controlling-Prog-Flow | /Using Conditional Statements/Exercise6/test.py | UTF-8 | 2,554 | 3.609375 | 4 | [] | no_license | """
A completed test script for the Pig Latin module.
Author: Charles Smith
Date: 01 January 2021
"""
import funcs
import introcs
def test_first_vowel():
    """
    Test procedure for the function first_vowel()
    """
    print('Testing first_vowel()')
    # Table of (word, expected index of the first vowel; -1 when none).
    # The order matches the original one-assertion-per-case sequence.
    cases = [
        ('grrm', -1),    # no vowels at all
        ('pat', 1),      # letter a
        ('step', 2),     # letter e
        ('strip', 3),    # letter i
        ('stop', 2),     # letter o
        ('truck', 2),    # letter u
        ('ygglx', -1),   # y not acting as a vowel
        ('sky', 2),      # y acting as a vowel
        ('apple', 0),    # multi-vowel combinations
        ('sleep', 2),
        ('year', 1),
    ]
    for word, expected in cases:
        introcs.assert_equals(expected, funcs.first_vowel(word))
    # Feel free to add more
def test_pigify():
    """
    Test procedure for the function pigify()
    """
    print('Testing pigify()')
    # Table of (word, expected Pig Latin form), in the original check order.
    cases = [
        ('grrm', 'grrmay'),      # no vowels
        ('pat', 'atpay'),        # letter a
        ('step', 'epstay'),      # letter e
        ('strip', 'ipstray'),    # letter i
        ('stop', 'opstay'),      # letter o
        ('truck', 'ucktray'),    # letter u
        ('ygglx', 'ygglxay'),    # y not acting as a vowel
        ('sky', 'yskay'),        # y acting as a vowel
        ('apple', 'applehay'),   # multi-vowel combinations
        ('sleep', 'eepslay'),
        ('quiet', 'ietquay'),    # qu treated as a unit
    ]
    for word, expected in cases:
        introcs.assert_equals(expected, funcs.pigify(word))
# Script entry: run both test procedures; any failed assertion aborts
# before the success message prints.
test_first_vowel()
test_pigify()
print('Module funcs passed all tests.')
| true |
728c34ff067a2ad7384d8d76076412bf8a6409c9 | Python | douglask3/GDAY | /src/bewdy.py | UTF-8 | 13,755 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python
""" Photosynthesis model """
import sys
import datetime
from math import fabs, exp, sqrt, sin, pi, log
import constants as const
from utilities import float_eq, float_lt, float_le, float_gt, day_length
__author__ = "Martin De Kauwe"
__version__ = "1.0 (08.03.2011)"
__email__ = "mdekauwe@gmail.com"
class Bewdy(object):
""" BEWDY - calculates plant C assimilation.
Mechanistic model of gross canopy photosynthesis (GPP) as a function of
LAI, intensity of direct radiation, intensity of diffuse radiation at
the top of the canopy and leaf N content at the top of the canopy.
See Medlyn et al for more details.
*** RESULTS FROM THIS DON'T MAKE SENSE, DON'T USE, 22 APR 2013 ***
References:
-----------
* Medlyn, B. E. et al (2000) Canadian Journal of Forest Research,30,873-888.
"""
def __init__(self, control, params, state, fluxes, met_data):
"""
Parameters
----------
control : integers, object
model control flags
params: floats, object
model parameters
state: floats, object
model state
fluxes : floats, object
model fluxes
met_data : floats, dictionary
meteorological forcing data
"""
self.params = params
self.fluxes = fluxes
self.control = control
self.state = state
self.met_data = met_data
    def calculate_photosynthesis(self, frac_gcover, day, daylen):
        """
        Parameters:
        -----------
        frac_gcover : float
            fraction of ground cover
        day : integer
            project day
        daylen : float
            daylength in hours

        Returns:
        --------
        Nothing
        Method calculates GPP, NPP and Ra.
        """
        (temp, sw_rad, ca, vpd) = self.get_met_data(day)
        # Daylength in seconds; used to convert daily totals to rates.
        daylength = daylen * const.SECS_IN_HOUR
        # calculated from the canopy-averaged leaf N
        leaf_absorptance = ((self.state.ncontent / 2.8) /
                            (self.state.ncontent / 2.8 + 0.076))
        # Split incoming shortwave into direct and diffuse components.
        # The 0.235 factor presumably converts MJ to photon-flux units —
        # TODO confirm against the original BEWDY/Medlyn derivation.
        direct_rad = sw_rad / daylength / 0.235 * self.params.direct_frac
        diffuse_rad = (sw_rad / daylength / 0.235 *
                       (1.0 - self.params.direct_frac))
        (quantum_yield, rho) = self.calculate_bewdy_params(temp, ca, vpd)
        # b/s: light-driven terms for direct and diffuse radiation.
        b = quantum_yield * self.params.kext * direct_rad * leaf_absorptance
        s = quantum_yield * self.params.kext * diffuse_rad * leaf_absorptance
        # effect of incomplete ground cover - modifies lai to lai/cover
        # (jackson & palmer)
        lai_mod = self.state.lai / frac_gcover
        # n: nitrogen-driven capacity term averaged over the canopy.
        n = (rho * self.params.kext *
             (self.state.ncontent - self.params.nmin) * lai_mod /
             (1.0 - exp(-self.params.kext * lai_mod)))
        # GPP = sunlit + shaded canopy contributions.
        self.fluxes.gpp = ((self.sunlit_contribution(b, s, n, lai_mod) +
                            self.shaded_contribution(b, s, n, lai_mod)))
        # rescale gpp
        self.fluxes.gpp *= daylength * const.UMOL_TO_MOL
        self.fluxes.npp = self.calculate_npp(temp, frac_gcover)
        # Unit conversions: t/ha <-> g C m-2 forms kept side by side.
        self.fluxes.npp_gCm2 = (self.fluxes.npp * const.M2_AS_HA /
                                const.G_AS_TONNES)
        self.fluxes.gpp_gCm2 = self.fluxes.npp_gCm2 / self.params.cue
        #print self.fluxes.npp
        # APAR is not computed by this model; sentinel marks it unavailable.
        self.fluxes.apar = -999.9
def get_met_data(self, day):
""" Grab the days met data out of the structure and return day values.
Parameters:
----------
day : int
project day.
Returns:
-------
temp : float
am/pm temp in a list [degC]
sw_rad : float
SW down radiation [mj/m2/day]
ca : float
atmospheric co2, depending on flag set in param file this will be
ambient or elevated.
"""
temp = self.met_data['tair'][day]
sw_rad = self.met_data['sw_rad'][day]
ca = self.met_data['co2'][day]
vpd = self.met_data['vpd_avg'][day]
return temp, sw_rad, ca, vpd
def calculate_bewdy_params(self, temp, ca, vpd):
""" Calculates BEWDY model parameters
Estimate the quantum yield (alpha) of absorbed radiation and rho, the
linear relationship between photosynthesis and leaf N content, using
the Farquhar and von Caemmerer (1982) model of leaf photosynthesis
In this model leaf photosysnthesis is given by the minimum of the rate
of carboxylation when Rubiso is limiting (ac) and the rate of
carboxylation when RUBP regeneration is limiting (aj).
Temperature is assumed to affect km, gamma_star, jmax and vcmax.
may want to use harley - type self.fluxes.temperature functions? However
these temperature dependences are taken from Brooks and Farquahr (1985)
for gamma_star, McMurtrie and Wang (1993) for km, and Kirschbaum (1986)
for jmax and vcmax.
Parameters:
-----------
temp : float
air temperature [degC]
ca : float
atmospheric co2, depending on flag set in param file this will be
ambient or elevated. [umol mol-1]
vpd : float
vpd [kPa]
Returns:
--------
quantum_yield : float
quantum yield of absorbed radiation
rho : float
model parameter
"""
# co2 compensation point in the absence of mitochondrial respiration
gamma_star = (42.7 + 1.68 * (temp - 25.0) + 0.012 * (temp - 25.0)**2)
# effective Michaelis-Menen coefficent of Rubisco activity
km = 39.05 * exp(0.085 * temp) + 9.58 * gamma_star
# max rate of electron transport and rubisco activity
(jmax, vcmax) = self.jmax_and_vcmax_func(temp)
# co2 concentration of intercellular air spaces
ci = self.intercellular_co2_conc(gamma_star, ca, vpd)
# quantum yield of absorbed radiation
quantum_yield = self.calculate_quantum_yield(ci, gamma_star)
# rate of carboxylation when rubiusco is limiting (Ac)
aj = ((jmax / 4.0) * ((ci - gamma_star) / (ci + 2. * gamma_star)) *
const.MOL_C_TO_GRAMS_C)
# rate of carboxylation when RUBP regeneration is limiting (Aj)
ac = vcmax * ((ci - gamma_star) / (ci + km)) * const.MOL_C_TO_GRAMS_C
rho = min(ac, aj)
return quantum_yield, rho
def sunlit_contribution(self, b, s, n, lai_mod):
    """Calculate contribution from sunlit foliage to GPP.

    Parameters:
    -----------
    b : float
        model parameter
    s : float
        model parameter
    n : float
        model parameter
    lai_mod : float
        effect of incomplete ground cover - modifies lai to lai/cover

    Returns:
    --------
    sunlit_contribution : float
        contribution from sunlit foliage to GPP
    """
    # Beer's-law style light interception term over the modified LAI.
    light_term = (1.0 / self.params.kext *
                  (1.0 - exp(-self.params.kext * lai_mod)))
    # Weighting of the model parameters for the sunlit fraction.
    weight_term = (n * s * (n + s) + b * n ** 2) / (n + s) ** 2
    return light_term * weight_term
def shaded_contribution(self, b, s, n, lai_mod):
    """Calculate contribution from shaded foliage to GPP.

    Parameters:
    -----------
    b : float
        model parameter
    s : float
        model parameter
    n : float
        model parameter
    lai_mod : float
        effect of incomplete ground cover - modifies lai to lai/cover

    Returns:
    --------
    shaded_contribution : float
        contribution from shaded foliage to GPP
    """
    # Scale factor combining the extinction coefficient and parameters.
    scale = 1.0 / self.params.kext * (b ** 2 * n ** 2) / (n + s) ** 3.0
    # Light attenuated through the canopy, relative to the unattenuated case.
    attenuated = (n + s) * exp(-self.params.kext * lai_mod) + b
    return scale * log(attenuated / (n + s + b))
def calculate_npp(self, temp, frac_gcover):
    """ figure out net photosynthesis
    Parameters:
    -----------
    temp : float
        air temperature
    frac_gcover : float
        fraction of ground cover
    Returns:
    --------
    npp : float
        net primary productivity
    """
    if self.control.assim_model == 5:
        # use dependence on nitrogen and temperature:
        # NPP = growth_efficiency * (GPP - autotrophic respiration),
        # with GPP converted via const.G_M2_2_TON_HEC (units per that
        # constant -- presumably g m-2 -> t ha-1; confirm against const).
        self.fluxes.auto_resp = self.calc_autotrophic_respiration(temp)
        npp = (self.params.growth_efficiency *
               (self.fluxes.gpp * frac_gcover * const.G_M2_2_TON_HEC -
                self.fluxes.auto_resp))
    else:
        # use proportionality with GPP (carbon-use efficiency)
        npp = (self.params.cue * self.fluxes.gpp * frac_gcover *
               const.G_AS_TONNES / const.M2_AS_HA)
    return npp
def calc_autotrophic_respiration(self, temp):
    """Calculate respiration with dependence on N and temperature.

    Parameters:
    -----------
    temp : float
        air temperature

    Returns:
    --------
    ra : float
        autotrophic respiration
    """
    # Total plant nitrogen pool: shoot + root + mobile stem N.
    total_plant_n = self.state.shootn + self.state.rootn + self.state.stemnmob
    # 12/14 converts per-N to per-C terms; base rate at 15 degC with an
    # exponential (Q10-style) temperature response via kq10.
    base_rate = 0.0106 * total_plant_n * 12.0 / 14.0
    return base_rate * exp(self.params.kq10 * (temp - 15.0))
def jmax_and_vcmax_func(self, temp):
    """Maximum rate of electron transport (jmax) and of rubisco activity.

    Parameters:
    -----------
    temp : float
        air temperature

    Returns:
    --------
    jmax : float
        maximum rate of electron transport
    vcmax : float
        maximum rate of Rubisco activity
    """
    if float_gt(temp, 10.0):
        # Cubic polynomial temperature responses centred on 25 degC,
        # written in Horner form.
        dt = temp - 25.0
        jmax = (self.params.jmaxna *
                (1.0 + dt * (0.05 + dt * ((-1.81 * 1E-3) +
                                          dt * (-1.37 * 1E-4)))))
        vcmax = (self.params.vcmaxna *
                 (1.0 + dt * (0.0485 + dt * ((-6.93 * 1E-4) +
                                             dt * (-3.9 * 1E-5)))))
    elif float_gt(temp, 0.0):
        # Linear ramp between 0 and 10 degC.
        jmax = self.params.jmaxna * 0.0305 * temp
        vcmax = self.params.vcmaxna * 0.0238 * temp
    else:
        # No activity at or below freezing.
        jmax = 0.0
        vcmax = 0.0
    return jmax, vcmax
def intercellular_co2_conc(self, gamma_star, ca, vpd):
    """Calculate the intercellular (Ci) concentration.

    Formed by substituting gs = g0 + 1.6 * (1 + (g1/sqrt(D))) * A/Ca into
    A = gs / 1.6 * (Ca - Ci) and assuming intercept (g0) = 0.

    Parameters:
    ----------
    gamma_star : float
        CO2 compensation point
    vpd : float
        vapour pressure deficit
    ca : float
        ambient co2 concentration

    Returns:
    -------
    ci : float
        intercellular CO2 concentration

    References:
    -----------
    * Medlyn, B. E. et al (2011) Global Change Biology, 17, 2134-2144.
    """
    # Only Medlyn's optimal stomatal conductance model is supported.
    if self.control.gs_model != "MEDLYN":
        raise AttributeError('Only Belindas gs model is implemented')
    # Scale g1 by the soil-water limitation factor.
    g1_wet = self.params.g1 * self.state.wtfac_root
    # Ci:Ca ratio from the Medlyn model, then back out Ci.
    ratio = g1_wet / (g1_wet + sqrt(vpd))
    return ratio * ca
def calculate_quantum_yield(self, ci, gamma_star):
    """co2 fixed / photons absorbed
    Parameters:
    -----------
    gamma_star : float
        CO2 compensation point in the abscence of mitochondrial respiration
    ci : float
        intercellular CO2 concentration.
    Returns:
    --------
    alpha_a : float
        model_parameter (quantum yield, in grams of C via
        const.MOL_C_TO_GRAMS_C)
    """
    # Intrinsic quantum yield alpha_j scaled by 4 electrons per CO2.
    arg1 = self.params.alpha_j / 4.0
    # CO2 limitation term of the electron-transport-limited rate.
    arg2 = ((ci - gamma_star) / (ci + 2. * gamma_star))
    return arg1 * arg2 * const.MOL_C_TO_GRAMS_C
# Stand-alone driver: run the BEWDY model over the Duke test meteorology.
# NOTE: this block is Python 2 (xrange, print statement).
if __name__ == "__main__":
    from file_parser import initialise_model_data
    from utilities import float_lt, day_length
    import datetime
    fname = "/Users/mdekauwe/src/python/pygday/params/duke_testing.cfg"
    (control, params, state, files,
     fluxes, met_data,
     print_opts) = initialise_model_data(fname, DUMP=False)
    B = Bewdy(control, params, state, fluxes, met_data)
    # Initial LAI from specific leaf area, C fraction and shoot C pool.
    state.lai = (params.slainit * const.M2_AS_HA /
                 const.KG_AS_TONNES / params.cfracts *
                 state.shoot)
    # Specific LAI (m2 onesided/kg DW)
    state.sla = params.slainit
    year = str(control.startyear)
    month = str(control.startmonth)
    day = str(control.startday)
    datex = datetime.datetime.strptime((year + month + day), "%Y%m%d")
    #laifname = "/Users/mdekauwe/research/NCEAS_face/GDAY_duke_simulation/experiments/lai"
    #import numpy as np
    #laidata = np.loadtxt(laifname)
    # Step through every project day in the met forcing.
    for project_day in xrange(len(met_data['prjday'])):
        state.shootnc = state.shootn / state.shoot
        # when it reads the duke file the shootn is very low and it buggers
        # this up if ur running standalone to test so play with the shootnc
        # ratio. Checked and the actual code seems fine
        #state.shootnc = 0.02
        # Leaf N content per unit leaf area (g N m-2, via KG_AS_G).
        state.ncontent = (state.shootnc * params.cfracts /
                          state.sla * const.KG_AS_G)
        daylen = day_length(datex, params.latitude)
        # Fractional ground cover saturates at lai_cover.
        if float_lt(state.lai, params.lai_cover):
            frac_gcover = state.lai / params.lai_cover
        else:
            frac_gcover = 1.0
        B.calculate_photosynthesis(frac_gcover, datex, project_day, daylen)
        print fluxes.gpp_gCm2
        datex += datetime.timedelta(days=1)
| true |
6956d2d5f0bde821eb05ef676c018bd4cfa46501 | Python | luke28/NENIF | /script/sort_cascades.py | UTF-8 | 550 | 2.9375 | 3 | [] | no_license | import os
import sys
# Read cascades: one cascade per line, as comma-separated alternating
# (node id, activation time) values. NOTE: Python 2 script (xrange).
a = []
with open('../data/cascades256.txt', "r") as f:
    for line in f:
        line = line.strip();
        if len(line) == 0:
            continue
        nums = line.split(",")
        b = []
        # Pair up consecutive (id, time) fields.
        for i in xrange(0, len(nums), 2):
            b.append((int(nums[i]), float(nums[i+1])))
        # Sort each cascade by activation time.
        b.sort(key = lambda t : t[1])
        a.append(b)
# Write sorted cascades as tab-separated "id<TAB>time" pairs per line.
with open('../data/cascades256_sorted', "w") as f:
    for item in a:
        for it in item:
            f.write(str(it[0]) + "\t" + str(it[1]) + "\t")
        f.write("\n")
| true |
6615082ba8b078ed0b9fd89bb35d17ef1abe74a7 | Python | ChaitanyaCixLive/Keras_Examples | /AntiRectifier/antirectifier.py | UTF-8 | 3,687 | 3.390625 | 3 | [] | no_license | '''The example demonstrates how to write custom layers for Keras.
We build a custom activation layer called 'Antirectifier',
which modifies the shape of the tensor that passes through it.
We need to specify two methods: `compute_output_shape` and `call`.
Note that the same result can also be achieved via a Lambda layer.
Because our custom layer is written with primitives from the Keras
backend (`K`), our code can run both on TensorFlow and Theano.
'''
import keras
from keras.models import Sequential
from keras import layers
from keras.datasets import mnist
from keras import backend as K
class Antirectifier(layers.Layer):
    '''This is the combination of a sample-wise
    L2 normalization with the concatenation of the
    positive part of the input with the negative part
    of the input. The result is a tensor of samples that are
    twice as large as the input samples.
    It can be used in place of a ReLU.
    # Input shape
        2D tensor of shape (samples, n)
    # Output shape
        2D tensor of shape (samples, 2*n)
    # Theoretical justification
        When applying ReLU, assuming that the distribution
        of the previous output is approximately centered around 0.,
        you are discarding half of your input. This is inefficient.
        Antirectifier allows to return all-positive outputs like ReLU,
        without discarding any data.
        Tests on MNIST show that Antirectifier allows to train networks
        with twice less parameters yet with comparable
        classification accuracy as an equivalent ReLU-based network.
    '''

    def compute_output_shape(self, input_shape):
        # Output doubles the feature dimension; only 2D inputs supported.
        dims = list(input_shape)
        assert len(dims) == 2  # only valid for 2D tensors
        return tuple(dims[:-1] + [dims[-1] * 2])

    def call(self, inputs):
        # Centre each sample, L2-normalize it, then concatenate the
        # positive and negative halves (both rectified to be positive).
        centred = inputs - K.mean(inputs, axis=1, keepdims=True)
        normed = K.l2_normalize(centred, axis=1)
        return K.concatenate([K.relu(normed), K.relu(-normed)], axis=1)
class Model():
    """MNIST classifier: a small MLP that uses Antirectifier activations."""

    def __init__(self, batch_size = 128, num_classes = 10, epochs = 40):
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.epochs = epochs
        self.model = Model._build_model()

    def get_data(self):
        """Load MNIST, flatten to 784-vectors in [0, 1], one-hot the labels."""
        (train_x, train_y), (test_x, test_y) = mnist.load_data()
        train_x = train_x.reshape(60000, 784).astype('float32') / 255
        test_x = test_x.reshape(10000, 784).astype('float32') / 255
        train_y = keras.utils.to_categorical(train_y, self.num_classes)
        test_y = keras.utils.to_categorical(test_y, self.num_classes)
        return train_x, test_x, train_y, test_y

    @staticmethod
    def _build_model():
        """Build and compile the two-hidden-layer Antirectifier MLP."""
        net = Sequential([
            layers.Dense(256, input_shape=(784,)),
            Antirectifier(),
            layers.Dropout(0.1),
            layers.Dense(256),
            Antirectifier(),
            layers.Dropout(0.1),
            layers.Dense(10),
            layers.Activation('softmax'),
        ])
        net.compile(loss='categorical_crossentropy',
                    optimizer='rmsprop',
                    metrics=['accuracy'])
        return net

    def run(self):
        """Train on the MNIST training split, validating on the test split."""
        train_x, test_x, train_y, test_y = self.get_data()
        self.model.fit(train_x, train_y,
                       batch_size=self.batch_size,
                       epochs=self.epochs,
                       verbose=1,
                       validation_data=(test_x, test_y))
if __name__ == "__main__":
    # Train the Antirectifier MLP on MNIST when run as a script.
    model = Model()
    model.run()
| true |
1e03d876a69ec669032e20af663aa27c0ac551cd | Python | yujianzhang7/COEN296_fall2018 | /helper.py | UTF-8 | 2,722 | 2.859375 | 3 | [] | no_license | #! /usr/bin/env python3
#-*- coding:utf-8 -*-
from paths import raw_dir, sxhy_path, check_uptodate
from singleton import Singleton
import jieba
import os
# Raw Shixuehanying ("poetic phrase taxonomy") source file.
_rawsxhy_path = os.path.join(raw_dir, 'shixuehanying.txt')
def _gen_sxhy_dict():
    """Build the Shixuehanying word list from the raw file.

    Lines starting with '<' are section headers and skipped. Each phrase
    (after the first token on a line) is cut into 2-char words; any
    3-char-or-shorter tail is segmented with jieba. The resulting word
    set is written space-separated to sxhy_path.
    NOTE: set iteration order is not deterministic across runs, so the
    output file's word order may vary.
    """
    print("Parsing shixuehanying dictionary ...")
    words = set()
    with open(_rawsxhy_path, 'r') as fin:
        for line in fin.readlines():
            if line[0] == '<':
                continue
            for phrase in line.strip().split()[1:]:
                # Only keep phrases made purely of Chinese characters.
                if not is_cn_sentence(phrase):
                    continue
                idx = 0
                while idx + 4 <= len(phrase):
                    # Cut 2 chars each time.
                    words.add(phrase[idx : idx + 2])
                    idx += 2
                # Use jieba to cut the last 3 chars.
                if idx < len(phrase):
                    for word in jieba.lcut(phrase[idx:]):
                        words.add(word)
    with open(sxhy_path, 'w') as fout:
        fout.write(' '.join(words))
class Segmenter(Singleton):
    """Chinese word segmenter that prefers the Shixuehanying dictionary.

    Cuts a sentence into 2-char chunks; chunks found in the sxhy
    dictionary are kept whole, everything else falls back to jieba.
    """

    def __init__(self):
        # (Re)generate the dictionary file if it is missing or stale.
        if not check_uptodate(sxhy_path):
            _gen_sxhy_dict()
        with open(sxhy_path, 'r') as fin:
            self.sxhy_dict = set(fin.read().split())

    def segment(self, sentence):
        """Return the list of tokens for *sentence* (see class docstring)."""
        toks = []
        idx = 0
        while idx + 4 <= len(sentence):
            # Cut 2 chars each time.
            if sentence[idx : idx + 2] in self.sxhy_dict:
                toks.append(sentence[idx : idx + 2])
            else:
                for tok in jieba.lcut(sentence[idx : idx + 2]):
                    toks.append(tok)
            idx += 2
        # Cut last 3 chars.
        if idx < len(sentence):
            if sentence[idx : ] in self.sxhy_dict:
                toks.append(sentence[idx : ])
            else:
                for tok in jieba.lcut(sentence[idx : ]):
                    toks.append(tok)
        return toks
def is_cn_char(ch):
    """ Test if a char is a Chinese character. """
    # CJK Unified Ideographs range (U+4E00 .. U+9FA5 inclusive).
    return u'\u4e00' <= ch <= u'\u9fa5'
def is_cn_sentence(sentence):
    """ Test if a sentence is made of Chinese characters.

    Returns True for the empty string (vacuously all-Chinese), matching
    the original loop-based behaviour.
    """
    # Idiomatic short-circuit form of the original character loop.
    return all(is_cn_char(ch) for ch in sentence)
def split_sentences(text):
    """ Split a piece of text into a list of sentences.

    Sentences are delimited by Chinese punctuation or newlines; each
    returned sentence keeps only its Chinese characters.
    """
    delimiters = {u',', u'。', u'!', u'?', u'、', u'\n'}
    sentences = []
    start = 0
    # Scan one past the end so the final sentence is flushed.
    for pos in range(len(text) + 1):
        if pos == len(text) or text[pos] in delimiters:
            if start < pos:
                sentence = u''.join(filter(is_cn_char, text[start:pos]))
                sentences.append(sentence)
            start = pos + 1
    return sentences
# Number of sentences per generated poem (presumably a quatrain -- confirm).
NUM_OF_SENTENCES = 4
# Dimensionality of the character embedding vectors (per the name; verify
# against the model that consumes it).
CHAR_VEC_DIM = 512
| true |
0f550115b7e9b9efff3363c53bfb71178c151023 | Python | scortier/Leetcode-Submissions | /problems/diagonal_traverse/solution.py | UTF-8 | 895 | 3.71875 | 4 | [] | no_license | class Solution:
def findDiagonalOrder(self, matrix: List[List[int]]) -> List[int]:
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
# check empty matrix
if not matrix or not matrix[0]:
return []
# Store length and width of the matrix
N, M = len(matrix), len(matrix[0])
# Initilize a defaultdict to store the items in the same diagonal
tmp, result = defaultdict(list), []
for row in range(N):
for column in range(M):
tmp[row+column].append(matrix[row][column])
# Add key, value pair from the tmp dictionary to result
for key, values in tmp.items():
print(key, values)
if key % 2 == 0:
result.extend(reversed(values))
else:
result.extend(values)
return result | true |
ef562e67b9330247e7e57720d77a3300ff70a6f3 | Python | seckalou/MLAlgorithms | /mla/knn.py | UTF-8 | 1,608 | 3.234375 | 3 | [
"MIT"
] | permissive | from collections import Counter
import numpy as np
from scipy.spatial.distance import euclidean
from mla.base import BaseEstimator
class KNN(BaseEstimator):
    def __init__(self, k=5, distance_func=euclidean):
        """Nearest neighbors classifier.
        Note: if there is a tie for the most common label among the neighbors,
        then the predicted label is arbitrary.
        Parameters
        ----------
        k : int, default 5
            The number of neighbors to take into account.
        distance_func : function, default euclidean distance
            A distance function taking two arguments. Any function from
            scipy.spatial.distance will do.
        """
        self.k = k
        self.distance_func = distance_func

    def _predict(self, X=None):
        """Predict a label for every row of X."""
        return np.array([self._predict_x(row) for row in X])

    def _predict_x(self, x):
        """Predict the label of a single instance x."""
        # Pair each training example's distance-to-x with its label,
        # sorted by distance (stable sort preserves training order on ties).
        neighbors = sorted(
            ((self.distance_func(x, example), label)
             for example, label in zip(self.X, self.y)),
            key=lambda pair: pair[0])
        # Majority vote among the k nearest labels.
        top_labels = [label for _, label in neighbors[:self.k]]
        return Counter(top_labels).most_common(1)[0][0]
| true |
2612b2a6868a58ba25550b83cee6f7b89999409e | Python | patrotom/combinatorial-optimization-problems | /sat/lib/genetic.py | UTF-8 | 5,317 | 2.734375 | 3 | [
"MIT"
] | permissive | import copy
import random
import numpy as np
from timeit import default_timer as timer
from sat.lib.solution import Solution
class Genetic:
    """Genetic-algorithm solver for a weighted SAT instance.

    opts keys used here: "g" (generations), "p" (population size),
    "c" (crossover probability), "m" (mutation probability), and the
    "pan"/"war" flags enabling two stagnation-restart heuristics.
    NOTE: behaviour depends on the exact sequence of random/np.random
    draws, so the statement order below is load-bearing.
    """

    def __init__(self, inst, opts):
        self.inst = inst
        self.opts = opts
        self.sol = Solution(inst.vars_num)

    def run(self):
        """Run the GA, time it, and record the best individual in self.sol."""
        start = timer()
        self.__run()
        end = timer()
        self.sol.time = end - start
        best_indv = self.__find_elite()
        self.sol.w_sum = best_indv.fitness
        self.sol.conf = best_indv.conf
        # Relative error vs the known optimum shipped with the instance.
        self.sol.rel_err = abs(self.sol.w_sum - self.inst.opt_sum) / \
            max(self.inst.opt_sum, self.sol.w_sum)

    def __run(self):
        """Evolve the population for opts["g"] generations with elitism."""
        self.__init_generation()
        for it in range(self.opts["g"]):
            gen = []
            # Carry the best individual over unchanged (elitism).
            self.elite = copy.deepcopy(self.__find_elite())
            gen.append(self.elite)
            while len(gen) < len(self.prev_gen):
                parents = self.__tournament()
                offsprings = self.__crossover(parents)
                offsprings = self.__mutate(offsprings)
                gen += offsprings
            self.prev_gen = gen
            self.__pandemic()
            # "war" restarts are only allowed in the first half of the run.
            if it < int(self.opts["g"] / 2):
                self.__war()

    def __init_generation(self):
        """Create the initial random population and reset staleness tracking."""
        self.prev_gen = []
        pop_size = (self.opts["p"], self.inst.vars_num)
        confs = np.random.randint(2, size=pop_size).tolist()
        for conf in confs:
            fitness = self.__calc_fitness(conf)
            self.prev_gen.append(Individual(conf, fitness))
        self.max_fitness = self.__find_elite().fitness
        self.stale_cnt = 0

    def __tournament(self):
        """Select two parents via two 5-way tournaments plus a final round."""
        sample1 = random.sample(self.prev_gen, k=5)
        sample2 = random.sample(self.prev_gen, k=5)
        tour1 = self.__find_fittest_two(sample1)
        tour2 = self.__find_fittest_two(sample2)
        parents = self.__find_fittest_two(tour1 + tour2)
        return parents

    def __crossover(self, parents):
        """Two-point crossover with probability opts["c"]; else clone parents."""
        if random.random() > self.opts["c"]:
            return copy.deepcopy(parents)
        # Two distinct cut points, sorted so p1 < p2.
        p1, p2 = sorted(random.sample(list(range(self.inst.vars_num)), k=2))
        p_conf1, p_conf2 = parents[0].conf, parents[1].conf
        o_conf1 = p_conf1[:p1] + p_conf2[p1:p2] + p_conf1[p2:]
        o_conf2 = p_conf2[:p1] + p_conf1[p1:p2] + p_conf2[p2:]
        fitness1 = self.__calc_fitness(o_conf1)
        fitness2 = self.__calc_fitness(o_conf2)
        i1 = Individual(o_conf1, fitness1)
        i2 = Individual(o_conf2, fitness2)
        return [i1, i2]

    def __mutate(self, offsprings):
        """Flip one random bit of each offspring with probability opts["m"]."""
        for offspring in offsprings:
            if random.random() > self.opts["m"]:
                continue
            rand_idx = random.choice(list(range(self.inst.vars_num)))
            offspring.conf[rand_idx] = int(not offspring.conf[rand_idx])
            offspring.fitness = self.__calc_fitness(offspring.conf)
        return offsprings

    def __find_fittest_two(self, sample):
        """Return the two fittest individuals of *sample* (descending)."""
        sample = sorted(sample, key=lambda x: x.fitness, reverse=True)
        return [sample[0], sample[1]]

    def __find_elite(self):
        """Return the fittest individual of the previous generation."""
        return max(self.prev_gen, key=lambda x: x.fitness)

    def __pandemic(self):
        """After 50 stale generations, replace the weaker half at random."""
        if not self.opts["pan"]:
            return
        if self.stale_cnt < 50:
            # Track whether the elite fitness has improved.
            if self.elite.fitness == self.max_fitness:
                self.stale_cnt += 1
            else:
                self.max_fitness = self.elite.fitness
                self.stale_cnt = 0
            return
        # Keep the fittest half, regenerate the rest randomly.
        to_wipe = int(self.opts["p"] / 2)
        kept = sorted(
            self.prev_gen, key=lambda x: x.fitness, reverse=True
        )[:to_wipe]
        pop_size = (self.opts["p"] - to_wipe, self.inst.vars_num)
        confs = np.random.randint(2, size=pop_size).tolist()
        for conf in confs:
            fitness = self.__calc_fitness(conf)
            kept.append(Individual(conf, fitness))
        self.prev_gen = kept
        self.stale_cnt = 0

    def __war(self):
        """Like __pandemic, but the survivors are chosen uniformly at random."""
        if not self.opts["war"]:
            return
        if self.stale_cnt < 50:
            if self.elite.fitness == self.max_fitness:
                self.stale_cnt += 1
            else:
                self.max_fitness = self.elite.fitness
                self.stale_cnt = 0
            return
        to_wipe = int(self.opts["p"] / 2)
        kept = random.sample(self.prev_gen, k=to_wipe)
        pop_size = (self.opts["p"] - to_wipe, self.inst.vars_num)
        confs = np.random.randint(2, size=pop_size).tolist()
        for conf in confs:
            fitness = self.__calc_fitness(conf)
            kept.append(Individual(conf, fitness))
        self.prev_gen = kept
        self.stale_cnt = 0

    def __calc_fitness(self, conf):
        """Fitness of a configuration.

        Starts at -cls_num and gains 1 per satisfied clause, so it is
        <= 0: negative values count unsatisfied clauses; 0 means the
        formula is satisfied and the weighted sum of set variables is
        returned instead. (A value > 0 is impossible, so the implicit
        None return is unreachable.)
        """
        fitness = -self.inst.cls_num
        for clause in self.inst.clauses:
            for v in clause:
                idx = abs(v) - 1
                # Negative literal is satisfied when the variable is 0.
                val = int(not conf[idx]) if v < 0 else conf[idx]
                if val == 1:
                    fitness += 1
                    break
        if fitness == 0:
            return sum([a * b for a, b in zip(conf, self.inst.weights)])
        elif fitness < 0:
            return fitness
class Individual():
    """Container pairing a candidate bit configuration with its fitness."""

    def __init__(self, conf, fitness):
        self.conf, self.fitness = conf, fitness
| true |
c3db857b0d6e01017d7246cf4234b9dcdea9ddd2 | Python | cktan/gppg | /gppg.py | UTF-8 | 5,495 | 2.5625 | 3 | [] | no_license | import sys, os, csv, subprocess
from StringIO import StringIO
# -----------------------------------------------------
# Prefixed line-by-line logging helpers. `pr` is rebound by set_verbose,
# so pr_inf/pr_dst/pr_src pick up the current verbosity at call time.
# NOTE: Python 2 (print statement).
def _pr(prefix, s):
    # Print each line of a multi-line message with the given prefix.
    for line in s.split('\n'):
        print prefix,line
pr = lambda prefix, s: _pr(prefix, s)
def pr_inf(s): pr('inf', s)
def pr_dst(s): pr('dst', s)
def pr_src(s): pr('src', s)
def set_verbose(flag):
    # Swap the global `pr` between the real printer and a no-op.
    global pr
    if flag:
        pr = lambda prefix, s: _pr(prefix, s)
    else:
        pr = lambda prefix, s: None
# Logging is off by default.
set_verbose(False)
# -----------------------------------------------------
def dq(s):
    """Double-quote a SQL identifier unless it is a safe lowercase name.

    An identifier needs quoting when it contains anything other than
    lowercase letters, digits or underscores, or when it is one of the
    reserved words 'user' / 'filter'.
    """
    safe = 'abcdefghijklmnopqrstuvwxyz_0123456789'
    needs_quoting = any(ch not in safe for ch in s)
    if needs_quoting or s in ('user', 'filter'):
        return '"%s"' % s
    return s
# -----------------------------------------------------
class Attr:
    """One table column: name, SQL datatype and size/precision metadata."""

    def __init__(self):
        self.cname = ''
        self.datatype = ''
        self.numericprecision = 0
        self.numericscale = 0
        self.charmaxlen = 0

    def typeClause(self):
        """Render this column's SQL type expression.

        numeric gains (precision[,scale]) when available; character
        varying becomes varchar[(n)]; character gains (n) when bounded;
        json is stored as text; anything else passes through unchanged.
        """
        t = self.datatype
        if t == 'numeric':
            if self.numericprecision and self.numericscale:
                return 'numeric(%s,%s)' % (self.numericprecision,
                                           self.numericscale)
            if self.numericprecision:
                return 'numeric(%s)' % self.numericprecision
            return t
        if t == 'character varying':
            return ('varchar(%s)' % self.charmaxlen) if self.charmaxlen \
                else 'varchar'
        if t == 'character':
            return ('character(%s)' % self.charmaxlen) if self.charmaxlen \
                else t
        if t == 'json':
            return 'text'
        return t
# -----------------------------------------------------
class TableInfo:
    """Schema description of a table: an ordered list of Attr columns."""

    def __init__(self):
        self.attr = []  # array of Attr

    def isSubset(self, t):
        """True if self's columns are a name/type-identical prefix of t's."""
        # range() works on both Python 2 and 3 (was Python-2-only xrange).
        for i in range(len(self.attr)):
            a = self.attr[i]
            b = t.attr[i]
            if a.cname != b.cname: return False
            if a.typeClause() != b.typeClause(): return False
        return True

    def isEquiv(self, t):
        """True if both tables have exactly the same columns in order."""
        if len(self.attr) != len(t.attr):
            return False
        return self.isSubset(t)

    def columnClause(self):
        """Quoted column names, one per line, comma-prefixed."""
        s = [dq(a.cname) for a in self.attr]
        return '\n,'.join(s)

    def columnClauseEx(self):
        """Like columnClause, but truncates wide json/text/varchar columns."""
        s = []
        for a in self.attr:
            if a.datatype == 'json':
                # json columns are cast to text and capped.
                # (fixed: 'from 1 to N' is not valid SQL substring syntax;
                # it must be 'from <start> for <count>')
                t = 'substring(%s::text from 1 for 20000) as %s' % (dq(a.cname), dq(a.cname))
            elif a.typeClause() == 'varchar' or a.datatype == 'text':
                # (fixed: this was a separate `if`, whose `else` branch
                # unconditionally clobbered the json case above)
                t = 'substring(%s from 1 for 5000) as %s' % (dq(a.cname), dq(a.cname))
            else:
                t = dq(a.cname)
            s += [t]
        return '\n,'.join(s)

    def columnAndTypeClause(self):
        """Column definitions suitable for a CREATE TABLE statement."""
        s = []
        for a in self.attr:
            s += ["%s %s" % (dq(a.cname), a.typeClause())]
        return '\n,'.join(s)
# -----------------------------------------------------
class DB:
    """Thin wrapper around the `psql` command-line client.

    Credentials come from the object's fields; the password falls back to
    a PGPASSWORD__<db>__<user> environment variable.
    NOTE: Python 2 module (uses StringIO from the top-of-file import).
    """

    def __init__(self):
        self.host = ''
        self.port = ''
        self.user = ''
        self.passwd = ''
        self.dbname = ''
        self.sname = ''    # schema name
        self.tname = ''    # table name
        self.prefix = ''

    def set_prefix(self, prefix):
        # Set an identifier prefix (usage defined by callers).
        self.prefix = prefix

    def psql_raw(self, sql, stdin=None, stdout=None, stderr=None):
        """Spawn `psql -qAt -c <sql>` and return the Popen object.

        Exits the process if no password is configured and the
        PGPASSWORD__<DBNAME>__<USER> env variable is unset.
        """
        env = os.environ.copy()
        if not self.passwd:
            passwd_key = ('PGPASSWORD__%s__%s' % (self.dbname, self.user)).upper()
            self.passwd = (env.get(passwd_key, '')).strip()
            if not self.passwd:
                sys.exit('env %s not set' % passwd_key)
        env['PGHOST'] = self.host
        env['PGPORT'] = self.port
        env['PGUSER'] = self.user
        env['PGPASSWORD'] = self.passwd
        env['PGDATABASE'] = self.dbname
        # Suppress NOTICE-level chatter from the server.
        env['PGOPTIONS'] = '--client-min-messages=warning'
        return subprocess.Popen(['psql', '-qAt', '-c', sql], env=env, stdin=stdin, stdout=stdout, stderr=stderr)

    def psql(self, sql):
        """Run *sql* and return its stripped stdout; exit on failure."""
        p = self.psql_raw(sql, stdout=subprocess.PIPE)
        s = p.stdout.read()
        rc = p.wait()
        if rc:
            sys.exit('ERROR: %s' % sql)
        return s.strip()

    def psql_quiet(self, sql):
        """Run *sql*, discard all output, and return the exit code."""
        p = self.psql_raw(sql, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        while True:
            # Drain output in 8 KiB chunks so the child never blocks.
            s = p.stdout.read(1024*8)
            if not s: break
        rc = p.wait()
        return rc

    def getTableInfo(self):
        '''Returns the attributes of the table. If Table is not accessible or if table does not exist, return None.'''
        t = TableInfo()
        sql = '''copy /* get columns of a table */
        (select column_name, data_type, numeric_precision, numeric_scale, character_maximum_length
        from information_schema.columns
        where table_schema='%s' and table_name='%s' and column_name != 'recxmin'
        order by ordinal_position) to stdout with csv header''' % (self.sname, self.tname)
        s = self.psql(sql)
        for row in csv.DictReader(StringIO(s)):
            # Skip enum/composite and system xid columns.
            if row['data_type'] == 'USER-DEFINED': continue
            if row['data_type'] == 'xid': continue
            a = Attr()
            a.cname = row['column_name']
            a.datatype = row['data_type']
            a.numericprecision = row['numeric_precision']
            a.numericscale = row['numeric_scale']
            a.charmaxlen = row['character_maximum_length']
            t.attr += [a]
        if len(t.attr) == 0:
            return None
        return t
| true |
d5a8f8cc26cfa1ec6828494d94ccb906791fec45 | Python | ariesunique/FSND | /projects/02_trivia_api/starter/backend/test_flaskr.py | UTF-8 | 3,670 | 2.953125 | 3 | [] | no_license | import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from flaskr import create_app
from models import setup_db, Question, Category
class TriviaTestCase(unittest.TestCase):
    """This class represents the trivia test case"""

    def setUp(self):
        """Define test variables and initialize app."""
        self.app = create_app()
        self.client = self.app.test_client
        self.database_name = "trivia_test"
        self.database_path = "postgres:///{}".format(self.database_name)
        setup_db(self.app, self.database_path)
        # binds the app to the current context
        with self.app.app_context():
            self.db = SQLAlchemy()
            self.db.init_app(self.app)
            # create all tables
            self.db.create_all()

    def tearDown(self):
        """Executed after reach test"""
        pass

    def test_get_categories(self):
        """GET /categories returns success plus the six seeded categories."""
        res = self.client().get("/categories")
        self.assertEqual(res.status_code, 200)
        cat_json = res.get_json()
        self.assertTrue(cat_json.get("success"), "Success should be present in the response")
        cat_dict = cat_json.get("categories")
        self.assertEqual(6, len(cat_dict))
        expected = {
            "1":"Science",
            "2":"Art",
            "3":"Geography",
            "4":"History",
            "5":"Entertainment",
            "6":"Sports"}
        self.assertEqual(expected, cat_dict)

    def test_method_not_allowed_categories(self):
        """Every non-GET verb on /categories yields a JSON 405 error."""
        methods = ["POST", "PATCH", "DELETE", "PUT"]
        url = "/categories"
        for method in methods:
            # Flask's test client exposes one method per HTTP verb.
            res = getattr(self.client(), method.lower())(url)
            self.assertEqual(res.status_code, 405, f"Expecting method not allowed error (405) for method {method}")
            self.assertTrue(res.get_json().get("error"), "Error attribute should be present in response")
            self.assertFalse(res.get_json().get("success"), "Success attribute should be present in response and should be false")

    def test_get_questions_by_category(self):
        """GET /categories/1/questions returns that category's questions."""
        res = self.client().get("/categories/1/questions")
        self.assertEqual(res.status_code, 200)
        myjson = res.get_json()
        self.assertTrue(myjson.get("categories"), "Categories key should exist in the response")
        self.assertTrue(myjson.get("success"))
        self.assertEqual(1, myjson.get("current_category"), "Current category should match the category passed in the url")
        # The trivia_test fixture seeds 3 questions in category 1.
        self.assertEqual(3, myjson.get("total_questions"))
        self.assertEqual(3, len(myjson.get("questions")))

    def test_get_questions_for_nonexistant_category(self):
        """Requesting questions for an unknown category yields a JSON 404."""
        res = self.client().get("/categories/999/questions")
        self.assertEqual(res.status_code, 404, "Expecting page not found error (404) for a non-existent category ")
        self.assertTrue(res.get_json().get("error"), "Error attribute should be present in response")
        self.assertFalse(res.get_json().get("success"), "Success attribute should be present in response and should be false")

    def test_bad_url(self):
        """An unknown route yields a JSON 404."""
        res = self.client().get("/bad-url")
        self.assertEqual(res.status_code, 404, "Expecting page not found error (404) for a non-existent category ")
        self.assertTrue(res.get_json().get("error"), "Error attribute should be present in response")
        self.assertFalse(res.get_json().get("success"), "Success attribute should be present in response and should be false")
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main() | true |
02d8ae1626206abb04de31ac537cc88ad0d99a79 | Python | davimi/mountaincar-rl | /src/main/model_visualization.py | UTF-8 | 1,687 | 2.765625 | 3 | [] | no_license | from mountaincar import *
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import seaborn as sns
def show_3D_plot(xs=None, ys=None, zs=None):
    """Render predicted values as a 3-D surface over (position, speed).

    The original implicitly read the module-level ``xs``/``ys``/``zs``
    grids; any argument left as None still falls back to those globals,
    so the original no-argument call is unchanged.

    :param xs: position grid (defaults to module-level ``xs``)
    :param ys: speed grid (defaults to module-level ``ys``)
    :param zs: predicted-value grid (defaults to module-level ``zs``)
    """
    if xs is None:
        xs = globals()['xs']
    if ys is None:
        ys = globals()['ys']
    if zs is None:
        zs = globals()['zs']
    plt.clf()
    fig = plt.figure()
    ax = Axes3D(fig)
    # NOTE(review): plot_surface normally expects 2-D X/Y arrays (e.g.
    # from np.meshgrid); the module grids are 1-D -- confirm this renders.
    ax.plot_surface(xs, ys, zs, rstride=1, cstride=1, cmap=cm.viridis)
    ax.set_xlabel("position")
    ax.set_ylabel("speed")
    ax.set_zlabel("predicted value")
    plt.show()
def show_heatmap(values, xs, ys):
    # Display `values` as a seaborn heatmap with xs as row labels
    # (position) and ys as column labels (speed).
    sns.set()
    df = pd.DataFrame(values, xs, ys)
    ax = sns.heatmap(df)
    ax.set_ylabel("position")
    ax.set_xlabel("speed")
    plt.show()
# Load a trained Q-learning agent and visualize its value function and
# greedy policy over a (position, speed) grid of MountainCar states.
env = gym.make('MountainCar-v0')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
# epsilon=0 -> purely greedy agent for visualization.
agent = rl.QLearningAgent(state_size, action_size, epsilon = 0.0, epsilon_min = 0.0)
trained_model = agent.load_model_weights("src/main/resources/model_01.hdf5")
#xs = np.arange(env_min_pos, stop=env_max_pos, step=0.1)
#ys = np.arange(-env_abs_max_speed, stop=env_abs_max_speed, step=0.01)
xs = np.linspace(env_min_pos, env_goal_pos, 20)
ys = np.linspace(-env_abs_max_speed, env_abs_max_speed, 20)
zs = np.empty((len(xs), len(ys)))
actions = np.empty((len(xs), len(ys)))
# NOTE(review): both loops stop at len-1, so the last row/column of
# zs/actions keeps np.empty garbage -- confirm whether that is intended.
for x in range(len(xs) - 1 ):
    for y in range(len(ys) - 1):
        reward = cubic_approximation_reward_flat(xs[x], (xs[x] >= env_goal_pos))
        state = normalize_state(np.array([xs[x], ys[y]]), env_max_pos, env_min_pos, env_abs_max_speed)
        state = np.reshape(state, [1, state_size])
        zs[x][y] = agent.predict_value(reward, state)
        actions[x][y] = agent.choose_action(state)
print(zs)
show_heatmap(zs, xs, ys)
#show_heatmap(actions, xs, ys)
#show_3D_plot()
| true |
92125a95cee8bda6318d20c7b77aaa6a01ef8691 | Python | richardgmcmahon/astropy_examples | /votable_example.py | UTF-8 | 3,856 | 2.546875 | 3 | [] | no_license | from __future__ import (absolute_import, division, print_function,
unicode_literals)
"""
Example table i/o for votable with timing comparison
"""
import os
import sys
import time
import numpy as np
t0 = time.time()
import astropy
print('Elapsed time(secs):', time.time() - t0)
print()
print(astropy.__version__)
from astropy.table import Table
from astropy.io.votable import from_table, writeto
nrows = int(1e6)
t0 = time.time()
print('Create data:', nrows, 'rows')
col0 = np.linspace(1, nrows, num=nrows, dtype=np.int32)
col1 = np.linspace(1, nrows, num=nrows, dtype='float32')
col2 = np.linspace(1, nrows, num=nrows, dtype='float32')
print('Elapsed time(secs):', time.time() - t0)
print()
print(col0[0], col0[-1])
print(col1[0], col1[-1])
# table = Table([col1, col2, col3])
table = Table([col0])
table = Table([col0, col1, col2])
print('Elapsed time(secs):', time.time() - t0)
print()
table.info()
print('Elapsed time(secs):', time.time() - t0)
print()
# table['col1'].unit = 'deg'
t0 = time.time()
outfile = 'table.fits'
print('Write:', outfile)
table.write(outfile, overwrite=True)
print('Elapsed time(secs):', time.time() - t0)
print()
t0 = time.time()
infile = outfile
print('Read:', infile)
input = Table.read(infile)
print('Elapsed time(secs):', time.time() - t0)
input.info()
print()
def _roundtrip(tbl, outfile, write_kwargs=None, read_kwargs=None,
               show_rows=True, show_info=True):
    """Write ``tbl`` to ``outfile``, read it back, and print timings.

    Replaces six near-identical copy-pasted blocks. The printed output
    matches the originals: write banner + elapsed time, then read banner
    + elapsed time, then optionally the row count and ``info()`` summary.

    :param tbl: astropy Table to write
    :param outfile: destination filename
    :param write_kwargs: extra keywords for Table.write (e.g. format)
    :param read_kwargs: extra keywords for Table.read (e.g. path)
    :param show_rows: print 'Number of rows: N' after reading
    :param show_info: call info() on the table read back
    :return: the table read back from disk
    """
    write_kwargs = write_kwargs or {}
    read_kwargs = read_kwargs or {}
    t0 = time.time()
    print('Write:', outfile)
    tbl.write(outfile, overwrite=True, **write_kwargs)
    print('Elapsed time(secs):', time.time() - t0)
    print()
    t0 = time.time()
    print('Read:', outfile)
    data = Table.read(outfile, **read_kwargs)
    print('Elapsed time(secs):', time.time() - t0)
    if show_rows:
        print('Number of rows:', len(data))
    if show_info:
        data.info()
    print()
    return data


# VOTable, binary serialization.
# (`input` keeps the original module-level name, shadowing the builtin.)
input = _roundtrip(table, 'table_binary.vot',
                   write_kwargs=dict(table_id='example_table',
                                     format='votable',
                                     tabledata_format='binary'))

# VOTable, binary2 serialization.
input = _roundtrip(table, 'table_binary2.vot',
                   write_kwargs=dict(table_id='example_table',
                                     format='votable',
                                     tabledata_format='binary2'))

# VOTable, default TABLEDATA (ascii) serialization.
input = _roundtrip(table, 'table_ascii.vot',
                   write_kwargs=dict(table_id='example_table',
                                     format='votable'))

# HDF5.
input = _roundtrip(table, 'table.hdf5',
                   write_kwargs=dict(path='data'),
                   read_kwargs=dict(path='data'))

# HDF5 with compression (the original printed no info() here).
input = _roundtrip(table, 'table_compressed.hdf5',
                   write_kwargs=dict(path='data', compression=True),
                   read_kwargs=dict(path='data'),
                   show_info=False)

# CSV.
input = _roundtrip(table, 'table.csv')
| true |
def solve(position):
    """BOJ 1193: value of the Nth term of the zig-zag fraction walk.

    The Cantor enumeration's d-th diagonal holds d fractions; walk down
    the diagonals subtracting their lengths until the remaining offset
    fits inside one, then read off numerator/denominator. Even diagonals
    are traversed with the numerator increasing, odd ones decreasing.

    :param position: 1-based term index N
    :return: the fraction as a 'numerator/denominator' string
    """
    diagonal = 1
    while position > diagonal:
        position -= diagonal
        diagonal += 1
    if diagonal % 2 == 0:
        return f'{position}/{diagonal - (position - 1)}'
    return f'{diagonal - (position - 1)}/{position}'


if __name__ == '__main__':
    # Read N from stdin and print the Nth fraction, as the original did.
    print(solve(int(input())))
| true |
fa3d55516e37d5a23f05c084de90e7674a14bb01 | Python | Azure/azure-sdk-for-python | /sdk/anomalydetector/azure-ai-anomalydetector/azure/ai/anomalydetector/_model_base.py | UTF-8 | 23,846 | 2.625 | 3 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import functools
import sys
import logging
import base64
import re
import isodate
from json import JSONEncoder
import typing
from datetime import datetime, date, time, timedelta
from azure.core.utils._utils import _FixedOffset
from collections.abc import MutableMapping
from azure.core.exceptions import DeserializationError
from azure.core import CaseInsensitiveEnumMeta
from azure.core.pipeline import PipelineResponse
import copy
# Module-level logger for serialization/deserialization diagnostics.
_LOGGER = logging.getLogger(__name__)

# Public API of this helper module.
__all__ = ["NULL", "AzureJSONEncoder", "Model", "rest_field", "rest_discriminator"]
class _Null(object):
"""To create a Falsy object"""
def __bool__(self):
return False
__nonzero__ = __bool__ # Python2 compatibility
NULL = _Null()
"""
A falsy sentinel object which is supposed to be used to specify attributes
with no data. This gets serialized to `null` on the wire.
"""
def _timedelta_as_isostr(td: timedelta) -> str:
"""Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 'P4DT12H30M05S'
Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython
"""
# Split seconds to larger units
seconds = td.total_seconds()
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
days, hours, minutes = list(map(int, (days, hours, minutes)))
seconds = round(seconds, 6)
# Build date
date_str = ""
if days:
date_str = "%sD" % days
# Build time
time_str = "T"
# Hours
bigger_exists = date_str or hours
if bigger_exists:
time_str += "{:02}H".format(hours)
# Minutes
bigger_exists = bigger_exists or minutes
if bigger_exists:
time_str += "{:02}M".format(minutes)
# Seconds
try:
if seconds.is_integer():
seconds_string = "{:02}".format(int(seconds))
else:
# 9 chars long w/ leading 0, 6 digits after decimal
seconds_string = "%09.6f" % seconds
# Remove trailing zeros
seconds_string = seconds_string.rstrip("0")
except AttributeError: # int.is_integer() raises
seconds_string = "{:02}".format(seconds)
time_str += "{}S".format(seconds_string)
return "P" + date_str + time_str
def _datetime_as_isostr(dt: typing.Union[datetime, date, time, timedelta]) -> str:
"""Converts a datetime.(datetime|date|time|timedelta) object into an ISO 8601 formatted string"""
# First try datetime.datetime
if hasattr(dt, "year") and hasattr(dt, "hour"):
dt = typing.cast(datetime, dt)
# astimezone() fails for naive times in Python 2.7, so make make sure dt is aware (tzinfo is set)
if not dt.tzinfo:
iso_formatted = dt.replace(tzinfo=TZ_UTC).isoformat()
else:
iso_formatted = dt.astimezone(TZ_UTC).isoformat()
# Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt)
return iso_formatted.replace("+00:00", "Z")
# Next try datetime.date or datetime.time
try:
dt = typing.cast(typing.Union[date, time], dt)
return dt.isoformat()
# Last, try datetime.timedelta
except AttributeError:
dt = typing.cast(timedelta, dt)
return _timedelta_as_isostr(dt)
# Prefer the stdlib UTC tzinfo (Python 3); fall back to azure-core's
# fixed-offset implementation where datetime.timezone is unavailable.
try:
    from datetime import timezone
    TZ_UTC = timezone.utc  # type: ignore
except ImportError:
    TZ_UTC = _FixedOffset(0)  # type: ignore
def _serialize_bytes(o) -> str:
return base64.b64encode(o).decode()
def _serialize_datetime(o):
if hasattr(o, "year") and hasattr(o, "hour"):
# astimezone() fails for naive times in Python 2.7, so make make sure o is aware (tzinfo is set)
if not o.tzinfo:
iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat()
else:
iso_formatted = o.astimezone(TZ_UTC).isoformat()
# Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt)
return iso_formatted.replace("+00:00", "Z")
# Next try datetime.date or datetime.time
return o.isoformat()
def _is_readonly(p):
try:
return p._readonly
except AttributeError:
return False
class AzureJSONEncoder(JSONEncoder):
    """A JSON encoder that can additionally serialize models, datetime objects and bytes."""

    def default(self, o):  # pylint: disable=too-many-return-statements
        if _is_model(o):
            # Serialize a model as a plain dict, dropping read-only properties.
            readonly_names = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)]
            return {k: v for k, v in o.items() if k not in readonly_names}
        if isinstance(o, (bytes, bytearray)):
            return base64.b64encode(o).decode()
        try:
            return super(AzureJSONEncoder, self).default(o)
        except TypeError:
            if isinstance(o, (bytes, bytearray)):
                return _serialize_bytes(o)
            try:
                # datetime.datetime / date / time
                return _serialize_datetime(o)
            except AttributeError:
                pass
            try:
                # datetime.timedelta
                return _timedelta_as_isostr(o)
            except AttributeError:
                # raised when the object has no total_seconds() either
                pass
            return super(AzureJSONEncoder, self).default(o)
_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime:
"""Deserialize ISO-8601 formatted string into Datetime object.
:param str attr: response string to be deserialized.
:rtype: ~datetime.datetime
"""
if isinstance(attr, datetime):
# i'm already deserialized
return attr
attr = attr.upper()
match = _VALID_DATE.match(attr)
if not match:
raise ValueError("Invalid datetime string: " + attr)
check_decimal = attr.split(".")
if len(check_decimal) > 1:
decimal_str = ""
for digit in check_decimal[1]:
if digit.isdigit():
decimal_str += digit
else:
break
if len(decimal_str) > 6:
attr = attr.replace(decimal_str, decimal_str[0:6])
date_obj = isodate.parse_datetime(attr)
test_utc = date_obj.utctimetuple()
if test_utc.tm_year > 9999 or test_utc.tm_year < 1:
raise OverflowError("Hit max or min date")
return date_obj
def _deserialize_date(attr: typing.Union[str, date]) -> date:
"""Deserialize ISO-8601 formatted string into Date object.
:param str attr: response string to be deserialized.
:rtype: Date
"""
# This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception.
if isinstance(attr, date):
return attr
return isodate.parse_date(attr, defaultmonth=None, defaultday=None)
def _deserialize_time(attr: typing.Union[str, time]) -> time:
"""Deserialize ISO-8601 formatted string into time object.
:param str attr: response string to be deserialized.
:rtype: datetime.time
"""
if isinstance(attr, time):
return attr
return isodate.parse_time(attr)
def deserialize_bytes(attr):
    """Return *attr* unchanged when bytes-like, otherwise base64-decode the string."""
    return attr if isinstance(attr, (bytes, bytearray)) else bytes(base64.b64decode(attr))
def deserialize_duration(attr):
    """Parse an ISO-8601 duration string; timedelta values pass straight through."""
    return attr if isinstance(attr, timedelta) else isodate.parse_duration(attr)
# Fallback deserializer per target type, used when an annotation resolves
# directly to one of these types; typing.Any passes the value through.
_DESERIALIZE_MAPPING = {
    datetime: _deserialize_datetime,
    date: _deserialize_date,
    time: _deserialize_time,
    bytes: deserialize_bytes,
    timedelta: deserialize_duration,
    typing.Any: lambda x: x,
}
def _get_model(module_name: str, model_name: str):
models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)}
module_end = module_name.rsplit(".", 1)[0]
module = sys.modules[module_end]
models.update({k: v for k, v in module.__dict__.items() if isinstance(v, type)})
model_name = model_name.split(".")[-1]
if model_name not in models:
return model_name
return models[model_name]
# Sentinel distinguishing "argument not supplied" from an explicit None.
_UNSET = object()


class _MyMutableMapping(MutableMapping):
    """Dict-like base that stores all state in a private ``_data`` dict.

    All mapping operations delegate to ``_data``; the constructor deep-copies
    its input so instances never alias caller-owned dicts.
    """

    def __init__(self, data: typing.Dict[str, typing.Any]) -> None:
        # Deep copy isolates this mapping from later mutation of *data*.
        self._data = copy.deepcopy(data)

    def __contains__(self, key: str) -> bool:
        return key in self._data

    def __getitem__(self, key: str) -> typing.Any:
        return self._data.__getitem__(key)

    def __setitem__(self, key: str, value: typing.Any) -> None:
        self._data.__setitem__(key, value)

    def __delitem__(self, key: str) -> None:
        self._data.__delitem__(key)

    def __iter__(self) -> typing.Iterator[typing.Any]:
        return self._data.__iter__()

    def __len__(self) -> int:
        return self._data.__len__()

    def __ne__(self, other: typing.Any) -> bool:
        return not self.__eq__(other)

    def keys(self) -> typing.KeysView:
        return self._data.keys()

    def values(self) -> typing.ValuesView:
        return self._data.values()

    def items(self) -> typing.ItemsView:
        return self._data.items()

    def get(self, key: str, default: typing.Any = None) -> typing.Any:
        try:
            return self[key]
        except KeyError:
            return default

    @typing.overload
    def pop(self, key: str) -> typing.Any:
        ...

    @typing.overload
    def pop(self, key: str, default: typing.Any) -> typing.Any:
        ...

    def pop(self, key: typing.Any, default: typing.Any = _UNSET) -> typing.Any:
        # _UNSET lets callers pass None as a legitimate default.
        if default is _UNSET:
            return self._data.pop(key)
        return self._data.pop(key, default)

    def popitem(self) -> typing.Tuple[str, typing.Any]:
        return self._data.popitem()

    def clear(self) -> None:
        self._data.clear()

    def update(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        self._data.update(*args, **kwargs)

    @typing.overload
    def setdefault(self, key: str) -> typing.Any:
        ...

    @typing.overload
    def setdefault(self, key: str, default: typing.Any) -> typing.Any:
        ...

    def setdefault(self, key: typing.Any, default: typing.Any = _UNSET) -> typing.Any:
        if default is _UNSET:
            return self._data.setdefault(key)
        return self._data.setdefault(key, default)

    def __eq__(self, other: typing.Any) -> bool:
        # Equality coerces *other* through this class's constructor, so a raw
        # dict compares equal to the model built from it; construction failure
        # means "not equal".
        try:
            other_model = self.__class__(other)
        except Exception:
            return False
        return self._data == other_model._data

    def __repr__(self) -> str:
        return str(self._data)
def _is_model(obj: typing.Any) -> bool:
return getattr(obj, "_is_model", False)
def _serialize(o):
    """Best-effort wire serialization: bytes, datetimes and durations become
    strings; every other value is returned unchanged."""
    if isinstance(o, (bytes, bytearray)):
        return _serialize_bytes(o)
    # Each serializer raises AttributeError for types it does not handle.
    for serializer in (_serialize_datetime, _timedelta_as_isostr):
        try:
            return serializer(o)
        except AttributeError:
            continue
    return o
def _get_rest_field(
attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str
) -> typing.Optional["_RestField"]:
try:
return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name)
except StopIteration:
return None
def _create_value(rest_field: typing.Optional["_RestField"], value: typing.Any) -> typing.Any:
    """Deserialize *value* when the target field is a model type; otherwise serialize it."""
    if rest_field and rest_field._is_model:
        return _deserialize(rest_field._type, value)
    return _serialize(value)
class Model(_MyMutableMapping):
    """Base class for generated REST models.

    Instances store their state as the wire-format dict (inherited from
    _MyMutableMapping); _RestField descriptors translate attribute access
    to/from that dict. Subclasses declaring a discriminator register
    themselves in their base's ``__mapping__`` for polymorphic deserialization.
    """

    _is_model = True

    def __init__(self, *args, **kwargs):
        # Accepts either a single positional mapping (wire-format data) or
        # attribute keyword arguments -- not both.
        class_name = self.__class__.__name__
        if len(args) > 1:
            raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given")
        # Start from the declared field defaults (keyed by wire name).
        dict_to_pass = {
            rest_field._rest_name: rest_field._default
            for rest_field in self._attr_to_rest_field.values()
            if rest_field._default is not _UNSET
        }
        if args:
            dict_to_pass.update(
                {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()}
            )
        else:
            non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field]
            if non_attr_kwargs:
                # actual type errors only throw the first wrong keyword arg they see, so following that.
                raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'")
            dict_to_pass.update({self._attr_to_rest_field[k]._rest_name: _serialize(v) for k, v in kwargs.items()})
        super().__init__(dict_to_pass)

    def copy(self):
        # NOTE(review): this passes self.__dict__ (i.e. {"_data": ...}) rather
        # than the underlying data mapping, so the copy's contents look like
        # {"_data": ...} instead of the original entries -- confirm intended.
        return Model(self.__dict__)

    def __new__(cls, *args: typing.Any, **kwargs: typing.Any):
        # we know the last three classes in mro are going to be 'Model', 'dict', and 'object'
        mros = cls.__mro__[:-3][::-1]  # ignore model, dict, and object parents, and reverse the mro order
        attr_to_rest_field: typing.Dict[str, _RestField] = {  # map attribute name to rest_field property
            k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type")
        }
        # Collect annotations across the hierarchy to infer field types.
        annotations = {
            k: v
            for mro_class in mros
            if hasattr(mro_class, "__annotations__")
            for k, v in mro_class.__annotations__.items()
        }
        for attr, rest_field in attr_to_rest_field.items():
            rest_field._module = cls.__module__
            if not rest_field._type:
                rest_field._type = rest_field._get_deserialize_callable_from_annotation(annotations.get(attr, None))
            if not rest_field._rest_name_input:
                # Wire name defaults to the attribute name when not set explicitly.
                rest_field._rest_name_input = attr
        cls._attr_to_rest_field: typing.Dict[str, _RestField] = {k: v for k, v in attr_to_rest_field.items()}
        return super().__new__(cls)

    def __init_subclass__(cls, discriminator=None):
        # Register this subclass under its discriminator value (or class name)
        # in every base that maintains a polymorphic __mapping__.
        for base in cls.__bases__:
            if hasattr(base, "__mapping__"):
                base.__mapping__[discriminator or cls.__name__] = cls

    @classmethod
    def _get_discriminator(cls) -> typing.Optional[str]:
        # The discriminator is the wire name of the field flagged as such.
        for v in cls.__dict__.values():
            if isinstance(v, _RestField) and v._is_discriminator:
                return v._rest_name
        return None

    @classmethod
    def _deserialize(cls, data):
        # Dispatch to the subclass registered for the payload's discriminator
        # value; fall back to this class when no better match exists.
        if not hasattr(cls, "__mapping__"):
            return cls(data)
        discriminator = cls._get_discriminator()
        mapped_cls = cls.__mapping__.get(data.get(discriminator), cls)
        if mapped_cls == cls:
            return cls(data)
        return mapped_cls._deserialize(data)
def _get_deserialize_callable_from_annotation(
    annotation: typing.Any,
    module: str,
) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]:
    """Build a deserializer callable for a type annotation, or None for passthrough types.

    Handles, in order: model classes, Literal, Union, Optional, forward
    references, Dict, sequence types, and finally a default that tries the
    annotation itself then the per-type _DESERIALIZE_MAPPING fallback.
    """
    # int/float (and falsy annotations) need no conversion.
    if not annotation or annotation in [int, float]:
        return None

    # Is it one of our generated model classes?
    try:
        if _is_model(_get_model(module, annotation)):

            def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj):
                if _is_model(obj):
                    return obj
                return _deserialize(model_deserializer, obj)

            return functools.partial(_deserialize_model, _get_model(module, annotation))
    except Exception:
        pass

    # is it a literal?
    try:
        if annotation.__origin__ == typing.Literal:
            return None
    except AttributeError:
        pass

    if isinstance(annotation, typing._GenericAlias):  # pylint: disable=protected-access
        if annotation.__origin__ is typing.Union:

            def _deserialize_with_union(union_annotation: typing._GenericAlias, obj):
                # Try each union member in order; first success wins.
                for t in union_annotation.__args__:
                    try:
                        return _deserialize(t, obj, module)
                    except DeserializationError:
                        pass
                raise DeserializationError()

            return functools.partial(_deserialize_with_union, annotation)

    # is it optional?
    try:
        # right now, assuming we don't have unions, since we're getting rid of the only
        # union we used to have in msrest models, which was union of str and enum
        if any(a for a in annotation.__args__ if a == type(None)):

            if_obj_deserializer = _get_deserialize_callable_from_annotation(
                next(a for a in annotation.__args__ if a != type(None)), module
            )

            def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj):
                if obj is None:
                    return obj
                return _deserialize_with_callable(if_obj_deserializer, obj)

            return functools.partial(_deserialize_with_optional, if_obj_deserializer)
    except (AttributeError):
        pass

    # is it a forward ref / in quotes?
    if isinstance(annotation, str) or type(annotation) == typing.ForwardRef:
        try:
            model_name = annotation.__forward_arg__  # type: ignore
        except AttributeError:
            model_name = annotation
        if module is not None:
            # Resolve the name against the declaring module's namespace.
            annotation = _get_model(module, model_name)

    # Dict[K, V]: deserialize keys and values independently.
    try:
        if annotation._name == "Dict":
            key_deserializer = _get_deserialize_callable_from_annotation(annotation.__args__[0], module)
            value_deserializer = _get_deserialize_callable_from_annotation(annotation.__args__[1], module)

            def _deserialize_dict(
                key_deserializer: typing.Optional[typing.Callable],
                value_deserializer: typing.Optional[typing.Callable],
                obj: typing.Dict[typing.Any, typing.Any],
            ):
                if obj is None:
                    return obj
                return {
                    _deserialize(key_deserializer, k, module): _deserialize(value_deserializer, v, module)
                    for k, v in obj.items()
                }

            return functools.partial(
                _deserialize_dict,
                key_deserializer,
                value_deserializer,
            )
    except (AttributeError, IndexError):
        pass

    # Sequence types: heterogeneous (Tuple[A, B]) get one deserializer per
    # position; homogeneous ones share a single element deserializer.
    try:
        if annotation._name in ["List", "Set", "Tuple", "Sequence"]:
            if len(annotation.__args__) > 1:

                def _deserialize_multiple_sequence(
                    entry_deserializers: typing.List[typing.Optional[typing.Callable]], obj
                ):
                    if obj is None:
                        return obj
                    return type(obj)(
                        _deserialize(deserializer, entry, module)
                        for entry, deserializer in zip(obj, entry_deserializers)
                    )

                entry_deserializers = [
                    _get_deserialize_callable_from_annotation(dt, module) for dt in annotation.__args__
                ]
                return functools.partial(_deserialize_multiple_sequence, entry_deserializers)
            deserializer = _get_deserialize_callable_from_annotation(annotation.__args__[0], module)

            def _deserialize_sequence(
                deserializer: typing.Optional[typing.Callable],
                obj,
            ):
                if obj is None:
                    return obj
                return type(obj)(_deserialize(deserializer, entry, module) for entry in obj)

            return functools.partial(_deserialize_sequence, deserializer)
    except (TypeError, IndexError, AttributeError, SyntaxError):
        pass

    def _deserialize_default(
        annotation,
        deserializer_from_mapping,
        obj,
    ):
        # Try the annotation as a callable first, then the per-type fallback.
        if obj is None:
            return obj
        try:
            return _deserialize_with_callable(annotation, obj)
        except Exception:
            pass
        return _deserialize_with_callable(deserializer_from_mapping, obj)

    return functools.partial(_deserialize_default, annotation, _DESERIALIZE_MAPPING.get(annotation))
def _deserialize_with_callable(
deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], value: typing.Any
):
try:
if value is None:
return None
if isinstance(deserializer, CaseInsensitiveEnumMeta):
try:
return deserializer(value)
except ValueError:
# for unknown value, return raw value
return value
if isinstance(deserializer, type) and issubclass(deserializer, Model):
return deserializer._deserialize(value)
return deserializer(value) if deserializer else value
except Exception as e:
raise DeserializationError() from e
def _deserialize(
    deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], value: typing.Any, module: str = ""
):
    """Resolve *deserializer* (annotation or callable) and apply it to *value*.

    A raw PipelineResponse is accepted; its JSON body is deserialized.
    """
    if isinstance(value, PipelineResponse):
        value = value.http_response.json()
    resolved = _get_deserialize_callable_from_annotation(deserializer, module)
    return _deserialize_with_callable(resolved, value)
class _RestField:
    """Descriptor mapping a model attribute to its REST (wire) name and type.

    Values live in the owning Model's backing dict under the wire name; this
    descriptor serializes on write and deserializes on read.
    """

    def __init__(
        self,
        *,
        name: typing.Optional[str] = None,
        type: typing.Optional[typing.Callable] = None,
        is_discriminator: bool = False,
        readonly: bool = False,
        default: typing.Any = _UNSET,
    ):
        self._type = type
        self._rest_name_input = name
        self._module: typing.Optional[str] = None
        self._is_discriminator = is_discriminator
        self._readonly = readonly
        self._is_model = False
        self._default = default

    @property
    def _rest_name(self) -> str:
        """Wire name; set explicitly or defaulted to the attribute name by Model.__new__."""
        if self._rest_name_input is None:
            raise ValueError("Rest name was never set")
        return self._rest_name_input

    def __get__(self, obj: Model, type=None):
        # by this point, type and rest_name will have a value bc we default
        # them in __new__ of the Model class
        item = obj.get(self._rest_name)
        if item is None:
            return item
        return _deserialize(self._type, _serialize(item))

    def __set__(self, obj: Model, value) -> None:
        if value is None:
            # we want to wipe out entries if users set attr to None
            try:
                obj.__delitem__(self._rest_name)
            except KeyError:
                pass
            return
        if self._is_model and not _is_model(value):
            obj.__setitem__(self._rest_name, _deserialize(self._type, value))
            # Fix: previously this fell through and immediately overwrote the
            # deserialized model with _serialize(value) below.
            return
        obj.__setitem__(self._rest_name, _serialize(value))

    def _get_deserialize_callable_from_annotation(
        self, annotation: typing.Any
    ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]:
        return _get_deserialize_callable_from_annotation(annotation, self._module)
def rest_field(
    *,
    name: typing.Optional[str] = None,
    type: typing.Optional[typing.Callable] = None,
    readonly: bool = False,
    default: typing.Any = _UNSET,
) -> typing.Any:
    """Declare a wire-mapped field on a Model subclass."""
    field = _RestField(name=name, type=type, readonly=readonly, default=default)
    return field
def rest_discriminator(
    *, name: typing.Optional[str] = None, type: typing.Optional[typing.Callable] = None
) -> typing.Any:
    """Declare the discriminator field used for polymorphic deserialization."""
    field = _RestField(name=name, type=type, is_discriminator=True)
    return field
| true |
class MapFactory(yaml.YAMLObject):
    """YAML-constructible base factory producing a level's map and object container."""

    @classmethod
    def from_yaml(cls, loader, node):
        # YAML constructor hook: build the level map plus its objects holder.
        return {'map': cls.create_map(), 'obj': cls.create_object()}

    @classmethod
    def create_map(cls):
        return cls.Map()

    @classmethod
    def create_object(cls):
        return cls.Objects()
class EndMap(MapFactory):
    """Fixed end-of-game level: wall tiles spell out text on a floor background."""

    yaml_tag = "!end_map"

    class Map:
        def __init__(self):
            # '0' characters become walls, spaces become floor tiles.
            self.MapList = ['000000000000000000000000000000000000000',
                            '0                                     0',
                            '0                                     0',
                            '0  0   0  000   0   0  00000  0   0   0',
                            '0  0   0  0  0  0   0  0      0   0   0',
                            '0  000 0  0  0  00000  0000   0   0   0',
                            '0  0   0  0  0  0   0  0      0   0   0',
                            '0  0   0  000   0   0  00000  00000   0',
                            '0                                   0 0',
                            '0                                     0',
                            '000000000000000000000000000000000000000'
                            ]
            self.Map = list(map(list, self.MapList))
            # Translate characters into tile objects in place.
            for i in self.Map:
                for j in range(len(i)):
                    i[j] = wall if i[j] == '0' else floor1

        def get_map(self):
            return self.Map

    class Objects:
        def __init__(self):
            self.objects = []

        def get_objects(self, _map):
            # The end map is empty: no interactive objects.
            return self.objects
class RandomMap(MapFactory):
    """41x41 level with a walled border and randomly scattered wall/floor tiles."""

    yaml_tag = "!random_map"

    class Map:
        def __init__(self):
            self.MapList = [[0 for _ in range(41)] for _ in range(41)]
            self.Map = list(map(list, self.MapList))
            for i in range(41):
                for j in range(41):
                    if i == 0 or j == 0 or i == 40 or j == 40:
                        # Solid border.
                        self.Map[j][i] = wall
                    else:
                        # 1-in-9 chance of a wall, otherwise a random floor tile.
                        self.Map[j][i] = [wall, floor1, floor2, floor3, floor1,
                                          floor2, floor3, floor1, floor2][random.randint(0, 8)]

        def get_map(self):
            return self.Map

    class Objects:
        def __init__(self):
            self.objects = []

        def get_objects(self, _map):
            # Place interactable objects on free floor cells, avoiding walls,
            # already-placed objects, and the hero spawn at (1, 1).
            for obj_name in object_list_prob['objects']:
                prop = object_list_prob['objects'][obj_name]
                for i in range(random.randint(prop['min-count'], prop['max-count'])):
                    coord = (random.randint(1, 39), random.randint(1, 39))
                    intersect = True
                    while intersect:
                        intersect = False
                        if _map[coord[1]][coord[0]] == wall:
                            intersect = True
                            coord = (random.randint(1, 39),
                                     random.randint(1, 39))
                            continue
                        for obj in self.objects:
                            if coord == obj.position or coord == (1, 1):
                                intersect = True
                                coord = (random.randint(1, 39),
                                         random.randint(1, 39))
                    self.objects.append(Objects.Ally(
                        prop['sprite'], prop['action'], coord))
            for obj_name in object_list_prob['ally']:
                prop = object_list_prob['ally'][obj_name]
                for i in range(random.randint(prop['min-count'], prop['max-count'])):
                    coord = (random.randint(1, 39), random.randint(1, 39))
                    intersect = True
                    while intersect:
                        intersect = False
                        if _map[coord[1]][coord[0]] == wall:
                            intersect = True
                            coord = (random.randint(1, 39),
                                     random.randint(1, 39))
                            continue
                        for obj in self.objects:
                            if coord == obj.position or coord == (1, 1):
                                intersect = True
                                coord = (random.randint(1, 39),
                                         random.randint(1, 39))
                    self.objects.append(Objects.Ally(
                        prop['sprite'], prop['action'], coord))
            for obj_name in object_list_prob['enemies']:
                prop = object_list_prob['enemies'][obj_name]
                # NOTE(review): enemies ignore prop['min-count']/['max-count']
                # and use a fixed 0..5 range, and the initial coord range
                # (1..30, 1..22) differs from the 1..39 used elsewhere --
                # confirm whether this is intentional.
                for i in range(random.randint(0, 5)):
                    coord = (random.randint(1, 30), random.randint(1, 22))
                    intersect = True
                    while intersect:
                        intersect = False
                        if _map[coord[1]][coord[0]] == wall:
                            intersect = True
                            coord = (random.randint(1, 39),
                                     random.randint(1, 39))
                            continue
                        for obj in self.objects:
                            if coord == obj.position or coord == (1, 1):
                                intersect = True
                                coord = (random.randint(1, 39),
                                         random.randint(1, 39))
                    self.objects.append(Objects.Enemy(
                        prop['sprite'], prop, prop['experience'], coord))
            return self.objects
class EmptyMap(MapFactory):
    """Open rectangular level containing only the stairs to the next floor."""

    yaml_tag = "!empty_map"

    class Map:
        def __init__(self):
            # '0' characters become walls, spaces become floor tiles.
            self.MapList = ['000000000000000000000000000000',
                            '0                            0',
                            '0                            0',
                            '0                            0',
                            '0                            0',
                            '0                            0',
                            '0                            0',
                            '0                            0',
                            '0                            0',
                            '0                            0',
                            '000000000000000000000000000000'
                            ]
            self.Map = list(map(list, self.MapList))
            for i in range(len(self.Map)):
                for j in range(len(self.Map[0])):
                    self.Map[i][j] = wall if self.Map[i][j] == '0' else floor1

        def get_map(self):
            return self.Map

    class Objects:
        def __init__(self):
            self.objects = []

        def get_objects(self, _map):
            # Place only the stairs, on a random free floor cell away from (1, 1).
            prop = object_list_prob['objects']["stairs"]
            coord = (random.randint(1, len(_map[0]) - 1), random.randint(1, len(_map) - 1))
            intersect = True
            while intersect:
                intersect = False
                if _map[coord[1]][coord[0]] == wall:
                    intersect = True
                    coord = (random.randint(1, len(_map[0]) - 1), random.randint(1, len(_map) - 1))
                    continue
                for obj in self.objects:
                    if coord == obj.position or coord == (1, 1):
                        intersect = True
                        coord = (random.randint(1, len(_map[0]) - 1), random.randint(1, len(_map) - 1))
            self.objects.append(Objects.Ally(
                prop['sprite'], prop['action'], coord))
            return self.objects
class SpecialMap(MapFactory):
    """Maze level: a depth-first maze is carved on a 41x41 odd-cell grid, then
    ~20% of the remaining walls are knocked out at random."""

    yaml_tag = "!special_map"

    class Map:
        def __init__(self):
            self.Map = self.get_table()
            self.maze_generator()
            self.MapList = self.Map.copy()
            for i in range(41):
                for j in range(41):
                    if i == 0 or j == 0 or i == 40 or j == 40:
                        self.Map[j][i] = wall
                        continue
                    if self.Map[j][i] == 0:
                        if random.randint(0, 4) != 1:  # убираем 20% стен в случайном порядке
                            self.Map[j][i] = wall
                            continue
                    # Carved passages (and the 20% removed walls) become floor.
                    self.Map[j][i] = floor1

        def maze_generator(self):
            # Randomized depth-first maze carving over the odd-coordinate cells.
            # Visited cells are marked 1; " " marks unvisited cells.
            current_position = (1, 1)
            self.Map[1][1] = 1
            unvisited_list = self.get_unvisited_cells()
            height = len(self.Map[0]) - 1
            width = len(self.Map) - 1
            path = [(1, 1)]
            while len(unvisited_list) > 0:
                neighbours = self.get_neighbours(current_position, width, height)
                if len(neighbours) > 0:
                    # Carve toward a random unvisited neighbour.
                    rand_cell = random.randint(0, len(neighbours) - 1)
                    neighbour = neighbours[rand_cell]
                    self.remove_wall(current_position, neighbour)
                    current_position = (neighbour[0], neighbour[1])
                    self.Map[neighbour[0]][neighbour[1]] = 1
                    path.append(current_position)
                elif len(path) > 0:
                    # Dead end: backtrack along the carved path.
                    current_position = path.pop()
                else:
                    # NOTE(review): random jump to some unvisited cell; the
                    # unvisited list is only pruned as cells become current,
                    # so termination relies on that pruning -- confirm.
                    rnd_unvisited = random.randint(0, len(unvisited_list) - 1)
                    current_position = unvisited_list[rnd_unvisited]
                if current_position in unvisited_list:
                    unvisited_list.remove(current_position)
                # unvisited_list = self.get_unvisited_cells()

        def remove_wall(self, start, end):
            # Open the wall cell midway between two carved cells.
            x_diff = end[0] - start[0]
            y_diff = end[1] - start[1]
            add_x = x_diff / abs(x_diff) if x_diff != 0 else 0
            add_y = y_diff / abs(y_diff) if y_diff != 0 else 0
            target_x = start[0] + int(add_x)
            target_y = start[1] + int(add_y)
            self.Map[target_x][target_y] = 1

        def get_unvisited_cells(self):
            # All cells still holding the " " (unvisited) marker.
            result = []
            for i in range(len(self.Map)):
                for j in range(len(self.Map[0])):
                    if self.Map[j][i] == " ":
                        result.append((j, i))
            return result

        def get_neighbours(self, cell, width, height):
            # Unvisited cells two steps away in each cardinal direction.
            cells = []
            cells.append((cell[0], cell[1] - 2))
            cells.append((cell[0], cell[1] + 2))
            cells.append((cell[0] - 2, cell[1]))
            cells.append((cell[0] + 2, cell[1]))
            result = []
            for i in cells:
                if 0 < i[0] < width and 0 < i[1] < height:
                    if self.Map[i[0]][i[1]] == " ":
                        result.append(i)
            return result

        def get_table(self):
            # Lattice: even rows/columns are walls (0), odd/odd cells are
            # unvisited maze cells (" ").
            _map = [[0 for _ in range(41)] for _ in range(41)]
            for i in range(41):
                for j in range(41):
                    if i % 2 == 0 or j % 2 == 0:
                        _map[j][i] = 0
                    else:
                        _map[j][i] = " "
            return _map

        def get_map(self):
            return self.Map

    class Objects:
        def __init__(self):
            self.objects = []

        def get_objects(self, _map):
            # Populate enemies, objects and allies on free cells, avoiding
            # walls, existing objects and the hero spawn at (1, 1).
            for obj_name in object_list_prob['enemies']:
                prop = object_list_prob['enemies'][obj_name]
                # NOTE(review): fixed 0..5 count and (1..30, 1..22) initial
                # range differ from the prop-driven loops below -- confirm.
                for i in range(random.randint(0, 5)):
                    coord = (random.randint(1, 30), random.randint(1, 22))
                    intersect = True
                    while intersect:
                        intersect = False
                        if _map[coord[1]][coord[0]] == wall:
                            intersect = True
                            coord = (random.randint(1, 39),
                                     random.randint(1, 39))
                            continue
                        for obj in self.objects:
                            if coord == (1, 1) or coord == obj.position:
                                intersect = True
                                coord = (random.randint(1, 39),
                                         random.randint(1, 39))
                    self.objects.append(Objects.Enemy(
                        prop['sprite'], prop, prop['experience'], coord))
            for obj_name in object_list_prob['objects']:
                prop = object_list_prob['objects'][obj_name]
                for i in range(random.randint(prop['min-count'], prop['max-count'])):
                    coord = (random.randint(1, 39), random.randint(1, 39))
                    intersect = True
                    while intersect:
                        intersect = False
                        if _map[coord[1]][coord[0]] == wall:
                            intersect = True
                            coord = (random.randint(1, 39),
                                     random.randint(1, 39))
                            continue
                        for obj in self.objects:
                            if coord == obj.position or coord == (1, 1):
                                intersect = True
                                coord = (random.randint(1, 39),
                                         random.randint(1, 39))
                    self.objects.append(Objects.Ally(
                        prop['sprite'], prop['action'], coord))
            for obj_name in object_list_prob['ally']:
                prop = object_list_prob['ally'][obj_name]
                for i in range(random.randint(prop['min-count'], prop['max-count'])):
                    coord = (random.randint(1, 39), random.randint(1, 39))
                    intersect = True
                    while intersect:
                        intersect = False
                        if _map[coord[1]][coord[0]] == wall:
                            intersect = True
                            coord = (random.randint(1, 39),
                                     random.randint(1, 39))
                            continue
                        for obj in self.objects:
                            if coord == obj.position or coord == (1, 1):
                                intersect = True
                                coord = (random.randint(1, 39),
                                         random.randint(1, 39))
                    self.objects.append(Objects.Ally(
                        prop['sprite'], prop['action'], coord))
            return self.objects
1e5160511a1bed4d3e78b04d0d8e9f2e03b07305 | Python | anuj-nair/DSA | /Recursion/lenString.py | UTF-8 | 227 | 2.9375 | 3 | [] | no_license | from trace_recursion import trace
def str_len(n):
    # Recursive length: an empty string contributes 0, otherwise 1 + len(tail).
    if n:
        return 1 + str_len(n[1:])
    return 0
# Wrap the function with the tracing decorator to visualize the recursion.
str_len = trace(str_len)

print(str_len("Thomas"))
print(str_len("Ramu"))
print(str_len("ShivaRamaKrishnaIyer"))
| true |
b4e249f3437f82225b56aa9ac5f6cc9d88f1b17f | Python | anamariadem/University | /Semester 1/FP/Assignment 10/Obstruction/tests/test_board.py | UTF-8 | 1,245 | 3.390625 | 3 | [] | no_license | import unittest
from Obstruction.domain.board import *
class test_board(unittest.TestCase):
    """Unit tests for the Obstruction Board domain object."""

    def test_create_board(self):
        board = Board(2, 3)
        self.assertEqual(board.rows, 2)
        self.assertEqual(board.columns, 3)

    def test_move(self):
        board = Board(2, 3)
        board.move(1, 1, "X")
        board.move(0, 0, "o")
        self.assertEqual(board.square(1, 1), 1)
        self.assertEqual(board.square(0, 0), -1)

    def test_square(self):
        # A fresh board reports 0 (empty) for every square.
        board = Board(2, 3)
        for row in range(board.rows):
            for col in range(board.columns):
                self.assertEqual(board.square(row, col), 0)

    def test_valid_move(self):
        # Marking the four corners of a 3x3 board blocks every square.
        board = Board(3, 3)
        board.move(0, 0, "x")
        board.move(2, 0, "x")
        board.move(0, 2, "x")
        board.move(2, 2, "x")
        for row in range(board.rows):
            for col in range(board.columns):
                self.assertEqual(board.valid_move(row, col), False)

    def test_is_won(self):
        # No valid moves left means the game is won.
        board = Board(3, 3)
        board.move(0, 0, "x")
        board.move(2, 0, "x")
        board.move(0, 2, "x")
        board.move(2, 2, "x")
        self.assertEqual(board.is_won(), True)
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| true |
import math

# Greet the user by name.
prompt = 'what is your name'
name = input(prompt)
print("hello " + name)

# Divide two user-supplied integers (result truncated toward zero).
prompt = 'give the 1st nbr'
a = input(prompt)
a = int(a)
prompt = 'give the 2nd nbr'
b = input(prompt)
b = int(b)
print(int(a / b))

# Area of a circle from its radius.
prompt = "give me the radius of a circle to get the surface"
r = input(prompt)
r = int(r)
print(math.pi * r * r)
A, B = map(int, input().split())


def Main(x, y):
    """Return the product of the two values."""
    return x * y


print(Main(A, B))
| true |
a89fe23ed90039796019b23b53a5f6a4b4ed04cf | Python | mingzidouzhemenanqi/Getting-started-with-python-code | /Files from MOOC/Python实例/第一期MOOC实例/实例四:简易文本进度条/简易文本进度条刷新模拟.py | UTF-8 | 214 | 3.234375 | 3 | [] | no_license | #简易文本进度条刷新模拟
import time as t

# Simple text progress bar: refresh in place once per second.
total = 10
for step in range(total + 1):
    done = step * '*'
    todo = (total - step) * '.'
    percent = step * 10
    # "\r" returns the cursor to the line start so each frame overwrites the last.
    print("\r{:^3.0f}%[{}->{}]".format(percent, done, todo), end=" ")
    t.sleep(1)
| true |
08fc8f42e3ed4b052c658b720e26eef092f31e64 | Python | leer752/battleship | /code/g_func/menus.py | UTF-8 | 3,962 | 2.9375 | 3 | [] | no_license | from foundation import init_screen
from g_func import g_var, draw_grids
from during_game import scores
import pygame
# Main menu before game starts that prompts player to either begin or quit; calls the Main function
def main_menu():
    """Draw the title screen and poll events until the player starts or quits.

    NOTE(review): after pygame.quit() the loop keeps running and the next
    pygame.event.get() call will raise -- confirm the intended shutdown path.
    """
    init_screen.screen.fill(g_var.blue)
    text = g_var.title_font.render("BATTLESHIP", 1, g_var.white)
    init_screen.screen.blit(text, (393, 200))
    # Clickable menu buttons.
    start_button = pygame.Rect(350, 270, 300, 60)
    quit_button = pygame.Rect(350, 350, 300, 60)
    pygame.draw.rect(init_screen.screen, g_var.white, start_button)
    pygame.draw.rect(init_screen.screen, g_var.white, quit_button)
    text = g_var.game_font.render("start game", 1, g_var.blue)
    init_screen.screen.blit(text, (440, 290))
    text = g_var.game_font.render("quit", 1, g_var.blue)
    init_screen.screen.blit(text, (485, 370))
    pygame.display.flip()
    selecting = True
    # Imported here (not at module top), presumably to avoid a circular
    # import with the main module -- confirm.
    import main
    while selecting:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
            elif event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:  # left mouse button
                    if start_button.collidepoint(event.pos):
                        main.main()
                    elif quit_button.collidepoint(event.pos):
                        pygame.quit()
# Victory sub-menu
def victory_menu(enemy_grid):
    """Win screen drawn over the enemy grid and score.

    Blocks until the player clicks a button; returns "play again" or "quit".
    """
    screen = init_screen.screen
    screen.fill(g_var.blue)
    draw_grids.draw_enemy_grid(enemy_grid)
    scores.draw_enemy_score(enemy_grid)
    screen.blit(g_var.title_font.render("YOU WON!", 1, g_var.white), (210, 110))
    btn_again = pygame.Rect(150, 170, 300, 60)
    btn_quit = pygame.Rect(150, 250, 300, 60)
    for rect in (btn_again, btn_quit):
        pygame.draw.rect(screen, g_var.white, rect)
    screen.blit(g_var.game_font.render("play again", 1, g_var.blue), (250, 190))
    screen.blit(g_var.game_font.render("quit", 1, g_var.blue), (280, 270))
    pygame.display.flip()
    pygame.event.pump()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                if btn_again.collidepoint(event.pos):
                    return "play again"
                if btn_quit.collidepoint(event.pos):
                    return "quit"
# Defeat sub-menu
def defeat_menu(player_grid):
    """Loss screen drawn over the player's grid and score.

    Blocks until the player clicks a button; returns "play again" or "quit".
    """
    screen = init_screen.screen
    screen.fill(g_var.blue)
    draw_grids.draw_player_grid(player_grid)
    scores.draw_player_score(player_grid)
    screen.blit(g_var.title_font.render("YOU LOST!", 1, g_var.white), (660, 110))
    btn_again = pygame.Rect(600, 170, 300, 60)
    btn_quit = pygame.Rect(600, 250, 300, 60)
    for rect in (btn_again, btn_quit):
        pygame.draw.rect(screen, g_var.white, rect)
    screen.blit(g_var.game_font.render("play again", 1, g_var.blue), (700, 190))
    screen.blit(g_var.game_font.render("quit", 1, g_var.blue), (730, 270))
    pygame.display.flip()
    pygame.event.pump()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                if btn_again.collidepoint(event.pos):
                    return "play again"
                if btn_quit.collidepoint(event.pos):
                    return "quit"
| true |
e2785a00953e02ff0d9955fac61805cb3736ae91 | Python | hhsalik/staj | /02-19-Cuma/forLoop.py | UTF-8 | 368 | 4.1875 | 4 | [
"MIT"
] | permissive | # for loops
# for-loop practice: iterate a list directly, over a numeric range, by index,
# and with a branch inside the loop body.
friends = ["Hasan", "Mahmut", "Ali", "Veli"]

for name in friends:
    print(name)

for number in range(3, 10):
    print(number)

# Same list again, accessed through its indices this time.
for position, _ in enumerate(friends):
    print(friends[position])

for attempt in range(5):
    print("First Iteration" if attempt == 0 else "Not first")
e5c65dab0dc1e60df083facd5a398a92a2dfb064 | Python | xiangys0134/python_study | /new-day02/07.对象/05.反射.py | UTF-8 | 623 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
class daliu:
    """Demo class for attribute reflection (getattr/hasattr exercises).

    Every method simply prints a fixed Chinese message about "daliu"
    ("big cow") and returns None.

    Fix: the original defined a do-nothing ``__init__`` (just ``pass``);
    removed -- the implicit default constructor behaves identically.
    """
    def chi(self):
        """Print what daliu eats."""
        print("大牛一顿吃100个螃蟹")
    def he(self):
        """Print what daliu drinks."""
        print("大牛一顿喝100频可乐")
    def la(self):
        """Print the (joke) excretion line."""
        print("大牛不用拉")
    def shui(self):
        """Print how daliu sleeps."""
        print("大牛一次睡一年")
    def country(self):
        """Print the country message."""
        print('国家')
# Reflection demo: look a method up on the class by name with getattr, then
# call it with an instance passed explicitly as "self".
# (An interactive variant reading the name from input() was tried previously.)
method_name = 'country'
instance = daliu()
method = getattr(daliu, method_name)
print(method)            # the plain function object fetched from the class
print(method(instance))  # country() prints its message and returns None
| true |
7467219300bb562fb9a58a2effcb9918658add8e | Python | csdnak/McPython | /hodnik_01.py | UTF-8 | 2,972 | 2.609375 | 3 | [] | no_license | #ispred lika hodnik
import time
from crtanje import * #tu je funkcija koju zovem
from mc import * #import api-ja
mc = Minecraft() #inicijalizacija sustava za rad sa Minecraftom
def hodnik_01 ( orMj , orSm , iX=0 , iZ=0 , iY=0 , duzina= 3 , materijal = 98, dv = 0 , stepenice_mat = 109 ):
    """Build a corridor in front of the player, one 17-block segment at a time.

    orMj, orSm   -- origin position and facing direction (from gdjeSam/gdjeGledam)
    iX, iZ, iY   -- relative offset of the corridor start from the origin
    duzina       -- number of segments to build (each segment is 16 blocks long)
    materijal    -- block id for the pillars/strips (default 98, stone brick)
    dv           -- block data/variant modifier passed through to crtaj_kvadar
    stepenice_mat-- block id for the decorative stairs (default 109)

    Returns 1 when done.  NOTE(review): geometry below relies on the project
    helpers in crtanje.py (crtaj_kvadar, crtaj_stepenice, premjesti_origin,
    rel2abs); offsets are in the origin's relative frame -- confirm there.
    """
    # Shift the working origin by the requested offset.
    orMj = premjesti_origin ( orMj , iX , iZ , iY , orSm ) #move origin to the corridor start
    for br in range ( duzina ):
        crtaj_kvadar ( orMj , (0,-4,-1) , (17,4,4) , orSm , 95 , dv ) #shell (block 95): closed at the far end, open at the near end
        crtaj_kvadar ( orMj , (0,-3,0) , (16,3,3) , orSm , 0 , blok_dv = 0 ) #carve the hollow interior (air)
        # Glowstone lamps (block 89) embedded in the floor at both ends and the middle.
        dY = -1
        dZ = 0
        for dX in ( 0 , 8 , 16 ):
            gdje = rel2abs ( orMj , ( dX , dZ , dY ) , orSm ) #relative coordinates -> absolute world coordinates
            mc.setBlock( gdje , 89 )
        # Decorative strips along the upper corners.
        crtaj_kvadar ( orMj , (0,-3,3) , (16,-3,3) , orSm , materijal , dv ) #left strip
        crtaj_kvadar ( orMj , (0,3,3) , (16,3,3) , orSm , materijal , dv ) #right strip
        # Cross strip halfway along the segment.
        crtaj_kvadar ( orMj , (8,-3,3) , (8,3,3) , orSm , materijal , dv ) #crosswise
        # Pillars with stairs wrapped around their tops (three orientations each).
        crtaj_kvadar ( orMj , (8,-3,0) , (8,-3,3) , orSm , materijal , dv ) #left pillar
        crtaj_stepenice ( orMj , (7,-3,2) , (7,-3,2) , orSm , blok_id = stepenice_mat , rel_smjer = "meni" , gore_dolje = "da" ) #facing me
        crtaj_stepenice ( orMj , (8,-2,2) , (8,-2,2) , orSm , blok_id = stepenice_mat , rel_smjer = "desno" , gore_dolje = "da" ) #in the middle
        crtaj_stepenice ( orMj , (9,-3,2) , (9,-3,2) , orSm , blok_id = stepenice_mat , rel_smjer = "odmene" , gore_dolje = "da" ) #facing away
        crtaj_kvadar ( orMj , (8,3,0) , (8,3,3) , orSm , materijal , dv ) #right pillar
        crtaj_stepenice ( orMj , (7,3,2) , (7,3,2) , orSm , blok_id = stepenice_mat , rel_smjer = "meni" , gore_dolje = "da" ) #facing me
        crtaj_stepenice ( orMj , (8,2,2) , (8,2,2) , orSm , blok_id = stepenice_mat , rel_smjer = "lijevo" , gore_dolje = "da" ) #in the middle
        crtaj_stepenice ( orMj , (9,3,2) , (9,3,2) , orSm , blok_id = stepenice_mat , rel_smjer = "odmene" , gore_dolje = "da" ) #facing away
        # Advance the origin 16 blocks for the next segment, then pause.
        orMj = premjesti_origin ( orMj , 16 , 0 , 0 , orSm ) #move origin for the next step
        time.sleep ( 2 )
    return 1
if __name__ == "__main__":
    # Direct invocation: build a six-segment corridor starting at the
    # player's current position, in the direction the player is facing.
    start_pos = gdjeSam()
    start_dir = gdjeGledam()
    hodnik_01(start_pos, start_dir, iX=0, iZ=0, iY=0, duzina=6,
              materijal=98, dv=0, stepenice_mat=109)
| true |
9cb113467af50042d77157f6e1898aa703661431 | Python | akhramshaik/Machine-Learning | /My Notes/Visualization/EDA_Univariate.py | UTF-8 | 801 | 3.203125 | 3 | [] | no_license | import pandas as pd
import seaborn as sns
# Univariate EDA on the Titanic training set: one pandas frequency table,
# then a series of seaborn count/box/density plots for individual columns.
df = pd.read_csv('C:/Users/akhram/Desktop/AIML/Machine Learning/Problems/Titanic/train.csv')

# Frequency table built with pandas alone (expression value is discarded).
pd.crosstab(index=df["Survived"], columns="count")

# Seaborn plots: class counts and per-column distributions.
sns.countplot(data=df, x='Survived')
sns.boxplot(data=df, x='Fare')

# Continuous features: summary stats plus box/density views.
df['Fare'].describe()
sns.boxplot(data=df, x='Fare')
sns.distplot(df['Fare'])
sns.distplot(df['Fare'], hist=False)
sns.distplot(df['Age'], hist=False)
sns.boxplot(data=df, x='Age')
sns.distplot(df['SibSp'], hist=False)
sns.boxplot(data=df, x='SibSp')
sns.boxplot(data=df, x='Survived', y='Fare')
839668ae55d89b0dfaccb339e21a24d0d794cf53 | Python | acererak/convex-hull | /TurtleCanvas.py | UTF-8 | 4,032 | 3.140625 | 3 | [] | no_license | from math import sin,cos
from tkinter import *
from Turtle import Turtle, Position2D, Segment2D
class TurtleCanvas(Frame):
    """Tkinter window that renders turtles, line segments and dot marks on a
    graph-paper style grid.

    World coordinates are centred on the canvas; world +y points up, so the
    screen y coordinate is obtained by *subtracting* the world y (tkinter's
    canvas y axis grows downward).
    """
    def draw_grid(self):
        """Paint the graph-paper background: 8 major divisions, each split
        into 5 minor ones, with thicker lines on the major divisions."""
        graph_paper_bg = '#c0d0a0'
        graph_paper_fg = '#80b060'
        major_grid_lines = 8 # this needs to be even (grid is symmetric about the centre)
        minor_grid_lines = 5
        grid_lines = major_grid_lines * minor_grid_lines + 1
        # Spacing between adjacent grid lines inside the border margin.
        scale = (self.size - 2.0 * self.border_size) / (grid_lines - 1)
        min_x = self.border_size
        max_x = self.size - self.border_size
        min_y = self.border_size
        max_y = self.size - self.border_size
        self.canvas.create_rectangle(0.0,0.0,self.size,self.size,
                                     outline=graph_paper_fg, \
                                     fill=graph_paper_bg)
        center_x = self.size / 2.0
        center_y = self.size / 2.0
        # Horizontal lines; every minor_grid_lines-th one is drawn thicker.
        for i in range(-(grid_lines//2), +grid_lines//2 + 1):
            y = center_y + i * scale
            if i % minor_grid_lines == 0:
                self.canvas.create_line(min_x,y,max_x,y,fill=graph_paper_fg,width=2)
            else:
                self.canvas.create_line(min_x,y,max_x,y,fill=graph_paper_fg,width=1)
        # Vertical lines, same major/minor widths.
        for i in range(-(grid_lines//2), +grid_lines//2 + 1):
            x = center_x + i * scale
            if i % minor_grid_lines == 0:
                self.canvas.create_line(x,min_y,x,max_y,fill=graph_paper_fg,width=2)
            else:
                self.canvas.create_line(x,min_y,x,max_y,fill=graph_paper_fg,width=1)
    def draw_turtle(self,turtle):
        """Draw one turtle: a 20px body circle plus a smaller head circle
        offset 12px along the turtle's heading (heading_radians)."""
        center_x = self.size / 2.0
        center_y = self.size / 2.0
        # World -> screen: translate to centre, negate y.
        turtle_x = center_x + turtle.location.x
        turtle_y = center_y - turtle.location.y
        turtle_head_color = '#508000'
        self.canvas.create_oval(turtle_x + 12.0 * cos(turtle.heading_radians) - 5.0, \
                                turtle_y - 12.0 * sin(turtle.heading_radians) + 5.0, \
                                turtle_x + 12.0 * cos(turtle.heading_radians) + 5.0, \
                                turtle_y - 12.0 * sin(turtle.heading_radians) - 5.0, \
                                fill = turtle_head_color, \
                                outline = turtle_head_color)
        turtle_body_color = '#50b000'
        self.canvas.create_oval(turtle_x - 10.0, \
                                turtle_y + 10.0, \
                                turtle_x + 10.0, \
                                turtle_y - 10.0, \
                                fill = turtle_body_color, \
                                outline = turtle_body_color)
    def draw_segment(self,segment,ink):
        """Draw a Segment2D as a 2px line in the given ink colour."""
        center_x = self.size / 2.0
        center_y = self.size / 2.0
        x1 = center_x + segment.start.x
        y1 = center_y - segment.start.y
        x2 = center_x + segment.end.x
        y2 = center_y - segment.end.y
        self.canvas.create_line(x1,y1,x2,y2,fill=ink,width=2)
    def draw_mark(self,loc,ink):
        """Draw a filled 10px-diameter dot at world position loc."""
        center_x = self.size / 2.0
        center_y = self.size / 2.0
        x1 = center_x + loc.x - 5.0
        y1 = center_y - loc.y - 5.0
        x2 = center_x + loc.x + 5.0
        y2 = center_y - loc.y + 5.0
        self.canvas.create_oval(x1,y1,x2,y2,fill=ink,width=0)
    def render(self):
        """ Renders the state of the simulation on the tkinter canvas. """
        self.canvas.delete('all')
        self.draw_grid()
        for (segment,ink) in self.segments:
            self.draw_segment(segment,ink)
        for (loc,ink) in self.marks:
            self.draw_mark(loc,ink)
        for turtle in self.turtles:
            self.draw_turtle(turtle)
        self.update()
    def add_segment(self,seg,ink):
        # Register a segment to be drawn on the next render().
        pair = (seg,ink)
        self.segments.append(pair)
    def add_mark(self,loc,ink):
        # Register a dot mark to be drawn on the next render().
        pair = (loc,ink)
        self.marks.append(pair)
    def add_turtle(self,turtle):
        # Register a turtle to be drawn on the next render().
        self.turtles.append(turtle)
    #
    # __init__ - constructs a new TurtleCanvas instance and shows the window
    #
    def __init__(self):
        self.size = 500
        self.border_size = 50
        self.turtles = []
        self.segments = []
        self.marks = []
        # initialize the display
        self.root = Tk()
        self.root.title('MATH 121 Turtle Canvas')
        Frame.__init__(self, self.root)
        self.canvas = Canvas(self.root, width=self.size, height=self.size)
        self.canvas.pack()
        self.pack()
        self.render()
| true |
7901e6fc860715ed93f422dfe921081a3cbf9cf9 | Python | gowth6m/zombie-maze | /settings.py | UTF-8 | 1,354 | 2.65625 | 3 | [] | no_license | # IMPORTS AND FILES
import pygame as pg
import math
from random import choice
# pygame must be initialised before the SysFont/image.load calls below.
pg.init()
pg.font.init()  # redundant after pg.init(), but harmless
vec = pg.math.Vector2  # shorthand for 2D vectors used across the game
# COLOURS (RGB tuples)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
DARKGREY = (40, 40, 40)
LIGHTGREY = (100, 100, 100)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
# GAME SETTINGS
TITLE = 'Zombie Maze'
WIDTH = 800
HEIGHT = int(WIDTH/4 * 3)  # keep a 4:3 aspect ratio
SCREEN_SIZE = (WIDTH, HEIGHT)
FPS = 60
TILESIZE = 32  # map tile edge length in pixels
GRIDWIDTH = WIDTH / TILESIZE
GRIDHEIGHT = HEIGHT / TILESIZE
# PLAYER PROPERTIES
PLAYER_HP = 150
PLAYER_SPEED = 150  # pixels per second
PLAYER_IMG = 'main_player.png'
PLAYER_ROT_SPEED = math.pi  # radians per second
PLAYER_HIT_RECT = pg.Rect(0, 0, 32, 32)  # collision box, smaller than sprite
ZOMBIES_KILLED = 0  # running kill counter (mutated during play)
# GUN PROPERTIES
BULLET_OFFSET = vec(20, 10)  # muzzle position relative to the player sprite
BULLET_IMG = 'bullet.png'
BULLET_SPEED = 300
BULLET_TRAVEL = 1000  # max distance before the bullet despawns
BULLET_RATE = 200  # ms between shots
KNOCKBACK = 20
GUN_SPREAD = 5  # aim jitter in degrees
BULLET_DAMAGE = 20
# MOB PROPERTIES (three zombie variants with different HP/speeds)
MOB_IMG = 'mob1.png'
MOB_IMG2 = 'mob2.png'
MOB_IMG3 = 'mob3.png'
MOB_HP = 100
MOB_HP2 = 50
MOB_HP3 = 200
MOB_DETECT = 400  # aggro radius in pixels
MOB_SPEEDS = [50, 75, 25]
# NOTE(review): MOB_S's purpose is not evident here -- presumably a scale or
# speed multiplier; confirm at its usage site.
MOB_S = 2.25
MOB_HIT_RECT = pg.Rect(0, 0, 30, 30)
MOB_DAMAGE = 10
MOB_KNOCKBACK = 20
AVOID_RADIUS = 50  # mob-to-mob separation distance
# BLOOD = 'blood.png'
# FONTS (require pg.font.init above)
FONT = pg.font.SysFont("None", 25)
FONT2 = pg.font.SysFont("None", 60)
# FONT2 = pg.font.
# IMAGES (loaded once at import time)
BG = pg.image.load("img/thefloor.png")
BG2 = pg.image.load("img/wall2.jpg")
BACKGROUND = pg.image.load("img/grass.jpg")
0e1943f715a4204b150dda06654a72db8b225dff | Python | YuzhiSun/CompetitionLearn | /Hand_on_Books/topic1/data_explore2.py | UTF-8 | 9,256 | 2.75 | 3 | [] | no_license | from scipy import stats
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import warnings
# Silence library warnings for the exploratory plots below.
warnings.filterwarnings("ignore")

# Load the tab-separated steam-prediction datasets; the last column of the
# training file is the regression target.
train_data_file = "D:\\project\\TianChi\\data\\zhengqi_train.txt"
test_data_file = "D:\\project\\TianChi\\data\\zhengqi_test.txt"

train_data = pd.read_csv(train_data_file, encoding='utf-8', sep='\t')
test_data = pd.read_csv(test_data_file, encoding='utf-8', sep='\t')

# Feature matrix / target vector split of the training set.
X_train = train_data.iloc[:, :-1]
y_train = train_data.iloc[:, -1]
def find_outliers(model, X, y, sigma=3):
    """Flag samples whose standardized regression residual exceeds `sigma`.

    Tries model.predict first and falls back to fitting the model on (X, y)
    when prediction fails (e.g. the model is not fitted yet). Prints fit
    statistics, draws a three-panel diagnostic figure (saved to outliers.png)
    and returns the index of the flagged outliers.
    """
    try:
        y_pred = pd.Series(model.predict(X), index=y.index)
    # BUG FIX: was a bare `except:`, which also swallows KeyboardInterrupt
    # and SystemExit; catch ordinary exceptions only.
    except Exception:
        model.fit(X, y)
        y_pred = pd.Series(model.predict(X), index=y.index)
    # Standardize the residuals and flag anything beyond `sigma` deviations.
    resid = y - y_pred
    mean_resid = resid.mean()
    std_resid = resid.std()
    z = (resid - mean_resid) / std_resid
    outliers = z[abs(z) > sigma].index
    print('R2=', model.score(X, y))
    print("mse=", mean_squared_error(y, y_pred))
    print('-----------------------------------------')
    print('mean of residuals:', mean_resid)
    print('std of residuals:', std_resid)
    print('-----------------------------------------')
    print(len(outliers), 'outliers:')
    print(outliers.tolist())
    # Panel 1: predicted vs. actual, outliers highlighted in red.
    plt.figure(figsize=(15, 5))
    ax_131 = plt.subplot(1, 3, 1)
    plt.plot(y, y_pred, '.')
    plt.plot(y.loc[outliers], y_pred.loc[outliers], 'ro')
    plt.legend(['Accepted', 'Outlier'])
    plt.xlabel('y')
    plt.ylabel('y_pred')
    # Panel 2: residuals vs. actual.
    ax_132 = plt.subplot(1, 3, 2)
    plt.plot(y, y - y_pred, '.')
    plt.plot(y.loc[outliers], y.loc[outliers] - y_pred.loc[outliers], 'ro')
    plt.legend(['Accepted', 'Outlier'])
    plt.xlabel('y')
    plt.ylabel('y - y_pred')
    # Panel 3: histogram of the z-scores, outlier portion overlaid in red.
    ax_133 = plt.subplot(1, 3, 3)
    z.plot.hist(bins=50, ax=ax_133)
    z.loc[outliers].plot.hist(color='r', bins=50, ax=ax_133)
    plt.legend(['Accepted', 'Outlier'])
    plt.xlabel('z')
    plt.savefig('D:\\project\\TianChi\\Hand_on_Books\\topic1\\outliers.png')
    return outliers
# outliers = find_outliers(Ridge(), X_train, y_train)
def draw_QQ_sample():
    """Distribution plot (with fitted normal) and Q-Q plot for feature V0."""
    plt.figure(figsize=(10, 5))
    left = plt.subplot(1, 2, 1)
    sns.distplot(train_data['V0'], fit=stats.norm)
    right = plt.subplot(1, 2, 2)
    stats.probplot(train_data['V0'], plot=plt)
    # plt.show()
def draw_QQ_of_variable():
    """For every training column: distribution plot beside its Q-Q plot.

    The whole panel grid is written to QQ.png.
    """
    n_cols = 6
    n_rows = len(train_data.columns)
    plt.figure(figsize=(4 * n_cols, 4 * n_rows))
    for idx, col in enumerate(train_data.columns):
        # Odd panel: distribution with a fitted normal curve.
        plt.subplot(n_rows, n_cols, 2 * idx + 1)
        sns.distplot(train_data[col], fit=stats.norm)
        # Even panel: Q-Q plot against the normal distribution.
        plt.subplot(n_rows, n_cols, 2 * idx + 2)
        stats.probplot(train_data[col], plot=plt)
    plt.tight_layout()
    plt.savefig('D:\\project\\TianChi\\Hand_on_Books\\topic1\\QQ.png')
# draw_QQ_of_variable()
def KDE():
    """Overlay train/test kernel-density estimates per feature.

    First a single V0 comparison figure, then one panel per test column;
    the panel grid is saved to KDE.png.
    """
    def overlay(col):
        # Red = train, blue = test, shared axes and legend.
        axis = sns.kdeplot(train_data[col], color="Red", shade=True)
        axis = sns.kdeplot(test_data[col], color="Blue", shade=True)
        axis.set_xlabel(col)
        axis.set_ylabel("Frequency")
        axis.legend(["train", "test"])

    plt.figure(figsize=(8, 4), dpi=150)
    overlay('V0')
    # plt.show()
    n_cols = 6
    n_rows = len(test_data.columns)
    plt.figure(figsize=(4 * n_cols, 4 * n_rows))
    for pos, col in enumerate(test_data.columns, start=1):
        plt.subplot(n_rows, n_cols, pos)
        overlay(col)
    plt.savefig('D:\\project\\TianChi\\Hand_on_Books\\topic1\\KDE.png')
# KDE()
def liner_reg():
    """Scatter + fitted regression line of each feature against the target,
    paired with the feature's distribution; the grid is saved to line_reg.png.
    """
    # Single-feature preview for V0.
    plt.figure(figsize=(8, 4), dpi=150)
    axis = plt.subplot(1, 2, 1)
    sns.regplot(x='V0', y='target', data=train_data, ax=axis,
                scatter_kws={'marker': '.', 's': 3, 'alpha': 0.3},
                line_kws={'color': 'k'})
    plt.xlabel('V0')
    plt.ylabel('target')
    axis = plt.subplot(1, 2, 2)
    sns.distplot(train_data['V0'].dropna())
    plt.xlabel('V0')
    # Full grid: two panels (regplot + distplot) per test column.
    n_cols = 6
    n_rows = len(test_data.columns)
    plt.figure(figsize=(5 * n_cols, 4 * n_rows))
    pos = 0
    for col in test_data.columns:
        pos += 1
        axis = plt.subplot(n_rows, n_cols, pos)
        sns.regplot(x=col, y='target', data=train_data, ax=axis,
                    scatter_kws={'marker': '.', 's': 3, 'alpha': 0.3},
                    line_kws={'color': 'k'})
        plt.xlabel(col)
        plt.ylabel('target')
        pos += 1
        axis = plt.subplot(n_rows, n_cols, pos)
        sns.distplot(train_data[col].dropna())
        plt.xlabel(col)
    plt.savefig('D:\\project\\TianChi\\CompetitionLearn\\Hand_on_Books\\topic1\\image\\line_reg.png')
# liner_reg()
# Training set with the six features whose train/test distributions diverge
# removed (the same column list is dropped again inside Box_Cox).
data_train1 = train_data.drop(['V5','V9','V11','V17','V22','V28'], axis=1)
def cal_corr():
    """Correlation analysis: full heatmap, top-10 target correlations, and
    threshold-based feature filtering (|corr(target)| > 0.5)."""
    pd.set_option('display.max_columns', 10)
    pd.set_option('display.max_rows', 10)
    # Correlations among the distribution-filtered training features.
    train_corr = data_train1.corr()
    # print(train_corr)
    fig = plt.subplots(figsize=(20, 16))
    fig = sns.heatmap(train_corr, vmax=.8, square=True, annot=True)
    # Ten features most correlated with the target.
    top_k = 10
    cols = train_corr.nlargest(top_k, 'target')['target'].index
    corr_values = np.corrcoef(train_data[cols].values.T)
    heat = plt.subplots(figsize=(10, 10))
    heat = sns.heatmap(train_data[cols].corr(), annot=True, square=True)
    plt.show()
    # Features whose |corr(target)| clears the threshold.
    threshold = 0.5
    corrmat = train_data.corr()
    strong_features = corrmat.index[abs(corrmat['target']) > threshold]
    plt.figure(figsize=(10, 10))
    g = sns.heatmap(train_data[strong_features].corr(), annot=True,
                    cmap="RdYlGn")
    # Candidate columns to drop because they correlate weakly with the target.
    corr_matrix = data_train1.corr().abs()
    drop_col = corr_matrix[corr_matrix['target'] < threshold].index
    # data_all.drop(drop_col, axis=1, inplace=True)
# cal_corr()
def scale_minmax(col):
    """Rescale a Series/array linearly onto [0, 1] (min -> 0, max -> 1)."""
    lo, hi = col.min(), col.max()
    return (col - lo) / (hi - lo)
def Box_Cox():
    """Min-max scale the merged train/test features and visualize Box-Cox
    transforms for the first 13 columns.

    For each of those columns the figure shows: original distribution, its
    Q-Q plot and skew, scatter vs. target with correlation, then the same
    three views after a Box-Cox transform. Saved to Box-Cox.png.
    """
    drop_columns = ['V5', 'V9', 'V11', 'V17', 'V22', 'V28']
    # Merge train features (without the target) and test features, dropping
    # the six columns whose train/test distributions diverge.
    train_x = train_data.drop(['target'], axis=1)
    data_all = pd.concat([train_x, test_data])
    data_all.drop(drop_columns, axis=1, inplace=True)
    # print(data_all.head())
    cols_numeric = list(data_all.columns)
    # Min-max scale every column of the merged frame and of each split.
    data_all[cols_numeric] = data_all[cols_numeric].apply(scale_minmax, axis=0)
    # print(data_all[cols_numeric].describe())
    train_data_process = train_data[cols_numeric]
    train_data_process = train_data_process[cols_numeric].apply(scale_minmax,
                                                               axis=0)
    test_data_process = test_data[cols_numeric]
    test_data_process = test_data_process[cols_numeric].apply(scale_minmax,
                                                              axis=0)
    # Only the first 13 columns are visualized (the unused right-half slice
    # from the original code was removed).
    cols_numeric_left = cols_numeric[0:13]
    train_data_process = pd.concat([train_data_process, train_data['target']],
                                   axis=1)
    fcols = 6
    frows = len(cols_numeric_left)
    plt.figure(figsize=(4 * fcols, 4 * frows))
    i = 0
    for var in cols_numeric_left:
        dat = train_data_process[[var, 'target']].dropna()
        # Column 1: original distribution with fitted normal.
        i += 1
        plt.subplot(frows, fcols, i)
        sns.distplot(dat[var], fit=stats.norm)
        plt.title(var + ' Original')
        plt.xlabel('')
        # Column 2: Q-Q plot and skewness of the original data.
        i += 1
        plt.subplot(frows, fcols, i)
        _ = stats.probplot(dat[var], plot=plt)
        plt.title('skew=' + '{:.4f}'.format(stats.skew(dat[var])))
        plt.xlabel('')
        plt.ylabel('')
        # Column 3: scatter against the target with the linear correlation.
        i += 1
        plt.subplot(frows, fcols, i)
        plt.plot(dat[var], dat['target'], '.', alpha=0.5)
        plt.title('corr=' +
                  '{:.2f}'.format(np.corrcoef(dat[var], dat['target'])[0][1]))
        # Column 4: Box-Cox-transformed distribution, rescaled to [0, 1].
        # (+1 shifts the data positive, as boxcox requires.)
        i += 1
        plt.subplot(frows, fcols, i)
        trans_var, lambda_var = stats.boxcox(dat[var].dropna() + 1)
        trans_var = scale_minmax(trans_var)
        sns.distplot(trans_var, fit=stats.norm)
        plt.title(var + ' Transformed')
        plt.xlabel('')
        # Column 5: Q-Q plot and skewness after the transform.
        i += 1
        plt.subplot(frows, fcols, i)
        _ = stats.probplot(trans_var, plot=plt)
        plt.title('skew=' + '{:.4f}'.format(stats.skew(trans_var)))
        plt.xlabel('')
        plt.ylabel('')
        # Column 6: transformed values against the target.
        # BUG FIX: the format spec here was the literal ':.2f' (no braces),
        # so the title always read "corr=:.2f"; now '{:.2f}' like column 3.
        i += 1
        plt.subplot(frows, fcols, i)
        plt.plot(trans_var, dat['target'], '.', alpha=0.5)
        plt.title('corr=' +
                  '{:.2f}'.format(np.corrcoef(trans_var, dat['target'])[0][1]))
    plt.savefig('D:\\project\\TianChi\\CompetitionLearn\\Hand_on_Books\\topic1\\image\\Box-Cox.png')
# Box_Cox()
| true |
a43c0c449b387ba5d0ee2e84b7d8082e7781d9aa | Python | SungFeng-Huang/SSL-pretraining-separation | /local/librimix/create_local_metadata.py | UTF-8 | 1,682 | 2.546875 | 3 | [] | no_license | import os
import shutil
import argparse
from glob import glob
import pandas as pd
# Command line arguments:
#   --librimix_dir       root of the generated LibriMix corpus to scan
#   --metadata_old_root  if given, this path prefix inside the metadata CSVs
#                        is rewritten to the new librimix_dir
parser = argparse.ArgumentParser()
parser.add_argument(
    "--librimix_dir", type=str, default=None, help="Path to librispeech root directory"
)
parser.add_argument(
    "--metadata_old_root", type=str, default=None, help="Old root in metadata, specified to change to new root"
)
def main(args):
    """Unpack the CLI namespace and run the metadata copy/rewrite step."""
    create_local_metadata(args.librimix_dir, args.metadata_old_root)
def create_local_metadata(librimix_dir, metadata_old_root):
    """Copy LibriMix "mix*" metadata CSVs into a local data/librimix tree.

    Scans librimix_dir three levels deep for "metadata" directories and, for
    each mix_* CSV, mirrors it under data/librimix/<relative>/<subset>/.
    If metadata_old_root is given, every path-like column has that prefix
    rewritten to librimix_dir before the CSV is written; otherwise the file
    is copied verbatim.

    Fix: removed the unused local `corpus` (dead code in the original).
    """
    md_dirs = [f for f in glob(os.path.join(librimix_dir, "*/*/*")) if f.endswith("metadata")]
    for md_dir in md_dirs:
        md_files = [f for f in os.listdir(md_dir) if f.startswith("mix")]
        for md_file in md_files:
            print(md_dir, md_file)
            # File names look like mix_<subset>_...; the subset becomes a
            # directory level in the local tree.
            subset = md_file.split("_")[1]
            local_path = os.path.join(
                "data/librimix", os.path.relpath(md_dir, librimix_dir), subset
            ).replace("/metadata", "")
            os.makedirs(local_path, exist_ok=True)
            if metadata_old_root is None:
                shutil.copy(os.path.join(md_dir, md_file), local_path)
            else:
                data = pd.read_csv(os.path.join(md_dir, md_file))
                for key in data.keys():
                    if "path" in key:
                        # NOTE(review): Series.str.replace treats the pattern
                        # as a regex in older pandas; paths with regex
                        # metacharacters could misbehave -- confirm.
                        data[key] = data[key].str.replace(metadata_old_root, librimix_dir)
                data.to_csv(os.path.join(local_path, md_file), index=False)
if __name__ == "__main__":
    # Parse CLI flags and run the copy/rewrite.
    args = parser.parse_args()
    main(args)
| true |
c1f24ee212d6cd6f56928b298cc05d5c65b6d6ab | Python | kellyhennigan/cueexp_scripts | /preproc_func.py | UTF-8 | 9,187 | 2.765625 | 3 | [] | no_license | #!/usr/bin/python
# filename: preproc_func.py
# script to do pre-processing of functional data. Does the following:
# 1) drops first X volumes from each scan run
# 2) slice time correction
# 3) pulls out a functional reference volume from the 1st run for each task
# 4) motion correct to task functional reference volume; saves out motion params
# to a text file
# 5) smooths data
# 6) converts to % change units
# 6) applies a high-pass filter
# after doing that for each scan within a task:
# 7) concatanate preprocessed scans & motion params, delete intermediary files
# NOTE: this script was written for usign nifti formatted data,
# not afni's annoying brik/head format
# TO DO:
# remove ref file; change
import os,sys,glob
##################### define global variables ##################################
# EDIT AS NEEDED:
# Resolve the project root (one level above scripts/), then return to scripts/.
os.chdir('../')
main_dir=os.getcwd()
os.chdir('scripts')
# data directory
dataDir=main_dir+'/data'
#dataDir = main_diros.path.join(os.path.expanduser('~'),'cueexp','data')
rawFile = os.path.join(dataDir,'%s','raw','%s%d.nii.gz') # %s for subject,
# then task, %d for scan run number
outDir = os.path.join(dataDir,'%s','func_proc') # dir for processed data
# Ask whether generated shell commands should actually be executed.
# (Python 2: input() evaluates the typed text, so 1 -> True, 0 -> False.)
print 'execute commands?'
xc = bool(input('enter 1 for yes, or 0 to only print: '))
############## define relevant variables for each pre-processing step as needed
# how many volumes to drop from the beginning of each scan?
drop_nvols = 6
# slice timing correction parameter string (passed to 3dTshift)
st_param_str = '-slice 0 -tpattern altplus'
# which volume to use from 1st func run as a reference volume?
ref_idx = 4
# motion correction parameter string (passed to 3dvolreg)
mc_param_str = '-Fourier -twopass -zpad 4' # mc params
censor_lim = 0.5 # euclidian norm limit for censoring vols due to motion
# fwhm gaussian kernel to use for smoothing (in mm)
smooth_mm = 4
# high-pass filter cutoff (.011 hz ~= 1 cycle/90 sec)
hz_limit = .011
############################### DO IT #########################################
###############################################################################
###### HOPEFULLY DON T HAVE TO EDIT BELOW HERE. OR THATS THE PLAN ANYWAY. #####
###############################################################################
######### print commands & execute if xc is True, otherwise just print them
def doCommand(cmd):
print cmd+'\n'
if xc is True:
os.system(cmd)
######### get task
def whichTask():
all_tasks = ['cue','mid','midi']
all_runs = [[1],[1,2],[1,2]] # # of runs corresponding to each task
print('process which task?\n')
print('\t1) '+all_tasks[0])
print('\t2) '+all_tasks[1])
print('\t3) '+all_tasks[2]+'\n')
ti = raw_input('enter 1,2, or 3, or hit enter to process all: ') # task index
if ti:
tasks = [all_tasks[int(ti)-1]]
runs = [all_runs[int(ti)-1]]
else:
tasks = all_tasks
runs = all_runs
return tasks,runs
######### get main data directory and subjects to process
def whichSubs():
    """Prompt for subject IDs; empty input selects every subject returned by
    getCueSubjects.getsubs(). Returns the list of subject-ID strings."""
    from getCueSubjects import getsubs
    subjects,gi = getsubs()
    print(' '.join(subjects))
    input_subs = raw_input('subject id(s) (hit enter to process all subs): ')
    print('\nyou entered: '+input_subs+'\n')
    if input_subs:
        subjects=input_subs.split(' ')
    return subjects
######### make sure that the filename makes sense
def checkNiiFName(niiFileName):
    """Return the filename with a '.nii.gz' suffix appended unless the name
    already mentions 'nii' or carries an AFNI '+' suffix (e.g. '+orig')."""
    if 'nii' not in niiFileName and '+' not in niiFileName:
        niiFileName += '.nii.gz'
    return niiFileName
######### concatenate data in the 4th dim; sub_idx must be either string in
# afni notation or an integer indexing which vol(s) to concatanate
def cat4d(inFiles, outFile, sub_idx=''):
    """Concatenate volumes in time with AFNI 3dTcat; returns the output name.

    inFiles may be a single filename or a list; sub_idx may be an AFNI
    sub-brick selector string (e.g. '[4..$]') or a single integer index.
    """
    outFile = checkNiiFName(outFile)
    if type(inFiles) is str:
        inFiles = [inFiles]  # accept a single filename
    if type(sub_idx) is int:
        sub_idx = '[' + str(sub_idx) + ']'  # wrap as an AFNI selector
    sources = ' '.join(f + sub_idx for f in inFiles)
    doCommand('3dTcat -output ' + outFile + ' ' + sources)
    return outFile
######### slice time correct
def correctSliceTiming(inFile):
    """Slice-time correct with AFNI 3dTshift, using the module-level
    st_param_str options; returns the 't'-prefixed output filename."""
    outFile = checkNiiFName('t' + inFile)
    doCommand('3dTshift -prefix ' + outFile + ' ' + st_param_str + ' ' + inFile)
    return outFile
######### motion correct: inFile, r is run num, refFile is functional ref volume
def correctMotion(inFile,refFile):
    """Motion-correct inFile to refFile with AFNI 3dvolreg, then build censor
    files for high-motion volumes with 1d_tool.py.

    Relies on the module globals task, r, mc_param_str and censor_lim.
    Returns (outFile, mc_files), where mc_files lists the generated 1D
    motion/censor/enorm filenames.
    """
    outFile = checkNiiFName('m'+inFile)
    # uses global variables task, r, mc_param_str, censor_lim
    mc_str = task+str(r)+'_vr'
    # motion correct; uses global var mc_param_str
    cmd = ('3dvolreg '+mc_param_str+' -dfile '+mc_str+'.1D -base '+refFile+' '
        '-prefix '+outFile+' '+inFile)
    doCommand(cmd)
    # censor vols w/motion; uses global var censor_lim
    cmd = ('1d_tool.py -infile '+mc_str+'.1D[1..6] -show_censor_count '
        '-censor_prev_TR -censor_motion '+str(censor_lim)+' '+task+str(r))
    doCommand(cmd)
    # make a list that contains all motion-correction related generated files
    mc_files = [mc_str+'.1D',task+str(r)+'_censor.1D',task+str(r)+'_enorm.1D']
    return (outFile, mc_files)
######### smooth
def smooth(inFile):
    """Spatially smooth every sub-brick with AFNI 3dmerge using the
    module-level smooth_mm FWHM; returns the 's'-prefixed output filename."""
    outFile = checkNiiFName('s' + inFile)
    doCommand('3dmerge -1blur_fwhm ' + str(smooth_mm) + ' -doall -quiet '
              '-prefix ' + outFile + ' ' + inFile)
    return outFile
######### convert BOLD data to units of % signal change
def convertUnits(inFile):
    """Scale the run to percent signal change, 100*(x - mean)/mean.

    Computes the run mean with 3dTstat, then scales with 3dcalc. Relies on
    the module globals task and r for the mean file's name; returns the
    'p'-prefixed output filename.
    """
    outFile = checkNiiFName('p' + inFile)
    meanFile = checkNiiFName(task + str(r) + '_mean')
    # First compute the run mean, then scale against it.
    doCommand('3dTstat -mean -prefix ' + meanFile + ' ' + inFile)
    doCommand('3dcalc -a ' + inFile + ' -b ' + meanFile +
              ' -expr "((a-b)/b)*100" '
              '-prefix ' + outFile + ' -datum float')
    return outFile
######### high-pass filter
def hpFilter(inFile):
    """High-pass filter with AFNI 3dFourier at the module-level hz_limit
    cutoff; returns the 'f'-prefixed output filename."""
    outFile = checkNiiFName('f' + inFile)
    doCommand('3dFourier -highpass ' + str(hz_limit) + ' -prefix ' + outFile + ' ' + inFile)
    return outFile
######### concatenate motion files across runs - this is very specific!!
def concatRunMCFiles(mcFiles):
    """Concatenate per-run motion files into single per-task 1D files.

    mcFiles is the three-element list built in the main loop, where each
    element is a space-separated string of per-run filenames (this function
    is very specific to that format). Uses the module global task; shells
    out via doCommand, so nothing happens unless xc is True.
    """
    ########## clear out any pre-existing concatenated motion files
    cmd = ('rm '+task+'_vr.1D; rm '+task+'_censor.1D; rm '+task+'_enorm.1D')
    doCommand(cmd)
    ########## create master motion regs file for all runs
    cmd = ('cat '+mcFiles[0]+' >> '+task+'_vr.1D')
    doCommand(cmd)
    ########## create master motion censor file for all runs
    cmd = ('cat '+mcFiles[1]+' >> '+task+'_censor.1D')
    doCommand(cmd)
    ########## create master motion enorm file for all runs
    cmd = ('cat '+mcFiles[2]+' >> '+task+'_enorm.1D')
    doCommand(cmd)
########################### MAIN PREPROC FUNCTION ###########################
if __name__ == '__main__':
    # Interactive driver: pick task(s) and subject(s), then run the full
    # per-run pipeline (drop TRs -> slice-time -> motion -> smooth ->
    # %-change -> high-pass) and concatenate runs per task.
    tasks,runs = whichTask()
    subjects = whichSubs()
    for subject in subjects: # subject loop
        print('\nWORKING ON SUBJECT '+subject+'\n')
        # define subject specific directory for processed data
        this_outDir = outDir % (subject)
        # make out directory if doesn't already exist
        if not os.path.exists(this_outDir):
            print 'making new dir: '+this_outDir+'\n'
            if xc is True:
                os.makedirs(this_outDir)
        # cd to processed data directory
        print 'cd '+this_outDir+'\n'
        if xc is True:
            os.chdir(this_outDir)
        # task loop
        for t in range(0,len(tasks)):
            task = tasks[t] # cue, mid, or midi
            task_runs = runs[t] # number of scan runs in this task
            print 'PROCESSING '+task+' DATA\n'
            ppFiles = [] # list of pre-processed file names
            mcFiles = ['','',''] # motion file names from each run
            for r in task_runs:
                ######### raw to afni format, omit first TRs if desired
                this_rawFile = rawFile % (subject,task,r)
                funcFile = cat4d(this_rawFile,task+str(r),'['+str(drop_nvols)+'..$]')
                ######### slice time correct
                funcFile = correctSliceTiming(funcFile)
                ######### make a functional reference volume if it doesn't exist yet
                if r==1:
                    refFile = cat4d(funcFile,'ref_'+task,ref_idx)
                ######### motion correct
                funcFile,these_mc_files = correctMotion(funcFile,refFile)
                # NOTE(review): this comprehension rebinds t, shadowing the
                # task-loop index; harmless because the outer loop reassigns
                # t each iteration, but worth renaming.
                mcFiles = ["%s %s" % t for t in zip(mcFiles,these_mc_files)]
                ######### smooth
                funcFile = smooth(funcFile)
                ######### convert to % change units
                funcFile = convertUnits(funcFile)
                ######### high pass filter
                funcFile = hpFilter(funcFile)
                ######### list of pre-processed file names for concatenating
                ppFiles = ppFiles + [funcFile]
                print('\n\nFINISHED RUN '+str(r)+'\n\n')
            ######### convert pre-processed files to nifti & concat runs
            outFile = cat4d(ppFiles,'pp_'+task)
            ######### concatenate motion files across runs
            concatRunMCFiles(mcFiles)
            ########## remove intermediary steps (i.e., *mid1* , *mid2*)
            for ri in range(r,0,-1):
                cmd = ('rm *'+task+str(ri)+'*')
                doCommand(cmd)
            print('\n\npre-processed '+task+' data saved to '+outFile+'\n\n')
        print('\nFINISHED SUBJECT '+subject+'\n') ########### end of subject loop
    print('\nDONE\n')
| true |
9651ca498f7074efad0084e6e7020919db291b41 | Python | ruanyangry/web-scraping-with-python-book-scripts | /ch03/3.2.1.3.py | UTF-8 | 2,167 | 2.796875 | 3 | [] | no_license | # _*_ coding: utf-8 _*_
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import datetime
import random
# Global state: pages visited so far, and an RNG seeded from the clock so
# every run follows a different random walk.
pages=set()
random.seed(datetime.datetime.now())
# get page internal links list
def getInternalLinks(bsObj, includeUrl):
    """Collect the unique hrefs on the page that are internal to includeUrl
    (start with '/' or contain the site's address)."""
    found = []
    pattern = re.compile("^(/|.*" + includeUrl + ")")
    for anchor in bsObj.findAll("a", href=pattern):
        href = anchor.attrs['href']
        if href is not None and href not in found:
            found.append(href)
    return found
# get page external links list
def getExternalLinks(bsObj, excludeUrl):
    """Collect the unique absolute hrefs on the page that do NOT contain
    excludeUrl (i.e. point at other sites)."""
    found = []
    pattern = re.compile("^(http|www)((?!" + excludeUrl + ").)*$")
    for anchor in bsObj.findAll("a", href=pattern):
        href = anchor.attrs['href']
        if href is not None and href not in found:
            found.append(href)
    return found
def splitAddress(address):
    """Strip the URL scheme and split the remainder on '/'.

    The first element of the returned list is the host. Generalized to
    handle 'https://' as well as 'http://' (http-only before); scheme-less
    addresses pass through unchanged.
    """
    addressParts = address.replace("https://", "").replace("http://", "").split("/")
    return addressParts
def getRandomExternalLink(startingPage):
    """Return a random external link found on startingPage.

    If the page has no external links, recurse into a random *internal*
    link and keep looking there.
    """
    html = urlopen(startingPage)
    bsObj = BeautifulSoup(html)
    externalLinks = getExternalLinks(bsObj, splitAddress(startingPage)[0])
    if len(externalLinks) == 0:
        # BUG FIX: the original called getInternalLinks with the wrong
        # arguments (it takes a parsed page plus the site address, not a
        # URL) and recursed into a nonexistent getNextExternalLink().
        internalLinks = getInternalLinks(bsObj, splitAddress(startingPage)[0])
        return getRandomExternalLink(
            internalLinks[random.randint(0, len(internalLinks) - 1)])
    else:
        return externalLinks[random.randint(0, len(externalLinks) - 1)]
def followExternalOnly(startingSite):
    """Random walk over external links starting at startingSite, printing
    each hop. Recurses indefinitely (until an error or the recursion limit).

    BUG FIX: the original ignored its startingSite parameter and always
    restarted the walk from http://oreilly.com.
    """
    externalLink = getRandomExternalLink(startingSite)
    print("The random link:" + externalLink)
    followExternalOnly(externalLink)
# Global visited sets for the full-site crawl below.
allExtLinks=set()
allIntLinks=set()
def getAllExternalLinks(siteUrl):
    """Depth-first crawl from siteUrl: print every newly seen external link
    and recurse into every newly seen internal link (unbounded)."""
    soup = BeautifulSoup(urlopen(siteUrl))
    site_address = splitAddress(siteUrl)[0]
    internalLinks = getInternalLinks(soup, site_address)
    externalLinks = getExternalLinks(soup, site_address)
    for external in externalLinks:
        if external not in allExtLinks:
            allExtLinks.add(external)
            print(external)
    for internal in internalLinks:
        if internal not in allIntLinks:
            print('Coming url:' + internal)
            allIntLinks.add(internal)
            getAllExternalLinks(internal)
#followExternalOnly("http://oreilly.com")
getAllExternalLinks("http://oreilly.com")
| true |
d299134c6d020126e2b8fcc0ec8e5f0c382d6f07 | Python | elliotmoose/2SatSolver | /main.py | UTF-8 | 286 | 2.640625 | 3 | [] | no_license | import basic.cnfgraph as cnfgraph
import basic.parser as parser
# Parse the CNF input file into clauses, build the implication graph for
# the 2-SAT instance, and print every node of the resulting graph.
clauses = parser.parse_cnf_file_to_clauses(parser.INPUT_FILE_PATH)
edges = cnfgraph.edges_from_clauses(clauses)
nodes = cnfgraph.graph_nodes_from_edges(edges)
print(", \n".join(str(node) for node in nodes.values()))
| true |
42476868a596397efe556d2367d80d79e8e2297b | Python | wunnox/python_grundlagen | /stadtlauf_bern_oo_modul.py | UTF-8 | 4,886 | 3.046875 | 3 | [] | no_license | #! env python3
##############################################
#
# Name: stadtlauf_bern_oo_modul.py
#
# Author: Peter Christen
#
# Version: 1.0
#
# Date: 10.09.2022
#
# Purpose: Modul zu Script Stadtlauf_Bern_OO.py
#
##############################################
#Module
import pygame
#Initialisierung
pygame.init()
pygame.display.set_caption("Spaziergang durch Bern")
screen = pygame.display.set_mode((1050,400))
#Bilder
walkRight = [pygame.image.load('Bilder/R1.png'), pygame.image.load('Bilder/R2.png'), pygame.image.load('Bilder/R3.png'), pygame.image.load('Bilder/R4.png'), pygame.image.load('Bilder/R5.png'), pygame.image.load('Bilder/R6.png'), pygame.image.load('Bilder/R7.png'), pygame.image.load('Bilder/R8.png'), pygame.image.load('Bilder/R9.png')]
walkLeft = [pygame.image.load('Bilder/L1.png'), pygame.image.load('Bilder/L2.png'), pygame.image.load('Bilder/L3.png'), pygame.image.load('Bilder/L4.png'), pygame.image.load('Bilder/L5.png'), pygame.image.load('Bilder/L6.png'), pygame.image.load('Bilder/L7.png'), pygame.image.load('Bilder/L8.png'), pygame.image.load('Bilder/L9.png')]
bg = pygame.image.load('Bilder/karte-bern_kl.jpg')
char = pygame.image.load('Bilder/standing.png')
#Variablen
font = pygame.font.SysFont('Comic Sans MS', 20)
clock = pygame.time.Clock()
xt=0 #Standard Textposition x
yt=0 #Standard Textposition y
x = 20
y = 155
walkCount = 0
keys = pygame.key.get_pressed()
def redrawGameWindow(text2show,xt,yt,x,y,left,right):
    '''Redraw the whole frame: background map, overlay text at (xt, yt) and
    the figure sprite at (x, y) — a walking animation while moving, the
    standing sprite otherwise.  Uses the module-level screen/font/images
    and the global walkCount animation counter.'''
    global screen,walkCount
    textimg = font.render(text2show, True, (255, 0, 0))
    screen.blit(bg, (0,0))
    screen.blit(textimg, (xt,yt))
    # 9 animation frames shown for 3 ticks each -> wrap the counter at 27.
    if walkCount + 1 >= 27:
        walkCount = 0
    if left:
        screen.blit(walkLeft[walkCount//3], (x,y))
        walkCount += 1
    elif right:
        screen.blit(walkRight[walkCount//3], (x,y))
        walkCount += 1
    else:
        # Not moving: show the standing sprite and reset the animation.
        screen.blit(char, (x, y))
        walkCount = 0
    pygame.display.update()
class Figur:
    """Manages the player figure: position, facing direction and movement.

    The figure moves on a fixed map; (min_x, max_x) and (min_y, max_y)
    bound the walkable area.  Movement methods return the updated state
    so the caller can redraw the frame.
    """
    # Constructor
    def __init__(self, figur):
        self.figur = figur     # identifying key of the figure
        self.x = 20            # start position x
        self.y = 155 if False else 150  # start position y (see original: 150)
        self.y = 150
        self.max_x = 1000      # max position x
        self.max_y = 340       # max position y
        self.min_x = -10       # min position x
        self.min_y = -10       # min position y
        self.left = False      # not facing left
        self.right = False     # not facing right
    def go_walk_right(self, gx, gy):
        '''Jump to (gx, gy) along a scripted path, facing right.'''
        self.x, self.y = gx, gy
        self.left = False
        self.right = True
        return self.x, self.y, self.left, self.right
    def go_walk_left(self, gx, gy):
        '''Jump to (gx, gy) along a scripted path, facing left.'''
        self.x, self.y = gx, gy
        self.left = True
        self.right = False
        return self.x, self.y, self.left, self.right
    def go_left(self, steps=1):
        '''Walk left by `steps`; stop at the left map border.'''
        # Bug fix: the border test compared x against min_y instead of
        # min_x (both are -10, so observable behaviour is unchanged).
        if self.x > self.min_x:
            self.x -= steps
            self.left = True
            self.right = False
            return self.x, self.left, self.right
        else:
            self.go_stop()
    def go_right(self, steps=1):
        '''Walk right by `steps`; stop at the right map border.'''
        if self.x < self.max_x:
            self.x += steps
            self.left = False
            self.right = True
            return self.x, self.left, self.right
        else:
            self.go_stop()
    def go_stop(self):
        '''Stop all movement and reset the walk-animation counter.'''
        self.left = False
        self.right = False
        walkCount = 0
        return walkCount, self.left, self.right
    def go_up(self, steps=1):
        '''Walk up by `steps`; stop at the top map border.'''
        if self.y > self.min_y:
            self.y -= steps
            return self.y
        else:
            self.go_stop()
    def go_down(self, steps=1):
        '''Walk down by `steps`; stop at the bottom map border.'''
        if self.y < self.max_y:
            self.y += steps
            return self.y
        else:
            self.go_stop()
    def check_key(self):
        '''Poll the keyboard and move accordingly.

        Returns (x, y, left, right, keep_running); keep_running turns
        False when the 'q' key is pressed.
        '''
        self.left = False
        self.right = False
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Window-close events are drained but otherwise ignored
                # (the original stored a dead `run = False` here); the
                # program is quit via the 'q' key below.
                pass
        keys = pygame.key.get_pressed()
        if keys[pygame.K_RIGHT]:
            self.go_right(1)
            self.right = True
            print("Position x,y: ", self.x, self.y)
        elif keys[pygame.K_LEFT]:
            self.go_left(1)
            self.left = True
            print("Position x,y: ", self.x, self.y)
        if keys[pygame.K_UP]:
            self.go_up(1)
            print("Position x,y: ", self.x, self.y)
        elif keys[pygame.K_DOWN]:
            self.go_down(1)
            print("Position x,y: ", self.x, self.y)
        if keys[pygame.K_q]:
            return self.x, self.y, self.left, self.right, False
        else:
            return self.x, self.y, self.left, self.right, True
| true |
ac81587b2a5a420aed9de25ee9485bc5f6392f8d | Python | simonbrahan/adventofcode | /2015/15/15_1.py | UTF-8 | 1,704 | 3.359375 | 3 | [] | no_license | import re
def get_cmd(line):
    """Parse one puzzle ingredient line into a dict of name and properties."""
    match = re.search(
        r'(\w+): capacity (-?\d+), durability (-?\d+), flavor (-?\d+), texture (-?\d+), calories (-?\d+)',
        line)
    name, capacity, durability, flavour, texture, calories = match.groups()
    return {
        'name': name,
        'capacity': int(capacity),
        'durability': int(durability),
        'flavour': int(flavour),
        'texture': int(texture),
        'calories': int(calories),
    }
def get_recipe_score(ingredients, recipe):
    """Score a recipe.

    The score is the product of the (non-negative-clamped) totals of
    capacity, durability, flavour and texture; calories are summed and
    reported separately.
    """
    totals = {'capacity': 0, 'durability': 0, 'flavour': 0,
              'texture': 0, 'calories': 0}
    for label, quantity in recipe.items():
        props = ingredients[label]
        for key in totals:
            totals[key] += quantity * props[key]
    score = 1
    for key in ('capacity', 'durability', 'flavour', 'texture'):
        score *= max(totals[key], 0)
    return {'score': score, 'cals': totals['calories']}
# Load the ingredient table, then brute-force every split of 100 spoonfuls
# over the four ingredients, keeping the best score among 500-calorie
# recipes.  (Python 2 script: note the print statement below.)
ex_input = open('input.txt', 'r')
ingredients = {}
for line in ex_input:
    ingredient = get_cmd(line)
    ingredients[ingredient['name']] = ingredient
high_score = 0
spoonfuls = 100
for i in range(spoonfuls + 1):
    recipe = { 'Frosting': i }
    for j in range(spoonfuls - i + 1):
        recipe['Candy'] = j
        for k in range(spoonfuls - i - j + 1):
            recipe['Butterscotch'] = k
            # The fourth ingredient takes whatever spoonfuls remain.
            recipe['Sugar'] = spoonfuls - i - j - k
            recipe_stats = get_recipe_score(ingredients, recipe)
            if recipe_stats['cals'] == 500:
                high_score = max(recipe_stats['score'], high_score)
print high_score
| true |
b1a7791f802362e7de3dfb0bac6b56aebcc43a24 | Python | anachronic/tarea1-seguridad | /mac.py | UTF-8 | 1,919 | 3.21875 | 3 | [] | no_license | #!/usr/bin/python
import sys
import os
from parser import parsear_input
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives import hashes, padding
from cryptography.hazmat.backends import default_backend
def usage():
    """Print command-line usage help to stdout."""
    lines = (
        "usage",
        "mac -g <key>: Genera un MAC para la entrada estandar usando key como clave",
        "mac -v <key>: Verifica la autenticidad de un texto (primera linea: texto, segunda linea: MAC)",
    )
    for line in lines:
        print(line)
# en retrospectiva bastaba llamar a encriptar y sacar los ultimos 16 bytes...
def generar_mac(texto, key):
    """Compute a CBC-MAC: the last AES-CBC ciphertext block of `texto`,
    keyed with SHA256(key) and a zero IV.

    Returns the final 16-byte block, or None if encryption somehow
    produced fewer than 16 bytes.
    """
    # Derive a 256-bit AES key by hashing the passphrase.
    hasher = hashes.Hash(hashes.SHA256(), backend=default_backend())
    hasher.update(key.encode())
    aes_key = hasher.finalize()

    # All-zero IV, as specified in the course slides.
    zero_iv = bytes(16)
    encryptor = Cipher(algorithms.AES(aes_key), modes.CBC(zero_iv),
                       backend=default_backend()).encryptor()

    padder = padding.PKCS7(128).padder()
    padded = padder.update(texto) + padder.finalize()
    ciphertext = encryptor.update(padded) + encryptor.finalize()

    # The MAC is the last cipher block (equivalent to encrypting and
    # keeping the final 16 bytes).
    return ciphertext[-16:] if len(ciphertext) >= 16 else None
def verificar_mac(texto, mac, key):
    """Return True when `mac` equals the MAC freshly computed for `texto`."""
    expected = generar_mac(texto, key)
    return expected == mac
if __name__ == "__main__":
if sys.argv[1] == "-g":
texto = sys.stdin.readline()
print("MAC generado: " +
str(generar_mac(parsear_input(texto), str(sys.argv[2]))))
elif sys.argv[1] == '-v':
print("Ingrese el texto a verificar: ")
texto = sys.stdin.readline()
print("Ingrese el MAC a verificar:")
mac = sys.stdin.readline()
ok = verificar_mac(parsear_input(
texto), parsear_input(mac), str(sys.argv[2]))
if ok:
print("ok")
else:
print("no")
else:
usage()
| true |
81904127481ed6cf633dc4a1ab61d1ddf611f03c | Python | codingant007/fuzzy-fiesta | /makeMetadata.py | UTF-8 | 4,900 | 2.796875 | 3 | [] | no_license | # Standalone script for generating pickle file of metadata
# Generates alldata.pkl, train.pkl, val.pkl, test.pkl
# Generates alldata_group.pkl, train_group.pkl, val_group.pkl, test_group.pkl
# usage - makeMetadata.py <dataset path>
import six.moves.cPickle as pickle
import os,sys
from os.path import isfile, join
import re
import random
dataset_path = sys.argv[1]
def insert_groupinfo(sample, class_name, label_number, group_number, group_info):
    """Append `sample` to its group's entry in `group_info` (creating the
    entry if needed) and return the mutated mapping.

    Entry layout: (group_number, class_name, label_number, [samples...]).
    Asserts that an existing entry's metadata matches the new sample's.
    """
    existing = group_info.get(group_number)
    if existing is None:
        samples = []
    else:
        # Sanity-check that group metadata is consistent across samples.
        assert existing[0] == group_number
        assert existing[1] == class_name
        assert existing[2] == label_number
        samples = existing[3]
    samples.append(sample)
    group_info[group_number] = (group_number, class_name, label_number, samples)
    return group_info
def getSamplesInfo():
    '''Walk the global dataset_path: each subdirectory is a class, and each
    file named "<group>-..." inside it is a sample.  Returns
    (sample_list, group_info) where sample_list holds
    (sample_path, class_name, label_number, group_number) tuples and
    group_info maps group_number -> (group_number, class_name,
    label_number, [sample_paths]).  (Python 2 script: print statement.)'''
    sample_list = []
    group_info = {}
    label_number = 0
    for class_name in os.listdir(dataset_path):
        if not isfile(join(dataset_path,class_name)):  # directories only
            print class_name
            label_number = label_number+1
            for sample in os.listdir(join(dataset_path,class_name)):
                if not sample.find('-') < 0:  # only files containing '-'
                    sample_path = join(join(dataset_path,class_name),sample)
                    group_number = sample.split('-')[0]
                    group_info = insert_groupinfo(sample_path ,class_name ,label_number ,group_number ,group_info)
                    sample_list.append((sample_path,class_name,label_number,group_number))
    return sample_list,group_info
def partition(sample_list, proportion=(0.7, 0.1, 0.2)):
    """Split `sample_list` into (train, validation, test) slices.

    The first two proportions are converted to element counts with int()
    truncation; the test split receives everything that remains.
    """
    n_total = len(sample_list)
    n_train = int(n_total * proportion[0])
    n_val = int(n_total * proportion[1])
    train = sample_list[:n_train]
    validation = sample_list[n_train:n_train + n_val]
    test = sample_list[n_train + n_val:]
    return train, validation, test
def get_samples_from_groups(group_list):
    """Flatten group tuples back into per-sample tuples
    (sample_path, class_name, label_number, group_number)."""
    return [(path, group[1], group[2], group[0])
            for group in group_list
            for path in group[3]]
def filter_samples(sample_list, group_numbers):
    """Keep only the samples whose group number is in `group_numbers`."""
    kept = []
    for sample in sample_list:
        if sample[3] in group_numbers:
            kept.append(sample)
    return kept
def filter_groups(group_list, class_count, fraction):
    """Keep the first int(len*fraction) groups of each class, restricted
    to classes whose label number is <= class_count."""
    labels = {group[2] for group in group_list}
    filtered = []
    for label in labels:
        if label > class_count:
            continue
        groups = [group for group in group_list if group[2] == label]
        keep = int(len(groups) * fraction)
        filtered += groups[:keep]
    return filtered
def saveSamplesInfo(alldata_file="metadata/alldata.pkl",train_file="metadata/train.pkl",val_file="metadata/val.pkl",test_file="metadata/test.pkl", \
                alldata_group_file="metadata/alldata_group.pkl",train_group_file="metadata/train_group.pkl",val_group_file="metadata/val_group.pkl",test_group_file="test_group.pkl"):
    '''Scan the dataset, keep 20% of the groups of the first 4 classes,
    shuffle, split 70/10/20 by group, and pickle both the group-level and
    sample-level splits to the given paths.  (Python 2 script.)

    NOTE(review): test_group_file's default lacks the "metadata/" prefix
    used by every other path — confirm whether that is intentional.'''
    sample_list,group_info = getSamplesInfo()
    group_list = group_info.values()
    # Keep only classes 1..4 and a 0.2 fraction of each class's groups.
    filtered_groups = filter_groups(group_list, 4 , 0.2)
    group_numbers = set(map(lambda tup:tup[0],filtered_groups))
    sample_list = filter_samples(sample_list,group_numbers)
    group_list = filtered_groups
    print "No of Groups: ",len(group_list)
    random.shuffle(sample_list)
    random.shuffle(group_list)
    # Split at the group level so a group never straddles two splits.
    train_set,val_set,test_set = partition(group_list)
    print "No of Train groups: ",len(train_set)
    print "No of Val groups: ",len(val_set)
    print "No of Test groups: ",len(test_set)
    train_samples = get_samples_from_groups(train_set)
    val_samples = get_samples_from_groups(val_set)
    test_samples = get_samples_from_groups(test_set)
    print "No of Train Samples: ",len(train_samples)
    print "No of Val Samples: ",len(val_samples)
    print "No of Test groups: ",len(test_samples)
    print "Sample list length: ", len(sample_list)
    print "Total Number of samples: ",(len(train_samples)+len(val_samples)+len(test_samples))
    # Sanity check: the three splits must account for every sample.
    assert len(sample_list) == len(train_samples)+len(val_samples)+len(test_samples)
    with open(train_file,'wb') as f:
        pickle.dump(train_samples , f, pickle.HIGHEST_PROTOCOL)
    with open(val_file,'wb') as f:
        pickle.dump(val_samples , f, pickle.HIGHEST_PROTOCOL)
    with open(test_file,'wb') as f:
        pickle.dump(test_samples , f, pickle.HIGHEST_PROTOCOL)
    with open(alldata_file,'wb') as f:
        pickle.dump(sample_list , f, pickle.HIGHEST_PROTOCOL)
    with open(train_group_file,'wb') as f:
        pickle.dump(train_set , f, pickle.HIGHEST_PROTOCOL)
    with open(val_group_file,'wb') as f:
        pickle.dump(val_set , f, pickle.HIGHEST_PROTOCOL)
    with open(test_group_file,'wb') as f:
        pickle.dump(test_set , f, pickle.HIGHEST_PROTOCOL)
    with open(alldata_group_file,'wb') as f:
        pickle.dump(group_list , f, pickle.HIGHEST_PROTOCOL)
    print "Metadata saved to eight pickle files in metadata/",
print "Metadata saved to eight pickle files in metadata/",
if __name__ == '__main__':
saveSamplesInfo() | true |
0d2b34b01f1e226835682a12126aff2b01844aec | Python | Orangeman1226/ProbabilityRobotics | /CameraCs.py | UTF-8 | 12,904 | 2.625 | 3 | [] | no_license | import math
import numpy as np
from scipy.stats import expon , norm ,uniform
import IdealCameraCs as ICamera
from enum import Enum
import OccSensorNoisemarkCs as OccSensNisemark
import WorldCs as wo
from enum import Enum
class KindofObsEventNoise(Enum):
    """Tags recorded per observation describing which noise event fired."""
    noEvent ="-"         # nothing unusual happened
    phantom = "p"        # a non-existent landmark was observed
    oversight = "o"      # a visible landmark was missed
    occulusion = "c"     # the measurement was corrupted by occlusion
    overRange = "r"      # the landmark lies outside the sensor range
#
# センサには、偶然誤差、系統誤差、過失誤差等があり、具体的に言うといかが考えられる
#
# 1)計測値に対する雑音:ガウス性の雑音でセンサ値がばらつく
# ⇒ガウス分布:計測値を平均に、距離に対しては計測値の一定割合、方角に対しては一定の値を標準偏差としたガウス分布とする。
# 計測距離値が遠くなればなるほどばらつくような仕掛けとする。
# 計測値が実際の値よりも、定常的に大きいまたは小さい値を出すようにする。
#
# 2)バイアス:常に計測値(距離・方角)に一定値を加えるもの
# ⇒一様分布:計測値に対して、一定値だけずらす。系統誤差によるもの表現。
# Sensorが生成されると決定される。距離に対しては一定の割合、方角に対しては一定値を加える
#
# 3)ファントム:計測レンジ内において見えないはずの物体を観測する
# ⇒計測レンジ内の一様分布:偽のランドマークの値をドローして、極座標(r,θ)を返す
# 確率分布は、2項分布とし、一定の確率でファントムが発生する。また、ランドマークが多いほど、観測回数が増えるため
# 同時に、ファントムが観測される回数が増える仕組みとする。
#
# 4)見落とし:計測レンジ内において見えるはずの物体を見落とす
# ⇒計測レンジ内の一様分布:ランドマーク極座標(r,θ)をNone返す
#
# 5)オクルージョン:ランドマークの一部が移動体により隠されてしまいセンサ値に影響が出る。
# ⇒オクルージョンが発生するまでの時間期待値をλとし、
# 事象が発生する単位時間tにおける確率密度は、 P(X|λ)=λexp(-λx) で表される。
# 確率変数Xは、T=n*tのnのことである。
# また、オクルージョンは、計測した値よりも障害物で隠れてしまい、大きくなったり、小さくなったりしてしまうため、
# 計測値に計測レンジにはみ出さない値を(±)加える
#
class Camera(ICamera.IdealCamera):
    """Noisy camera sensor model.

    Extends IdealCamera with the error sources described in the module
    comment above: Gaussian measurement noise, a constant per-sensor
    bias, phantom observations, oversights, occlusions (exponentially
    distributed in time) and range limits.  Each observation is a
    (distance, bearing) pair per landmark, tagged with the noise events
    that occurred (KindofObsEventNoise).
    """
    def __init__( self,env_map,time_interval,
                 distance_range=(0.5,6.0),camera_phi_range=(-math.pi/3,math.pi/3),
                 distance_noise_rate = 0.1,camera_phi_noise=math.pi/90,
                 distance_bias_rate_stddev = 0.1,cameraphi_bias_stddev = math.pi/90,
                 phantom_binomialprob = 0.01,phantom_Xrange =wo.World.getWorldRange(),phantom_Yrange =wo.World.getWorldRange(),
                 oversight_binomialprob = 0.1,expected_occlusion_time=5 , occfarOcclusion_prob = 0.5):
        super().__init__(env_map,distance_range,camera_phi_range)
        self.observeds = []                             # last observation list
        self.obsEvents = []                             # noise events of the landmark being processed
        self.distance_noise_rate = distance_noise_rate  # distance-noise stddev as a fraction of the reading
        self.camera_phi_noise = camera_phi_noise        # bearing-noise stddev [rad]
        # Constant per-sensor bias, drawn once at construction (systematic error).
        self.dist_bias_rate_std = norm(scale=distance_bias_rate_stddev).rvs()
        self.cameraphi_bias_std = norm(scale=cameraphi_bias_stddev).rvs()
        rx,ry = phantom_Xrange,phantom_Yrange
        # Uniform distribution over the world for fake landmark positions.
        self.phantom_uniform_pdf = uniform(loc = (rx[0],ry[0]),scale=(rx[1]-rx[0],ry[1]-ry[0]))
        self.phantom_prob = phantom_binomialprob
        self.oversight_prob =oversight_binomialprob
        # Time until the next occlusion is exponentially distributed.
        self.OccOcclusion_dst = expon(scale = expected_occlusion_time)
        self.Occlusion_time = self.OccOcclusion_dst.rvs()
        self.far_Occl_prob = occfarOcclusion_prob
        self.time_interval = time_interval
    def MeasuredValNoise(self,relpos):
        # Gaussian measurement noise: mean = reading; distance stddev is a
        # fixed fraction of the reading (farther -> noisier), bearing
        # stddev is constant.
        addNoise_distance = norm(loc = relpos[0],scale=relpos[0]*self.distance_noise_rate).rvs()
        addNoise_camphi = norm(loc = relpos[1],scale=self.camera_phi_noise).rvs()
        return np.array([addNoise_distance,addNoise_camphi]).T
    def addBisasNoise(self,relpos):
        # Systematic bias: a fixed fraction of the distance and a fixed
        # bearing offset, both drawn once in __init__.
        return relpos + np.array([ relpos[0] * self.dist_bias_rate_std , self.cameraphi_bias_std ]).T
    def ObservePhantom(self,relpos,cam_pose):
        # With probability phantom_prob replace the reading with a fake
        # landmark drawn uniformly from the world, converted to (r, phi).
        if uniform.rvs() < self.phantom_prob:
            self.obsEvents.append(KindofObsEventNoise.phantom)
            phantom_landark = np.array(self.phantom_uniform_pdf.rvs()).T
            return self.observeation_function(cam_pose,phantom_landark)
        else:
            self.obsEvents.append(KindofObsEventNoise.noEvent)
            return relpos
    def Oversight(self,relpos):
        # With probability oversight_prob the landmark is missed entirely.
        if uniform.rvs() < self.oversight_prob:
            self.obsEvents.append(KindofObsEventNoise.oversight)
            return None
        else:
            self.obsEvents.append(KindofObsEventNoise.noEvent)
            return relpos
    def OccOcclusion(self,relpos):
        # When the occlusion timer has elapsed, corrupt the distance:
        # either stretched toward the far range limit or shrunk toward 0,
        # then re-arm the timer with a fresh exponential draw.
        if self.Occlusion_time <=0 :
            self.Occlusion_time += self.OccOcclusion_dst.rvs()
            self.obsEvents.append(KindofObsEventNoise.occulusion)
            if uniform.rvs() < self.far_Occl_prob:
                # Looks farther than it really is.
                far_occ_r = relpos[0] + uniform.rvs()*(self.distance_range[1] - relpos[0])
                return np.array([far_occ_r,relpos[1]]).T
            else:
                # Looks nearer than it really is.
                near_occ_r = relpos[0] + uniform.rvs()*( - relpos[0] )
                return np.array([near_occ_r,relpos[1]]).T
        else:
            self.obsEvents.append(KindofObsEventNoise.noEvent)
            return relpos
    def visible_bySensor(self,pairposes):
        # Is the (distance, bearing) pair inside the sensor's field of
        # view and not flagged as overlooked?
        if pairposes is None: return False
        else:
            dis_frRtoLM = pairposes[0]
            phi_frRtoLM = pairposes[1]
            if( self.distance_range[0] <= dis_frRtoLM <= self.distance_range[1] and
                self.camera_phi_range[0] <= phi_frRtoLM <= self.camera_phi_range[1] ):
                if KindofObsEventNoise.oversight in self.obsEvents:
                    return False
                else :
                    return True
            else:
                self.obsEvents.append(KindofObsEventNoise.overRange)
                return False
    def data(self,cam_pos):
        '''Observe every landmark from camera pose cam_pos, applying the
        noise pipeline, and return a list of
        ([distance, phi], landmark_id, events) tuples — (None, None,
        events) for landmarks that were not observed.'''
        self.observeds = []
        self.Occlusion_time -= self.time_interval  # advance the occlusion timer
        for lm in self.map.landmarks:
            self.obsEvents = []
            observed = self.observeation_function(cam_pos,lm.pos)  # ideal (r, phi) from robot to landmark
            observed = self.ObservePhantom(observed,cam_pos)  # maybe swap in a phantom
            observed = self.OccOcclusion(observed)  # maybe corrupt by occlusion
            self.Oversight(observed)  # maybe flag as overlooked (event only)
            if self.visible_bySensor(observed) :  # in range and not overlooked?
                observed = self.MeasuredValNoise(observed)  # Gaussian measurement noise
                observed = self.addBisasNoise(observed)  # constant systematic bias
                # Stored as a tuple on purpose, to keep the record immutable.
                self.observeds.append((observed,lm.id,self.obsEvents))
            else :
                self.observeds.append((None,None,self.obsEvents))
        self.obs_data = self.observeds
        return self.observeds
    def draw(self,ax,elems,cam_pos,robot_cirSize = 0) :  # ax: subplot to draw on; elems: list of artists to be cleared each frame
        '''Visualise the last observations: a ray per seen landmark
        (colour-coded by noise event), textual banners for events, and the
        sensor's field-of-view triangle.'''
        x_frRcom,y_frRcom,theta_frRcom = cam_pos
        for obs in self.obs_data:
            # Change how each observation is drawn depending on its events.
            if obs[0] is None :
                obsEvent = obs[2]
                if KindofObsEventNoise.overRange not in obsEvent:
                    if KindofObsEventNoise.oversight in obsEvent:
                        elems.append(ax.text(-7.5,-7.5,"************\n"+"Occurred oversight\n************",fontsize=10))
            else:
                dis_frRCom , phi_frRCom , obsEvent = obs[0][0],obs[0][1],obs[2]
                lx = x_frRcom + dis_frRCom * math.cos(theta_frRcom + phi_frRCom)
                ly = y_frRcom + dis_frRCom * math.sin(theta_frRcom + phi_frRCom)
                if KindofObsEventNoise.phantom in obsEvent :
                    # plot([x0, x1], [y0, y1]) draws the segment from the camera to the observation
                    elems += ax.plot([x_frRcom,lx],[y_frRcom,ly],color="orange",linewidth = 3,linestyle="--")
                    elems.append(ax.text(-7.5,-8.5,"************\n"+"Observed phantom\n************",fontsize=10))
                elif KindofObsEventNoise.occulusion in obsEvent:
                    elems += ax.plot([x_frRcom,lx],[y_frRcom,ly],color="blue",linewidth = 3,linestyle="--")
                    elems.append(ax.text(-7.5,-6.5,"************\n"+"Occurred occulusion\n************",fontsize=10))
                else :
                    elems += ax.plot([x_frRcom,lx],[y_frRcom,ly],color="green",linewidth = 3)
        # Draw the sensor range (field-of-view) triangle.
        lx_range1 = x_frRcom + (self.distance_range[1]-self.distance_range[0]) * math.cos(theta_frRcom + self.camera_phi_range[0])
        ly_range1 = y_frRcom + (self.distance_range[1]-self.distance_range[0]) * math.sin(theta_frRcom + self.camera_phi_range[0])
        lx_range2 = x_frRcom + (self.distance_range[1]-self.distance_range[0]) * math.cos(theta_frRcom + self.camera_phi_range[1])
        ly_range2 = y_frRcom + (self.distance_range[1]-self.distance_range[0]) * math.sin(theta_frRcom + self.camera_phi_range[1])
        lx_rangestart = x_frRcom + (robot_cirSize + self.distance_range[0]) * math.cos(theta_frRcom)
        ly_rangestart = y_frRcom + (robot_cirSize + self.distance_range[0]) * math.sin(theta_frRcom)
        elems += ax.plot([lx_rangestart,lx_range1],[ly_rangestart,ly_range1],color=(0, 0, 0.3, 0.4))
        elems += ax.plot([lx_rangestart,lx_range2],[ly_rangestart,ly_range2],color=(0, 0, 0.3, 0.4))
        x_rangefill = (lx_rangestart,lx_range1,lx_range2)
        y_rangefill = (ly_rangestart,ly_range1,ly_range2)
        elems += ax.fill(x_rangefill,y_rangefill,color = (0, 0, 0.2, 0.1))  # shade the sensor-range interior
| true |
cc770d9a51f95c4a9dc7dc74e3b2bfd8819a9478 | Python | nmounzih/weather-report | /weather.py | UTF-8 | 524 | 2.78125 | 3 | [] | no_license | import requests
from weather_classes import Current_conditions, Sunrise_set, Hurricane, Alert, TenDay
def main():
    """Prompt for a ZIP code, fetch the Weather Underground report for it
    and print each section (current conditions, sun times, hurricanes,
    alerts, 10-day forecast)."""
    zipcode = input("Enter zipcode: ")
    # NOTE(review): the API key is embedded in the URL — consider moving
    # it out of source control.
    r = requests.get('http://api.wunderground.com/api/b042b9ca2dcb4435/alerts/currenthurricane/forecast10day/astronomy/conditions/q/{}.json'.format(zipcode))
    report = r.json()
    print(Current_conditions(report))
    print(Sunrise_set(report))
    print(Hurricane(report))
    print(Alert(report))
    print(TenDay(report))
if __name__ == '__main__':
main()
| true |
ee1e7c26e26ee612511da1f5168352969a89f0dc | Python | pauleveritt/wired_injector | /examples/protocols/simple_protocols/__init__.py | UTF-8 | 808 | 2.75 | 3 | [
"MIT"
] | permissive | from typing import Tuple, Mapping, List
from .models import (
FrenchCustomer,
FrenchGreeter,
RegularCustomer,
RegularGreeter,
)
from .protocols import Customer, Greeter
def test() -> Tuple[List[str], List[str]]:
    """Greet each customer with the greeter registered for its class and
    return (expected greetings, actual greetings)."""
    customers: Tuple[Customer, ...] = (
        RegularCustomer(), FrenchCustomer(),
    )
    # Pain point: the protocol types force an awkward class-keyed lookup.
    greeters: Mapping[Customer, Greeter] = {
        RegularCustomer: RegularGreeter(),
        FrenchCustomer: FrenchGreeter(),
    }
    # Dispatch on each customer's concrete class to pick its greeter.
    greetings = [
        greeters[type(customer)].greet(customer)
        for customer in customers
    ]
    expected = ['Hello Marie!', 'Salut Sophie!']
    return expected, greetings
| true |
b17f7ed4ccba88b15de9d32f72ab1f629a043887 | Python | 173647085/practice | /homework7/7_3.py | UTF-8 | 1,354 | 2.953125 | 3 | [] | no_license | # -*- encoding: utf-8 -*-
'''
@File : 7_2.py
@Time : 2020/04/25 17:52:36
@Author : zxl
@Version : 1.0
@Contact : 173647085@qq.com
3给定一个网址(包含了优质的英语学习音频文件),http://www.listeningexpress.com/studioclassroom/ad/; 请大家写一个爬虫,将里面的英语节目MP3,都下载下来;
要求大家使用Requests库获取这个网页html文本内容,并且使用正则表达式获取里面所有的mp3文件的网址;并进行下载;
#Windows上的wget可以点击这里 下载。 这个程序不用安装,直接在命令行里使用即可;
'''
# here put the import lib
import requests
import re,os,wget
from urllib.parse import quote
from urllib import request
# Fetch the listing page with a browser-like User-Agent, pull out every
# "sc-ad ... .mp3" filename with a regex, de-duplicate, and download each
# file with wget next to the script.
headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'}
link = 'http://www.listeningexpress.com/studioclassroom/ad/'
response = requests.get(link, headers=headers)
html = response.text
src=r"sc-ad [^\"]*\.mp3"
mp3 = re.compile(src)
list1 = re.findall(mp3, html)
# De-duplicate; note this loses the original page order.
list1 = list(set(list1))
# os.chdir('homework7')
for i in list1:
    print('下载中', i)
    try:
        wget.download(link + i)
        print('文件{}下载成功'.format(i))
    except request.HTTPError:
        print('文件{}下载失败'.format(i))
print('任务完成!')
| true |
3cb89939bc3f0acc301fd69c2cf869e02f9626c5 | Python | TodimuJ/Python | /mnist_number.py | UTF-8 | 1,801 | 3.109375 | 3 | [] | no_license | import numpy as np
import mnist
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
train_images = mnist.train_images() #training data images
train_labels = mnist.train_labels() #training data labels
test_images = mnist.test_images() #test data images
test_labels = mnist.test_labels() #test data labels
#Normalize the values from [-0.5 - 0.5]
train_images = (train_images/255) - 0.5
test_images = (test_images/255) - 0.5
#Flatten the 28x28 pixel image to a 784 row vector
train_images = train_images.reshape((-1, 784))
test_images = test_images.reshape((-1, 784))
print(train_images.shape) #60000 rows 784 columns
print(test_images.shape) #10000 rows 784 columns
#Build the model: two ReLU hidden layers plus a 10-way softmax output.
model = Sequential()
model.add(Dense(64, activation='relu', input_dim=784))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
#Compile
model.compile(
    optimizer= 'adam',
    loss='categorical_crossentropy',
    metrics = ['accuracy']
)
#Train images
model.fit(
    train_images,
    to_categorical(train_labels), #10 dimensional vector
    epochs = 5, # number of iterations over the entire data set
    batch_size = 32 #number of samples per gradient update for training
)
#Evaluate the model on the held-out test set
model.evaluate(
    test_images,
    to_categorical(test_labels)
)
#predict on first 5 images
predictions = model.predict(test_images[:5])
#print models predictions (argmax converts softmax rows to class labels)
print(np.argmax(predictions, axis = 1))
print(test_labels[:5])
# Show the five test digits (note: pixels are still normalized to [-0.5, 0.5]).
for i in range(0,5):
    first_image = test_images[i]
    first_image = np.array(first_image, dtype = 'float')
    pixels = first_image.reshape((28, 28))
    plt.imshow(pixels)
plt.show() | true |
68250738c7b915911ed3720e0a26b568d2049d9f | Python | benjdj6/Hackerrank | /ProjectEuler/13-LargeSum.py | UTF-8 | 181 | 3.109375 | 3 | [] | no_license | # Enter your code here. Read input from STDIN. Print output to STDOUT
n = int(raw_input())
sum = 0
for i in range(n):
sum += int(raw_input())
print ''.join(list(str(sum))[0:10]) | true |
839d79968a352465c30653655da698d9e115abf0 | Python | valerydec17/learn-it | /Learn/Scrapper-2/tutsplus/tutsplus/spiders/myyoutspider.py | UTF-8 | 3,594 | 2.609375 | 3 | [] | no_license | from scrapy.spiders import Spider
from tutsplus.items import TutsplusItem
from tutsplus.items import AlternativetoItem
from tutsplus.items import YoutubeItem
from scrapy.http import Request
import re
import os
from bs4 import BeautifulSoup
import pdb
crawledLinks = []
class MySpider(Spider):
    """Scrapy spider (Python 2) that walks YouTube /watch? links starting
    from the VEVO channel, logging progress by shelling out to `echo`.

    NOTE(review): item extraction is entirely commented out and a
    pdb.set_trace() is left in parse() — this looks like work in
    progress; confirm before running unattended.
    """
    name = "myyout"
    allowed_domains = ["youtube.com"]
    start_urls = ["https://www.youtube.com/user/VEVO"]
    def parse(self, response):
        # Crude progress logging via the shell; crawledLinks is the
        # module-level list of URLs already queued.
        os.system('echo ' + ' !!!!!!!!!!!!! ' + ' >> yout_log.txt')
        os.system('echo ' + ' '.join(crawledLinks) + ' >> yout_log.txt')
        # Parse Item
        # item = AlternativetoItem()
        # item["title"] = response.xpath('/html/body/form/section/div[2]/header/div[1]/div/h1/text()').extract()[0]
        # item["likes"] = response.xpath('/html/body/form/section/div[2]/header/div[1]/div/div/div[2]/span/text()').extract()
        # item["description"] = response.xpath('/html/body/form/section/div[2]/div[1]/div[1]/div/span/text()').extract()
        # item["officialSite"] = response.xpath('/html/body/form/section/div[2]/div[3]/div[1]/div/a[2]/text()').extract()
        # os.system('echo Hi ' + ' '.join(response.xpath('/html/body/form/section/div[2]/header/div[1]/div/h1/text()').extract() ) + ' >> items.txt')
        str_title = ""
        str_shortDescription = ""
        item = YoutubeItem()
        # if statements should be put here
        # Grabs the loaded page for desirable items and perks.
        try:
            pdb.set_trace()
            # //*[@id="text"]
            # //*[@id="top-level-buttons"]/ytd-toggle-button-renderer[1]
            # //*[@id="button"]
            # //*[@id="text"]
            # item["title"] = response.xpath('/html/body/ytd-app/div[1]/ytd-page-manager/ytd-watch/div[2]/div[2]/div/div[6]/div[2]/ytd-video-primary-info-renderer/div/h1/yt-formatted-string/text()').extract()[0]
            # item["liked"] =
            # item["disliked"] = response.xpath('//*[@id="text"]/text()').extract()[0]
            # item["channelName"] =
            # str_title = response.xpath('/html/body/form/section/div[2]/header/div[1]/div/h1/text()').extract()[0]
            # str_shortDescription = response.xpath('/html/body/form/section/div[2]/header/div[1]/div/p/text()').extract()[0]
            # item["atitle"] = response.xpath('/html/body/form/section/div[2]/header/div[1]/div/h1/text()').extract()[0].strip()
            # item["shortDescription"] = response.xpath('/html/body/form/section/div[2]/header/div[1]/div/p/text()').extract()[0].strip()
            # item["likes"] = response.xpath('/html/body/form/section/div[2]/header/div[1]/div/div/div[2]/span/text()').extract()[0].strip()
            # item["description"] = response.xpath('/html/body/form/section/div[2]/div[1]/div[1]/div/span/p/text()').extract()[0].strip()
            # item["officialSite"] = response.xpath('/html/body/form/section/div[2]/div[3]/div[1]/div/a[2]/@href').extract()[0].strip()
        except:
            # NOTE(review): bare except silently swallows all errors here.
            print "Item not found on the page"
        print str_title, str_shortDescription
        # pdb.set_trace()
        # Parse links: queue every not-yet-seen /watch? href for crawling.
        links = response.xpath('//a/@href').extract()
        linkPattern = re.compile("\/watch\?")
        linksToCrawl = []
        for link in links:
            if linkPattern.match(link) and not link in crawledLinks:
                linksToCrawl.append(link)
                crawledLinks.append(link)
        for link in linksToCrawl:
            link = "https://www.youtube.com" + link
            print "New link started: ", link, " CrawledPages: ", str(len(crawledLinks))
            os.system('echo ' + 'Link num. : ' + str(len(crawledLinks)) + ' Link : ' + link + ' >> yout_log.txt')
            # crawledLinks.append(link)
            yield Request(link, self.parse)
        # # Avoids blanket lines in the output. Which for some reasons can occur.
        # if len(item["atitle"]) > 0:
        #     yield item
| true |
014619c1125ca3ae8d050d1a2d07fe19b78e8182 | Python | mayurkadampro/Tsunami-Warning-System | /Tsunami Warning System.py | UTF-8 | 1,800 | 3.046875 | 3 | [] | no_license |
from bs4 import BeautifulSoup
import requests
from pygame import mixer
if __name__ == '__main__':
    # Tide readings and thresholds are kept as strings (feet).
    # NOTE(review): the comparisons below are therefore lexicographic,
    # not numeric — e.g. "9.00" > "20.00" — confirm this is intended.
    High_Tide = ''
    High_Tide_Limit = '20.00'
    Low_Tide = ''
    Low_Tide_Limit = '2.00'
    #it's play the siren for High Tide
    def alert():
        mixer.init()
        alert = mixer.Sound('Sirens_2.wav')
        alert.play()
    #it's play the sound for very low tide
    def alert1():
        mixer.init()
        alert=mixer.Sound('beep-07.wav')
        alert.play()
    # Poll the tide-forecast page forever and sound the matching alarm.
    while True:
        url = "https://www.tide-forecast.com/locations/Bombay-India/tides/latest"
        data = requests.get(url)
        soup = BeautifulSoup(data.text,'html.parser')
        hightide = soup.select('.level > span')
        if hightide:
            High_Tide = hightide[1].text.strip('ft')
            print(High_Tide," - High_tide")
        lowtide = soup.select('.level > span')
        if lowtide:
            # NOTE(review): reads hightide[0], not lowtide[0] — confirm.
            Low_Tide = hightide[0].text.strip('ft')
            print(Low_Tide," - Low_tide")
        if High_Tide > High_Tide_Limit:
            print("Water Level Is Too High")
            alert() #Actuators For High Tide
        elif Low_Tide < Low_Tide_Limit:
            print("Water Level Is Low")
            alert1() #Actuators For Low Tide
        else:
            print("All Right")
'''
print(soup.prettify())
level = soup.find_all(class_='level')
for imp in soup.find_all('span',class_='imperial'):
for hightide in imp:
hightide = imp.text.strip('ft')
High_Tide = [hightide.strip(' ')]
print(High_Tide)
filename = 'tide.csv'
with open(filename, 'wb') as csvfile:
filewriter = csv.writer(csvfile, delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)
filewriter.writerow(['Name', 'Profession'])
filewriter.writerow(['Derek', 'Software Developer'])
filewriter.writerow(['Steve', 'Software Developer'])
filewriter.writerow(['Paul', 'Manager'])
'''
| true |
7988e57a226f7ee54f51f2f3ebbe2d5d5f27ed05 | Python | alexandraback/datacollection | /solutions_5738606668808192_1/Python/Loci/C-large.py | UTF-8 | 1,153 | 2.75 | 3 | [] | no_license | cases = int(raw_input())
from time import time
def isPrime(n):
    """Return a nontrivial divisor of n, or -1 when none is found.

    Despite the name, -1 means "no divisor found" (n looks prime).  The
    search also gives up after ~3 seconds and returns -1 so a stubborn
    candidate cannot stall the run.
    """
    if n % 2 == 0:
        return 2
    deadline = time() + 3
    limit = int(n ** 0.5) + 1
    candidate = 3
    while candidate < limit:
        if n % candidate == 0:
            return candidate
        # timeout for lame coins
        if time() > deadline:
            return -1
        candidate += 2
    return -1
def isJamCoin(binaryCoin):
    """Check a digit string interpreted in every base 2..10.

    Returns the list of one divisor per base when every interpretation is
    composite (a jamcoin), or False as soon as one interpretation looks
    prime (or the divisor search times out).
    """
    divisors = []
    for base in range(2, 11):
        divisor = isPrime(int(binaryCoin, base))
        if divisor == -1:
            return False
        divisors.append(divisor)
    return divisors
from random import randint
def getRandInnerCoin(n):
    """Return n-2 random '0'/'1' characters for the interior of a coin
    (the outer two digits are always '1' and added by the caller)."""
    bits = []
    for _ in range(n - 2):
        bits.append(str(randint(0, 1)))
    return bits
def debugzor(s):
    # Debug-print helper; flip `debug` to True to enable tracing.
    # NOTE: Python 2 print statement — this file targets Python 2.
    debug = False
    if debug:
        print s
# For each test case, emit J distinct random jamcoins of length N, each
# followed by one divisor per base 2..10. (Python 2: print statement,
# raw_input.)
for case in range(cases):
    print "Case #" + str(case+1) + ":"
    N,J = [int(i) for i in raw_input().split()]
    coinTrace = []
    # produce J jamcoins of length N
    jamcoinsLeft = J
    while jamcoinsLeft > 0:
        #debugzor("Generating randcoin")
        gen = ''.join(getRandInnerCoin(N))
        coin = '1' + gen + '1'
        # skip duplicates of coins we already emitted
        if coin in coinTrace:
            continue
        tmp = isJamCoin(coin)
        if tmp != False:
            coinTrace.append(coin)
            jamcoinsLeft -= 1
            print coin + ' ' + ' '.join([str(i) for i in tmp])
| true |
74b3d1f99351ac7a7695d56e3057259f9aa419a4 | Python | mpingram/yearly-grade-comparison-report | /scripts/data_access.py | UTF-8 | 295 | 3.015625 | 3 | [] | no_license | import pandas as pd
def get_grade_df(filepath):
    """Load the grade CSV and add a ClassName column formatted as
    "SubjectName (StudentHomeroom)"."""
    frame = pd.read_csv(filepath)

    def _class_name(row):
        # Combine subject and homeroom into one display label.
        return "{} ({})".format(row.loc["SubjectName"], row.loc["StudentHomeroom"])

    frame["ClassName"] = frame.apply(_class_name, axis=1)
    return frame
| true |
2927208b266568501c1dc105134471cc28839886 | Python | bgossage/biomath | /simple_epidemic.py | UTF-8 | 6,387 | 3.125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Wed March 14, 2018
@author: bgossage
"""
"""
A simple epidemic model
Mathematical Biology, J.D. Murray
Chapter 19.1
Given a disease in which recovery confers immunity:
S(t) .=. Susceptibles
I(t) .=. Infectives
R(t) .=. Removed (immune, or isolated)
D(t) .=. Deaths
(Often called "SIR" models.)
Assumptions:
1) The rate increase of infectives (r) is proportional to the number of
infectives and susceptibles
2) The susceptibles are lost at the same rate r.
3) The rate of removal of infectives (a) is proportional to the number of Infectives
that is = a * I
4) The incubation time is neglibile
5) The poplulation is a constant N s.t. S + I + R = N
6) dti .=. incubation period
The ODE system is:
dS/dt = -r * S(t) * I(t)
dI/dt = r * S(t) * I(t) - (a+d) * I(t)
dR/dt = (a-d) * I(t)
dD/dt = d * I(t)
Challenge: Show that an epidemic can only occur if S(0) > a/r.
Epidemic <= for some t, I(t) > I0
Below is the solution in python using numerical integration.
"""
import numpy
import scipy.integrate
from datetime import datetime
# Load the NYC case/hospitalisation/death CSV.
# NOTE(review): this value is shadowed by an identical re-load further below
# before it is ever used — confirm whether this first load is needed.
ny_data = numpy.genfromtxt('coronavirus-data/case-hosp-death.csv',
                           delimiter=',',dtype=None,encoding="utf8" )
import matplotlib
import matplotlib.pyplot
# Parameters
r = 0.4 # The infection rate
a = 0.01 # removal rate
distancing_factor = 0.5 # 0.5
recovery_rate = 0.1
death_rate = 0.01
quarantine_rate = 0.0 # 0.1
#r *= 1.0 - distancing_factor
# `a` is immediately recomputed from its components, overriding the 0.01 above.
a = quarantine_rate + recovery_rate;
d = death_rate
ld_start = 15.0 # start of lockdown
ld_end = 45.0 # end of lockdown
#S0 = a / r
print( "recovery rate = ", a )
print( "infection rate = ", r )
#print( "R0 = ", S0 )
conditions = "no intervention"
#
# Define a function that computes the derivatives at time t
# given the current populations of S, I, and R
#
def deriv( y, t, params ):
    """Right-hand side of the SIRD system, in the form odeint expects.

    y      -- current state [S, I, R, D] (population fractions)
    t      -- time (unused: the system is autonomous)
    params -- [r, a, d]: infection, removal and death rates
    """
    susceptible, infective, _, _ = y
    infection_rate, removal_rate, death_rate = params
    new_infections = infection_rate * susceptible * infective
    return [
        -new_infections,                                            # dS/dt
        new_infections - (removal_rate + death_rate) * infective,   # dI/dt
        removal_rate * infective,                                   # dR/dt
        death_rate * infective,                                     # dD/dt
    ]
# end function derivs ~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initial values
I0 = 0.0001
R0 = 0.0
S0 = 1.0 - R0 - I0
D0 = 0.0
# Bundle parameters for ODE solver
params = [ r, a, d ]
# Bundle initial conditions for ODE solver
y0 = [ S0, I0, R0, D0 ]
# Make a time array for solution samples...
tStart = 0.0
tStop = ld_start
tInc = 0.010
t_0 = numpy.arange(tStart, tStop, tInc)
# Stage 1: no intervention, from t=0 until the lockdown starts.
# Solve using numerical integration...
psoln_0 = scipy.integrate.odeint( deriv, y0, t_0, args=(params,) )
# Stage 2: lockdown — infection rate reduced by the distancing factor.
tStart = ld_start
tStop = ld_end
r_start = r;
r *= (1.0 - distancing_factor)
params = [ r, a, d ]
t_1 = numpy.arange(tStart, tStop, tInc)
# Bundle initial conditions for ODE solver
# (restart the integration from the final state of stage 1)
last = len(psoln_0[:,0])-1
S0 = psoln_0[last,0]
I0 = psoln_0[last,1]
R0 = psoln_0[last,2]
D0 = psoln_0[last,3]
y0 = [ S0, I0, R0, D0 ]
# Solve using numerical integration...
psoln_1 = scipy.integrate.odeint( deriv, y0, t_1, args=(params,) )
# Stage 3: lockdown lifted — infection rate restored to its original value.
tStart = ld_end
tStop = 200.0
r = r_start
quarantine_rate = 0.0 # 0.1
a = quarantine_rate + recovery_rate;
params = [ r, a, d ]
t_2 = numpy.arange(tStart, tStop, tInc)
# Bundle initial conditions for ODE solver
last = len(psoln_1[:,0])-1
S0 = psoln_1[last,0]
I0 = psoln_1[last,1]
R0 = psoln_1[last,2]
D0 = psoln_1[last,3]
y0 = [ S0, I0, R0, D0 ]
# Solve using numerical integration...
psoln_2 = scipy.integrate.odeint( deriv, y0, t_2, args=(params,) )
# Unpack the S/I/R/D columns of each stage for plotting below.
Ssoln_0 = psoln_0[:,0]
Isoln_0 = psoln_0[:,1]
Rsoln_0 = psoln_0[:,2]
Dsoln_0 = psoln_0[:,3]
Ssoln_1 = psoln_1[:,0]
Isoln_1 = psoln_1[:,1]
Rsoln_1 = psoln_1[:,2]
Dsoln_1 = psoln_1[:,3]
Ssoln_2 = psoln_2[:,0]
Isoln_2 = psoln_2[:,1]
Rsoln_2 = psoln_2[:,2]
Dsoln_2 = psoln_2[:,3]
# Plot the solution...
#matplotlib.pyplot.plot( t, Ssoln, label="Susceptibles" )
matplotlib.pyplot.figure( 0, figsize=(6,4) )
#matplotlib.pyplot.plot( t, Isoln, label="Infectives, " + conditions, linestyle='dashed' )
#matplotlib.pyplot.plot( t_0, Rsoln_0, label="Immune Before", linestyle=':', color='g' )
#matplotlib.pyplot.plot( t_1, Rsoln_1, label="Immune Shut", linestyle='--', color='g' )
#matplotlib.pyplot.plot( t_2, Rsoln_2, label="Immune Restart", linestyle='-.', color='g' )
#matplotlib.pyplot.plot( t_0, Dsoln_0, label="Total Death Before", linestyle=':', color='k' )
#matplotlib.pyplot.plot( t_1, Dsoln_1, label="Total Deaths Shut", linestyle='--', color='k' )
# Only the post-restart death curve is plotted; the rest are kept commented
# out as alternative views of the same solution.
matplotlib.pyplot.plot( t_2, Dsoln_2, label="Deaths After Restart", linestyle='-.', color='r' )
#matplotlib.pyplot.plot( t_0, Isoln_0, label="Infectives Before", linestyle=':', color='r' )
#matplotlib.pyplot.plot( t_1, Isoln_1, label="Infectives Shut", linestyle='--', color='r' )
#matplotlib.pyplot.plot( t_2, Isoln_2, label="Infectives Restart", linestyle='-.', color='r' )
#matplotlib.pyplot.axvline(x=ld_start,label="start lockdown", color='g' )
#matplotlib.pyplot.axvline(x=ld_end,label="end lockdown", color='r' )
matplotlib.pyplot.title( "Epidemic" )
matplotlib.pyplot.legend( loc='best' )
matplotlib.pyplot.xlabel( "t (days)" )
matplotlib.pyplot.ylabel( "Population Fraction" )
matplotlib.pyplot.show()
# Read data...
#matplotlib.pyplot.figure(1)
# NOTE(review): total_pop is immediately overwritten with 1e5 and only used
# by the commented-out normalisation below — confirm which value is intended.
total_pop = 1585873
total_pop = 1.0E5
ny_data = numpy.genfromtxt('coronavirus-data/case-hosp-death.csv',
                           delimiter=',',dtype=None,encoding="utf8" )
datestrings = ny_data[1:,0]
deaths = ny_data[1:,3]
date0 = datetime.strptime(datestrings[0], '%m/%d/%y')
print( date0 )
last = len(Dsoln_2)-1
print( "Final Death Toll: ", Dsoln_2[last])
size = datestrings.size
case_data = numpy.zeros(shape=(size,2))
row = 0
cum_deaths = 0
# Build (days-since-start, cumulative deaths) pairs from the CSV rows.
for datestr in datestrings:
    date = datetime.strptime( datestr, '%m/%d/%y')
    timedelta = date - date0
    case_data[row,0] = timedelta.days
    if deaths[row]:
        cum_deaths += float(deaths[row])
        case_data[row,1] = cum_deaths
    else:
        case_data[row,1] = 0.0
    row += 1
#matplotlib.pyplot.title( "NY Data" )
#matplotlib.pyplot.legend( loc='best' )
#matplotlib.pyplot.xlabel( "t (days)" )
#matplotlib.pyplot.ylabel( "Fraction" )
#case_data[:,1] /= total_pop
#matplotlib.pyplot.plot( case_data[:,0], case_data[:,1], label="Deaths" )
#matplotlib.pyplot.legend( loc='best' )
#matplotlib.pyplot.show()
# EOF
| true |
ccff5a1a0aaa9f9edb51862a94273a8e67246dbd | Python | konoufo/gif4101_E8D1 | /devoir1/q4/q4c.py | UTF-8 | 4,785 | 3.203125 | 3 | [] | no_license | import numpy as np
from sklearn.utils import check_X_y
class CustomBayes:
    """Gaussian Bayes classifier (one scalar variance per class) with a
    reject option: inputs whose best posterior is below 1 - cout_lambda are
    assigned the extra label ``rejet_label``."""

    def __init__(self, cout_lambda=0.4):
        self.cout_lambda = abs(cout_lambda)
        self.check_lambda()
        self.classes = None
        self.variances = None
        self.means = None
        self.priors = None
        self.rejet_label = None
        self.is_fitted = False

    def check_lambda(self):
        # abs() above makes this unreachable in practice; kept as a guard.
        if self.cout_lambda < 0:
            raise ValueError('Le coût de rejet est compris entre 0 et 1.')

    def _means(self, X, y):
        """Compute and store the per-class mean vectors.

        :return: (array) matrix of shape (n_classes, n_features)
        """
        means = []
        for r in self.classes:
            means.append(X[y == r, :].mean(0))
        self.means = np.asarray(means)
        return self.means

    def _variances(self, X, y):
        """Compute and store one scalar variance per class.

        NOTE: the class samples are flattened before taking the variance, so
        a single variance is shared across all features (isotropic Gaussian).
        :return: (array) vector of shape (n_classes,)
        """
        variances = []
        N = self.classes.shape[0]
        for r in self.classes:
            variances.append(np.concatenate(X[y == r, :]).var(0))
        self.variances = np.asarray(variances)
        return self.variances

    def _priors(self, X, y):
        # Empirical class priors: class frequency / total sample count.
        self.priors = np.asarray([X[y == r, :].shape[0] for r in self.classes]) / X.shape[0]
        return self.priors

    def fit(self, X, y):
        """Estimate the maximum-likelihood parameters and return self.

        :param X: training sample matrix
        :param y: corresponding label vector
        :return: self
        """
        X, y = check_X_y(X, y)
        self.classes = np.unique(y)
        self._means(X, y)
        self._variances(X, y)
        self._priors(X, y)
        self.is_fitted = True
        # The reject option gets its own label, one past the largest class.
        self.rejet_label = np.max(self.classes) + 1
        return self

    def predict(self, X):
        """Return the predicted labels, with low-confidence inputs rejected.

        :param X: input matrix
        :return: (array) label per row; ``rejet_label`` marks rejections
        """
        posteriori = self.predict_proba(X)
        print('Probabilites a posteriori:\n{} \n'.format(posteriori))
        classement = self.classes[np.argmax(posteriori, axis=1)]
        # Reject any input whose best posterior falls below 1 - lambda.
        classement[np.max(posteriori, axis=1) < (1 - self.cout_lambda)] = self.rejet_label
        print('Prediction:\n{}\n'.format(classement))
        return classement

    def predict_proba(self, X):
        """Return the posterior probability of each class for each input.

        :param X: input matrix
        :return: (array) probability matrix of shape (n_inputs, n_classes)
        """
        if not self.is_fitted:
            raise AssertionError('Entrainer avec la methode fit(X, y) d\'abord.')
        means, variance = self.means, self.variances
        l = len(X[:, 0])
        d = len(X[0, :])
        c = self.classes.shape[0]
        # Class-conditional likelihoods p(x|c) under an isotropic Gaussian
        # (one scalar variance per class, as produced by _variances).
        pxc = np.zeros((l, c))
        for j in range(0, c):
            for i in range(0, l):
                var = variance[j]
                xu = X[i, :] - means[j, :]
                xu2 = np.dot(xu, xu)
                ex = np.exp(-0.5 * xu2 / var)
                coef = ((2 * np.pi) ** (0.5 * d)) * np.sqrt(var)
                pxc[i, j] = ex * (1 / coef)
        # FIX: use the priors estimated in fit() instead of a hard-coded
        # [1/3, 1/3, 1/3], which silently assumed exactly 3 balanced classes
        # (and crashed for any other class count in the loop below).
        priors = self.priors
        deno = np.zeros(l)
        for i in range(0, l):
            deno[i] = sum(priors[j] * pxc[i, j] for j in range(c))
        pcx = np.zeros((l, c))
        for i in range(0, l):
            for j in range(0, c):
                pcx[i, j] = priors[j] * pxc[i, j] / deno[i]
        return pcx

    def score(self, X, y):
        """Return the total cost of the classification (>= 0); lower is better.

        Each misclassified, non-rejected input costs 1; each rejected input
        costs ``cout_lambda``.
        """
        y = np.asarray(y)
        classement = self.predict(X)
        # FIX: `classement` is 1-D, so the previous 2-D indexing
        # (classement[:, :], y[non_rejet, :]) raised IndexError at runtime.
        non_rejet = classement != self.rejet_label
        cout = np.count_nonzero(y[non_rejet] - classement[non_rejet])
        cout += (classement.shape[0] - classement[non_rejet].shape[0]) * self.cout_lambda
        return cout
if __name__ == '__main__':
    # try: python3 -m q4.q4c
    # Compare the empirical error with rejection disabled (lambda > 1 means
    # rejecting always costs more than a mistake) and enabled (lambda = 0.4).
    from q3.utils import Testeur, Traceur
    for clf in [CustomBayes(cout_lambda=1.2), CustomBayes(cout_lambda=0.4)]:
        error = Testeur(clf).compute_error()
        notice = 'sans rejet' if clf.cout_lambda > 1 else 'avec rejet'
        print('L\'erreur empirique pour le classifieur {notice} est {e:0.3f} [lambda={l}].'.format(e=error,
                                                                                                   l=clf.cout_lambda,
                                                                                                   notice=notice))
| true |
ca67f4bd905fd068a6c2046382c5e20289948f74 | Python | Villa01/OLC2_P2_201900907 | /Structure/Driver.py | UTF-8 | 5,015 | 2.859375 | 3 | [] | no_license | from Structure.SymbolTable.Type import get_stype
from Structure.SymbolTable.Symbol import Symbol
from Structure.SymbolTable.SymbolTable import SymbolTable
import sys
sys.path.append('../')
from Structure.AST.Error import Err, errors
class Driver:
    """Collects compiler output — errors, symbol tables and console text —
    and renders HTML reports for them (report labels are in Spanish)."""
    def __init__(self) -> None:
        self.error = []        # list of Err instances
        self.symbolTable = []  # list of SymbolTable instances
        self.console = ""      # accumulated console output
        self.size = 0
    def append(self, text):
        # Append raw text to the console buffer (no newline added).
        self.console += text
    def addError(self, t, description, line, column):
        # Record an error and echo it to the console with its position.
        error = Err(t, description, line, column)
        self.error.append(error)
        self.appendToConsole(f'{description} Linea: {line}, Columna: {column}')
    def appendToConsole(self, text):
        # Append text to the console buffer, newline-terminated.
        self.console += text + "\n"
    def agregarError(self, msg: str, line: int, column: int, t="SEMANTIC"):
        # Variant of addError with a default error type and slightly
        # different console formatting; both entry points are kept.
        err = Err(t, msg, line, column)
        self.error.append(err)
        self.append(msg + ' Linea: ' + str(line) + ' Columna ' + str(column) + '\n')
    def graficar_er(self, controlador, ts):
        # Build the HTML error report: one table row per recorded error.
        # NOTE(review): the `controlador` and `ts` parameters are unused.
        cuerpohtml = "<html><header><title>ReportedeErrores</title><style>#tabla {font-family:Arial,Helvetica," \
                     "sans-serif;border-collapse:collapse;width:100%;}#tabla td,#tabla th{" \
                     "border:1pxsolid#ddd;padding:8px;}#tabla tr:nth-child(even){background-color:#f2f2f2;}#tabla " \
                     "tr:hover{background-color:#ddd;}#tabla th{" \
                     "padding-top:12px;padding-bottom:12px;text-align:left;background-color:#04AA6D;color:white" \
                     ";}</style></header><body> "
        cuerpohtml += "<table id=\"tabla\">"
        cuerpohtml += "<thead>"
        cuerpohtml += "<tr>" + "<td colspan=\"6\">Tabla de Errores</td>" + "</tr>" + "<tr>" + "<th>No.</th>" \
                      "<th>Tipo</th>" + "<th>Descripción</th>" + "<th>Linea</th>" + "<th>Columna</th>" + "</tr>" \
                      "</thead> "
        numero = 1
        for sim in self.error:
            cuerpohtml += "<tr>" + "<td>" + str(numero) + "</td><td>"
            cuerpohtml += sim.type + "</td><td>" + sim.description + "</td><td>" + str(sim.line) + "</td><td>"
            cuerpohtml += str(sim.column) + "</td>" + "</tr>"
            numero += 1
        cuerpohtml += '</body></html>'
        return cuerpohtml
    def graficar_st(self, controlador, ts):
        # Build the HTML symbol-table report: one row per symbol per table.
        # NOTE(review): `controlador`/`ts` are unused; `numero` is counted
        # but never rendered.
        cuerpohtml = "<html><header><title>ReporteTablaSimbolos</title><style>#tabla {font-family:Arial,Helvetica," \
                     "sans-serif;border-collapse:collapse;width:100%;}#tabla td,#tabla th{" \
                     "border:1pxsolid#ddd;padding:8px;}#tabla tr:nth-child(even){background-color:#f2f2f2;}#tabla " \
                     "tr:hover{background-color:#ddd;}#tabla th{" \
                     "padding-top:12px;padding-bottom:12px;text-align:left;background-color:#04AA6D;color:white" \
                     ";}</style></header><body> "
        cuerpohtml += "<table id=\"tabla\">"
        cuerpohtml += "<thead>"
        cuerpohtml += "<tr>" + "<td colspan=\"6\">Tabla de Simbolo</td>" + "</tr>" + "<tr>" + "<th>Rol</th>"\
                      "<th>Nombre</th>" + "<th>Tipo</th>" + "<th>Ambito</th>" + "<th>Valor</th>" + "<th>Paramtros</th>"\
                      "</tr>" + "</thead> "
        numero = 1
        for tabla in self.symbolTable:
            for sim in tabla.table.values():
                cuerpohtml += "<tr>" + "<td>" + self.getRol(sim) + "</td><td>"
                cuerpohtml += sim.identifier + "</td><td>" + self.getType(sim) + "</td><td>" + self.getEnv(
                    tabla) + "</td><td>"
                cuerpohtml += f'{self.getValor(sim)}</td><td>{self.parametros(sim)}</td></tr>'
                numero += 1
        cuerpohtml += '</body></html>'
        return cuerpohtml
    def agregarTabla(self, table):
        # Register a symbol table for the report.
        self.symbolTable.append(table)
    def getValor(self, sim: Symbol):
        # Render a symbol's value as text, '...' when absent.
        # NOTE(review): the inner else branch is unreachable — the two
        # conditions above it are exhaustive.
        if sim.value is not None:
            if type(sim.value) == str:
                return sim.value
            elif not type(sim.value) == str:
                return str(sim.value)
            else:
                return '...'
        else:
            return '...'
    def getType(self, sim: Symbol):
        # Prefer the rich type's .stype; fall back to the enum lookup.
        try:
            return sim.symbol_type.stype
        except AttributeError:
            return get_stype(sim.symbol_type)
    def getRol(self, sim: Symbol):
        # NOTE(review): identical to getType — the "role" column currently
        # shows the type; confirm whether a distinct role lookup was intended.
        try:
            return sim.symbol_type.stype
        except AttributeError:
            return get_stype(sim.symbol_type)
    def getEnv(self, table: SymbolTable):
        # Scope/environment name, '...' when empty.
        if table.env:
            return table.env
        else:
            return '...'
    def parametros(self, sim: Symbol):
        # Parameter count for functions, '...' otherwise.
        if sim.param_list:
            return len(sim.param_list)
        else:
            return '...'
| true |
81f282c2e26317df2ab619cb0eff4de3a3f4cf63 | Python | erickmendonca/GDGAjuBot | /tests/test_util.py | UTF-8 | 2,356 | 2.625 | 3 | [] | no_license | import json
from pathlib import Path
from gdgajubot.util import BotConfig
def test_botconfig_init():
    """Comma-separated group/source strings are split into lists, and a
    sqlite file database is the default when database_url is None."""
    config = BotConfig(group_name="JLA,TT", events_source="CIE,BN", database_url=None)
    assert config.group_name == ["JLA", "TT"]
    assert config.events_source == ["CIE", "BN"]
    assert config.database == {
        "provider": "sqlite",
        "filename": "database.sqlite",
        "create_db": True,
    }
def test_botconfig_init_with_config_file():
    """All fields of the fixtures/test_config.yaml file are loaded verbatim."""
    test_config_path = Path(__file__).parent / "fixtures" / "test_config.yaml"
    config = BotConfig(config_file=str(test_config_path))
    assert config.debug_mode is True
    assert config.events_source == ("meetup",)
    assert config.telegram_token == "TELEGRAM_TOKEN"
    assert config.facebook_key == "FACEBOOK_API_KEY"
    assert config.meetup_client_id == "MEETUP_CLIENT_ID"
    assert config.meetup_client_secret == "MEETUP_CLIENT_SECRET"
    assert config.meetup_refresh_token == "MEETUP_REFRESH_TOKEN"
    assert config.links == {"link1": "url_1", "link2": "url_2"}
    assert config.custom_responses == {"/custom_1": "This is a custom bot command."}
def test_botconfig_load_database_from_config_file(tmp_path: Path):
    """An explicit `database` mapping in the config file is used as-is."""
    config_content = {
        "database": {
            "provider": "non_sqlite",
            "filename": "database.sqlite",
            "create_db": True,
        }
    }
    test_config_path = tmp_path / "test_config.yaml"
    # JSON is valid YAML, so json.dumps produces a loadable config file.
    test_config_path.write_text(json.dumps(config_content))
    config = BotConfig()
    config.load_config_file(str(test_config_path))
    assert config.database == config_content["database"]
def test_botconfig_parse_database_from_config_file(tmp_path: Path):
    """A `database_url` string in the config file is parsed into the
    provider-specific connection mapping (postgres URL and sqlite path)."""
    config = BotConfig()
    test_config_path = tmp_path / "test_config.yaml"
    test_config_path.write_text(
        '{"database_url":"postgres://USER:PASSWORD@1.2.3.4:5432/DBNAME"}'
    )
    config.load_config_file(str(test_config_path))
    assert config.database == {
        "provider": "postgres",
        "host": "1.2.3.4",
        "port": 5432,
        "user": "USER",
        "password": "PASSWORD",
        "database": "DBNAME",
    }
    # Re-loading with a sqlite URL replaces the previous mapping.
    test_config_path.write_text('{"database_url":"sqlite:///database.db"}')
    config.load_config_file(str(test_config_path))
    assert config.database == {"provider": "sqlite", "filename": "database.db"}
| true |
1c0dbbae3fee730d7166b5d02deb470b211ffb92 | Python | jmacach1/flask_crud | /app/database/__init__.py | UTF-8 | 2,230 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env python3
# """Database operations"""
from flask import g # context; global
import sqlite3
# Keys used for the dict representation of a user row.
ID = "id"
FIRST_NAME = "first_name"
LAST_NAME = "last_name"
HOBBIES = "hobbies"
DATABASE="user_db" # database filename
def get_db():
    """Return the request-scoped SQLite connection, opening it on first use
    and caching it on flask.g."""
    connection = getattr(g, "_database", None)
    if connection is None:
        connection = g._database = sqlite3.connect(DATABASE)
    return connection
def output_formatter(results: tuple):
    """Convert raw (id, first_name, last_name, hobbies) rows into a list of
    dicts keyed by the module's column-name constants."""
    return [
        {ID: row[0], FIRST_NAME: row[1], LAST_NAME: row[2], HOBBIES: row[3]}
        for row in results
    ]
def scan():
    """Return every user row from the database as a list of dicts."""
    cursor = get_db().execute("SELECT * FROM user", ())
    rows = cursor.fetchall()
    cursor.close()
    return output_formatter(rows)
def create(first_name, last_name, hobbies):
    """Insert a new user row and return its auto-generated row id."""
    query = """
    INSERT into user (first_name, last_name, hobbies) VALUES (?,?,?)
    """
    connection = get_db()
    row_id = connection.execute(query, (first_name, last_name, hobbies)).lastrowid
    connection.commit()
    return row_id
def findOne(id):
    """Return the user with the given id as a dict, or None when missing.

    Bug fix: the parameter was passed as ``(str(id))`` — parentheses without
    a trailing comma make a plain string, which sqlite3 treats as one binding
    per character, so any id >= 10 raised a binding-count error. The id is
    now passed as a proper one-element tuple.
    """
    cursor = get_db().execute("SELECT * FROM user WHERE id = ?", (id,))
    result = cursor.fetchone()
    if not result:
        return None
    return {
        ID : result[0],
        FIRST_NAME : result[1],
        LAST_NAME : result[2],
        HOBBIES : result[3]
    }
def update(id, first_name, last_name, hobbies):
    """Overwrite a user's fields (falsy arguments keep the stored value);
    return the updated row dict, or None when the id does not exist."""
    existing = findOne(id)
    if existing is None:
        return None
    query = """
    UPDATE user
    SET
        first_name = ?,
        last_name = ?,
        hobbies = ?
    WHERE
        id = ?
    """
    merged = (
        first_name or existing[FIRST_NAME],
        last_name or existing[LAST_NAME],
        hobbies or existing[HOBBIES],
        id,
    )
    connection = get_db()
    connection.execute(query, merged)
    connection.commit()
    return findOne(id)
def delete(id):
    """Delete the user with the given id and return a status message.

    Bug fix: ``execute(query, str(id))`` treated the string as a sequence of
    one-character bindings, so multi-digit ids raised a binding-count error;
    the id is now passed as a real one-element tuple.
    """
    user = findOne(id)
    if not user:
        return "User not found"
    query = """
    DELETE FROM user
    WHERE id = ?
    """
    cursor = get_db()
    cursor.execute(query, (id,))
    cursor.commit()
    if findOne(id) is None:
        return f"Success! Deleted user {id}"
    return f"Fails! unable to delete user {id}"
| true |
0d5eaeb91a47ded83c34ccbcf8d8b9af8eb23023 | Python | bmei1/Machine_Learning_Algorithm | /model_selection.py | UTF-8 | 475 | 2.71875 | 3 | [] | no_license | import numpy as np
def train_test_split(X, y, test_ratio = 0.2, seed = None):
    """Randomly split the paired arrays X and y into train/test subsets.

    :param X: feature array, shape (n_samples, ...)
    :param y: label array, shape (n_samples,)
    :param test_ratio: fraction of samples placed in the test split
    :param seed: optional RNG seed for a reproducible split
    :return: X_train, X_test, y_train, y_test

    Bug fix: the old guard ``if seed:`` silently ignored ``seed=0``; the
    seed is now honoured whenever it is not None.
    """
    if seed is not None:
        np.random.seed(seed)
    shuffled_index = np.random.permutation(len(X))
    test_size = int(len(X) * test_ratio)
    train_index = shuffled_index[test_size:]
    test_index = shuffled_index[:test_size]
    X_train = X[train_index]
    y_train = y[train_index]
    X_test = X[test_index]
    y_test = y[test_index]
    return X_train, X_test, y_train, y_test
| true |
c2b193302d7293286b461b24ad39d91cb0233ed9 | Python | Sushmita-08june/cat | /Tuple.py | UTF-8 | 892 | 4.09375 | 4 | [] | no_license | #Tuple
# Tuple basics: construction, type, length, indexing.
tuple1 = (7,8,2,3,4,"Python","Hi")
print(tuple1)
print(type(tuple1))
print(len(tuple1))
tuple2 = (8.0,7.23,False,True,9)
print(tuple2)
print(tuple2[2])
print(tuple1[-1])
#Concat
tuple3 = tuple1 + tuple2
print(tuple3)
tuple4 = tuple3 + (7,8,9)
print(tuple4)
tuple5 = (2,0,1) + (3,2,4)
print(tuple5)
#Multiply
tuple6 = tuple5 * 3
print(tuple6)
#Slicing/indexing
tuple7 = tuple6[-8:]
print(tuple7)
tuple8 = tuple7[2:5] + tuple2[1:]
print(tuple8)
#Replace element in list
Li1=[7,8,2,4,3]
Li1[-3] = 9
print(Li1)
'''tuple1 = (7,8,2,4,3)
tuple1[-3] = 9 # Throws error as it is immutable
print(tuple1)'''
#Tuple to list
val1 = (9,2,3,4)
print(val1)
print(type(val1))
val1 = list(val1)
print(val1)
print(type(val1))
#Conversion
#List to tuple
# FIX: the demo declared `Val1` but then printed/converted the stale
# lowercase `val1` left over from the previous section; use one name so
# the list really is the value converted to a tuple.
val1 = [7,8,9,-1]
print(val1)
print(type(val1))
val1 = tuple(val1)
print(val1)
print(type(val1))
| true |
86dc381dc6e2a87d16f0d12572d6cbc0eaade43d | Python | CHIEH-YU/microsoft_oneday_intern_test | /puzzle.py | UTF-8 | 1,467 | 2.6875 | 3 | [] | no_license | graph = [[6,8,2],[3,7,5],[0,6,8],[7,5,1],[4,4,4],[1,3,7],[8,2,0],[5,1,3],[2,0,6]]
# Read two 3x3 character grids (separated by a blank line) from stdin and
# decide whether the '*'-marked pattern of the first grid can be shifted
# (per the `graph` neighbour table above) to match the second grid.
puzzle=[[0]*3 for i in range(6)]
record_1 = []
record_2 = []
for i in range(6):
    if i==3:
        blank = input()
    puzzle[i] = list(input())
# record_1: linear indices of '*' cells; record_2: everything else.
for idx,k in enumerate(puzzle):
    for idx1,j in enumerate(k):
        if j == '*':
            record_1.append(idx*3+idx1)
        else:
            record_2.append(idx*3+idx1)
# Fold second-grid indices (9..17) back into the 0..8 range.
record_3 = []
for i in record_2:
    if i>8:
        i = i-9
    record_3.append(i)
sum1 = str(puzzle).count('*')
flag=0
if len(record_3)==8 or len(record_1)==8:
    flag=1
if sum1 == 9:
    # Try each column of the neighbour table and each +/-1, +/-2, +/-3
    # shift of the mapped positions against the target cell set.
    for j in range(3):
        new = []
        for i in record_1:
            if i<9:
                new.append(graph[i][j])
        new1 = []
        new2 = []
        new3 = []
        new4 = []
        new5 = []
        new6 = []
        for y in new:
            new1.append(y+3)
            new3.append(y+1)
            new5.append(y+2)
            new2.append(y-3)
            new4.append(y-1)
            new6.append(y-2)
        if set(new1) == set(record_3) or set(new2) == set(record_3) or set(new) == set(record_3):
            flag=1
            break
        if set(new4) == set(record_3) or set(new3) == set(record_3):
            flag=1
            break
        if set(new5) == set(record_3) or set(new6) == set(record_3):
            flag=1
            break
    if flag==1:
        print('YES')
    else:
        print('NO')
else:
    print('NO')
| true |
a3291ac6c2461afd7e9f67401a5169d5bd560a31 | Python | enodr/pytglib | /pytglib/api/types/chat_invite_link_info.py | UTF-8 | 1,954 | 3.015625 | 3 | [
"MIT"
] | permissive |
from ..utils import Object
class ChatInviteLinkInfo(Object):
    """
    Contains information about a chat invite link

    Attributes:
        ID (:obj:`str`): ``chatInviteLinkInfo``

    Args:
        chat_id (:obj:`int`): Chat identifier of the invite link; 0 if the
            user is not a member of this chat
        type (:class:`telegram.api.types.ChatType`): Information about the
            type of the chat
        title (:obj:`str`): Title of the chat
        photo (:class:`telegram.api.types.chatPhoto`): Chat photo; may be null
        member_count (:obj:`int`): Number of members in the chat
        member_user_ids (List of :obj:`int`): User identifiers of some chat
            members that may be known to the current user
        is_public (:obj:`bool`): True, if the chat is a public supergroup or
            channel, i.e. it has a username or is a location-based supergroup

    Returns:
        ChatInviteLinkInfo

    Raises:
        :class:`telegram.Error`
    """
    ID = "chatInviteLinkInfo"

    def __init__(self, chat_id, type, title, photo, member_count, member_user_ids, is_public, **kwargs):
        # Plain attribute bag mirroring the TDLib object fields.
        self.chat_id = chat_id  # int
        self.type = type  # ChatType
        self.title = title  # str
        self.photo = photo  # ChatPhoto
        self.member_count = member_count  # int
        self.member_user_ids = member_user_ids  # list of int
        self.is_public = is_public  # bool

    @staticmethod
    def read(q: dict, *args) -> "ChatInviteLinkInfo":
        # Deserialize the nested objects, then build the instance positionally.
        return ChatInviteLinkInfo(
            q.get('chat_id'),
            Object.read(q.get('type')),
            q.get('title'),
            Object.read(q.get('photo')),
            q.get('member_count'),
            q.get('member_user_ids'),
            q.get('is_public'),
        )
| true |
ec547cccc62b3aa87505a260a0333ce8c877c1b8 | Python | sloscal1/lights | /src/lights/core.py | UTF-8 | 3,286 | 2.65625 | 3 | [
"MIT"
] | permissive | from collections import defaultdict
from functools import partial
from itertools import chain
from typing import List
def to_bytes(value: int, byte_length: int) -> bytearray:
    """Encode *value* big-endian into a mutable bytearray of *byte_length* bytes."""
    encoded = value.to_bytes(byte_length, "big")
    return bytearray(encoded)
# Protocol dimensions: 11 frequency bands, 3 bytes per RGB colour.
NUM_BANDS = 11
RGB_BYTES = 3
# Message parts
MESSAGE_START = bytearray.fromhex("FF FF FF")
BEGIN = bytearray.fromhex("00 00 02")
END_PREAMBLE = bytearray.fromhex("FF")
END_GROUP = bytearray.fromhex("FF")
NOT_STREAM_SIZE = to_bytes(0, 2)
NOT_STREAMING = bytearray.fromhex("00")
STREAM_SIZE = to_bytes(NUM_BANDS * RGB_BYTES, 2)  # one RGB triple per band
STREAMING = bytearray.fromhex("01")
class Assignment:
    """A 6-byte record binding a mode to a group from a given start time."""

    def __init__(self, start_time: int, group_id: int, mode_id: int) -> None:
        # 4-byte start time, 1-byte group id, 1-byte mode id (big-endian).
        parts = [to_bytes(start_time, 4), to_bytes(group_id, 1), to_bytes(mode_id, 1)]
        self.content = parts[0] + parts[1] + parts[2]

    def get_content(self) -> bytearray:
        return self.content
class Group:
    """A list of light indices serialised as one byte each plus a terminator."""

    # Serialiser for a single member index (one big-endian byte).
    bit = partial(to_bytes, byte_length=1)

    def __init__(self, members: List[int] = None) -> None:
        # BUG FIX: the old `list(members) or []` raised TypeError when
        # members was None — the documented default; guard before copying.
        self.members = list(members) if members is not None else []

    def add(self, value: int) -> None:
        """Append one light index to the group."""
        self.members.append(value)

    def get_content(self) -> bytearray:
        """Serialise all member indices followed by the END_GROUP marker."""
        content = bytearray(chain.from_iterable(map(Group.bit, self.members)))
        content += END_GROUP
        return content
class Pulse:
    """A colour fade from start_color to end_color over num_steps steps with
    a per-step delay, serialised as 9 bytes (RGB start, RGB increments,
    direction bits, step count, delay)."""

    def __init__(
        self, start_color: int, end_color: int, num_steps: int, delay: int
    ) -> None:
        self.start_color = to_bytes(start_color, 3)
        end_color = to_bytes(end_color, 3)
        self.num_steps = to_bytes(num_steps, 1)
        self.delay = to_bytes(delay, 1)
        self.inc = bytearray()
        self.dir = ""  # binary string during construction...
        for byte in range(3):
            # Per-channel step size; floor of the span per step, minus one.
            self.inc.append(
                max(abs(self.start_color[byte] - end_color[byte]) // num_steps - 1, 0)
            )
            # "0" = channel fades up, "1" = channel fades down (or is equal).
            self.dir += "0" if end_color[byte] > self.start_color[byte] else "1"
        # FIX: removed a leftover debug print of the direction bit-string.
        self.dir = to_bytes(int(self.dir, 2), 1)  # Single byte once dir is known

    def get_content(self) -> bytearray:
        return self.start_color + self.inc + self.dir + self.num_steps + self.delay
def create_message(
    assignments: List[Assignment],
    modes: List[Pulse],
    groups: List[Group],
    streaming: bool = False,
    rate: int = 10,
    num_lights: int = 75,
) -> bytearray:
    """Serialise the full controller message: header (counts and rate), mode
    blocks, assignment records, group member lists, then a streaming
    trailer."""
    content = (
        MESSAGE_START
        + to_bytes(rate, 2)
        + to_bytes(len(modes), 1)
        + to_bytes(num_lights, 1)
        + to_bytes(len(groups), 1)
        + to_bytes(len(assignments), 4)
        + BEGIN
        + bytearray(chain.from_iterable([mode.get_content() for mode in modes]))
        + bytearray(
            chain.from_iterable([assign.get_content() for assign in assignments])
        )
        + bytearray(chain.from_iterable([group.get_content() for group in groups]))
        + END_PREAMBLE
    )
    if streaming:
        # Map group id -> band; unknown groups default to 0xFF ("no band").
        groups_to_bands = defaultdict(lambda: 0xFF)
        groups_to_bands[0] = 0
        # NOTE(review): len() is taken before the range(3) lookups below add
        # keys 1 and 2 to the defaultdict, so the length byte is 1 —
        # confirm the pre-lookup count is what the controller expects.
        content = (
            content
            + STREAM_SIZE
            + STREAMING
            + to_bytes(len(groups_to_bands), 1)
            + bytearray([groups_to_bands[i] for i in range(3)])
        )
    else:
        content += NOT_STREAM_SIZE + NOT_STREAMING
    return content
| true |
529deda9a496d17eeba8bef457def21ec1e79bb6 | Python | byronwasti/CircuitsLabs | /lab3/scripts/graphing2.py | UTF-8 | 938 | 2.703125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import csv
from scipy.optimize import curve_fit
# Read (Vin, Ibase, Iemit) columns from the experiment CSV, compute the
# transistor current gain beta, and plot it against base current.
CUR_VOL = "data/experiment1_data2_2.csv"
with open(CUR_VOL, 'r') as f:
    reader = csv.reader(f)
    Vin = []
    Ibase = []
    Iemit = []
    for i, row in enumerate(reader):
        if i == 0: continue
        #if i < 135: continue
        #if i > 183: continue
        Vin.append(float(row[0]))
        Ibase.append(float(row[1]))
        Iemit.append(float(row[2]))
# Icollect = Iemit - Ibase (collector current = emitter minus base)
Icollect = []
for i in range(len(Ibase)):
    Icollect.append(Iemit[i] - Ibase[i])
# beta = Icollect / Ibase
beta = []
for i in range(len(Icollect)):
    beta.append(Icollect[i] / Ibase[i])
plt.semilogx(Ibase, beta, '.', label="Icollect vs. Ibase")
plt.title(r'$\beta$ vs. $I_{base}$', fontsize=20)
plt.ylim(180, 240)
plt.xlabel(r'$I_{base}$ (A)', fontsize=16)
plt.ylabel(r'$\beta$', fontsize=16)
plt.show()
| true |
0b00147f4922dec3d9ab02dc730fddb5c433145a | Python | nkahrs/lewin-rhythms | /svg.py | UTF-8 | 1,115 | 3.09375 | 3 | [] | no_license | # refactored "07 30 svg creation.py", svg generation only
## theme2svg: given theme, output svg rectangles to mark points
def theme2svg(theme, x_offset, y_offset, height, width, color):
for i in theme:
print('<rect x="', (x_offset+i), '" y="', y_offset,
'" height="', height, '" width="', width,
'" fill="', color, '" />', sep='')
def drawsegment(segment, x_offset, y_start, y_height, color):
print('<rect x="', x_offset + segment[0], '" y="', y_start+1,
'" height="', y_height, '" width="', 1+segment[1]-segment[0],
'" fill="', color, '" />', sep='')
# add handles
print('<rect x="', x_offset + segment[0], '" y="', y_start,
'" height="1" width="1" fill="', color, '" />', sep='')
print('<rect x="', x_offset + segment[1], '" y="', y_start,
'" height="1" width="1" fill="', color, '" />', sep='')
def drawsegments(segments, x_offset, y_start, y_height, color):
for i in segments:
drawsegment(i, x_offset, y_start, y_height, color)
y_start = y_start + 2*y_height
| true |
ef503279f689af328ce90fa5a37bb37e7f44ae66 | Python | jrbj0/IF969-Projetos | /Projeto 3 - Concessionaria/carro.py | UTF-8 | 1,725 | 2.96875 | 3 | [] | no_license | from veiculo import *
class Carro(Veiculo):
    """Car vehicle: a Veiculo plus a door count (`portas`)."""
    def __init__(self, fabricante, modelo, portas,
                 autonomia, ano, placa, renavam, chassi, reservado):
        Veiculo.__init__(self, fabricante, modelo, autonomia,
                         ano, placa, renavam, chassi, reservado)
        self.portas = portas
    def __repr__(self):
        'Plain tab-separated record to be saved in the flat-file database'
        return ("carro\t" + self.fabricante + "\t" +
                self.modelo + "\t" + self.portas + "\t" +
                self.autonomia + "\t" + self.ano + "\t" +
                self.getPlaca() + "\t" + self.getRENAVAM() + "\t" +
                self.getChassi() + "\t" + str(self.getReservado()))
    def __str__(self):
        'Short one-line form used in listings'
        # "Livre"/"Reservado" marks availability in the listing.
        reserv = " | Livre"
        if self.getReservado():
            reserv = " | Reservado"
        return (self.getPlaca() +
                " | " + self.fabricante + " - " + self.modelo + " - " + self.ano +
                " | " + self.portas + " portas" + reserv)
    def mostrar(self):
        'Detailed dump of every vehicle attribute'
        return ("Tipo:\t\tCarro" +
                "\nFabricante:\t" + str(self.fabricante) +
                "\nModelo:\t\t" + str(self.modelo) +
                "\nPortas:\t\t" + str(self.portas) +
                "\nAutonomia:\t" + str(self.autonomia) +
                "\nAno:\t\t" + str(self.ano) +
                "\nPlaca:\t\t" + str(self.getPlaca()) +
                "\nRENAVAM:\t" + str(self.getRENAVAM()) +
                "\nChassi:\t\t" + str(self.getChassi()) +
                "\nReservado:\t" + str(self.getReservado()))
| true |
class node:
    """BFS search state: a visited bit-mask plus the path taken so far."""

    def __init__(self, n=None, idx=None, nums=None):
        # Either start fresh (nums slots, empty path) or copy parent state.
        if n is None:
            self.mask = [0] * nums
            self.path = []
        else:
            self.mask = list(n.mask)
            self.path = list(n.path)
        # Visiting a node marks it and extends the path.
        if idx is not None:
            self.mask[idx] = 1
            self.path.append(idx)
class Solution:
    """BFS enumeration of all source->target paths in a DAG (LeetCode 797)."""

    def __init__(self):
        self.ret = []

    def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
        """Return every path from node 0 to node len(graph)-1.

        Improvement: the pending queue is a collections.deque, making each
        dequeue O(1) instead of list.pop(0)'s O(n).
        """
        from collections import deque  # local import keeps module deps unchanged
        target = len(graph) - 1
        if len(graph) == 0:
            return self.ret
        pending = deque([node(idx=0, nums=target + 1)])
        while pending:
            state = pending.popleft()
            if state.path[-1] == target:
                self.ret.append(state.path)
            else:
                for nxt in graph[state.path[-1]]:
                    if state.mask[nxt] == 0:
                        pending.append(node(n=state, idx=nxt))
        return self.ret
| true |
9eb5762c3c6dea89c6078d691546185ef2200c57 | Python | marquesarthur/programming_problems | /interviewbit/test/test_min_absolute_diff.py | UTF-8 | 586 | 3.15625 | 3 | [] | no_license | import unittest
from interviewbit.pointers.min_absolute_diff import Solution
class MinAbsoluteDiffTest(unittest.TestCase):
    """Unit tests for the interviewbit Solution.solve implementation."""
    def test_base_case(self):
        """solve should return 1 for this triple of sorted arrays."""
        s = Solution()
        expected = 1
        A = [1, 4, 5, 8, 10]
        B = [6, 9, 15]
        C = [2, 3, 6, 6]
        result = s.solve(A, B, C)
        self.assertEqual(result, expected)
    def test_other_case(self):
        """solve should return 5 for this triple of sorted arrays."""
        s = Solution()
        expected = 5
        A = [1, 4, 4, 5, 6, 6]
        B = [1, 2, 3, 4]
        C = [9, 10]
        result = s.solve(A, B, C)
        self.assertEqual(result, expected)
| true |
925eaf653a5bbe367baeac18341e09f0a24b4637 | Python | kslam07/potentialSolver | /potentialSolver/postProcess.py | UTF-8 | 1,106 | 2.859375 | 3 | [
"MIT"
] | permissive | """
Functions created for plotting
"""
import matplotlib.pyplot as plt
def plot_airfoil(airfoil):
# plot for test
xloc, yloc, x_1, y_1, x_3, y_3, alpha, __ = airfoil.datafile
plt.scatter(x_1[:-1], y_1[:-1], label="collocation points")
plt.scatter(x_3[:-1], y_3[:-1], label="vortex elements")
plt.plot(xloc, yloc, marker='x', label="panel edges")
plt.axis('scaled')
plt.ylim(-5*max(yloc), 5*max(yloc))
plt.grid()
plt.legend()
plt.show()
return
def plot_results(airfoil):
# retrieve x-position of collocation point
xcol = airfoil.datafile[4, :-1]
fig, ax = plt.subplots(1, 2)
ax[0].plot(xcol, airfoil.results[-1], label="pressure diff.")
ax[1].plot(xcol, airfoil.results[-2], label="lift diff.")
ax[0].set_xlim(left=0, right=1)
ax[1].set_xlim(left=0, right=1)
ax[0].legend()
ax[1].legend()
ax[0].set_title(r'$\Delta$P over x/c')
ax[1].set_title(r'$\Delta$L over x/c')
ax[0].set_xlabel('x/c')
ax[0].set_ylabel(r'$\Delta$P')
ax[1].set_xlabel('x/c')
ax[1].set_ylabel(r'$\Delta$L')
return fig, ax | true |
a9261e0d41defb7143c54bed6a20c0feb984ba41 | Python | kyeah01/Problem_Solving | /code/programmers/1_lv/printer.py | UTF-8 | 515 | 2.890625 | 3 | [] | no_license | def solution(priorities, location):
answer, current = 0, -1
lenth = len(priorities)
while priorities:
work = priorities.pop(0)
current += 1
if current == lenth:
current = 0
for i in priorities:
if i > work:
priorities += [work]
break
else:
answer += 1
if current == location:
break
return answer
p = list(map(int, input()))
l = int(input())
print(solution(p, l)) | true |
db9074d700bf23f0675e0f8afaee3feac0b997e8 | Python | hayeonk/algospot | /routing.py | UTF-8 | 665 | 2.890625 | 3 | [] | no_license | import heapq
import sys
def dijkstra(src):
dist[src] = 1.0
pq = []
heapq.heappush(pq, (1.0, src))
while pq:
cost, here = heapq.heappop(pq)
for there, c in adj[here]:
nextDist = cost * c
if dist[there] > nextDist:
dist[there] = nextDist
heapq.heappush(pq, (nextDist, there))
rl = lambda: sys.stdin.readline()
for _ in xrange (int(rl())):
V, E = map(int, rl().split())
adj = [[] for i in xrange (V)]
for i in xrange (E):
input = rl().split()
u, v, w = int(input[0]), int(input[1]), float(input[2])
adj[u].append((v, w))
adj[v].append((u, w))
dist = [1e200] * V
dijkstra(0)
print dist[-1]
| true |
21709b83d819ce63e477f83a3252bb373f3b5a9b | Python | AHHHZ975/neuralnet-pytorch | /neuralnet_pytorch/utils/numpy_utils.py | UTF-8 | 3,047 | 3.5 | 4 | [
"MIT"
] | permissive | import numpy as np
__all__ = ['is_outlier', 'smooth']
def smooth(x, beta=.9, window='hanning'):
"""
Smoothens the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
:param x:
the input signal.
:param beta:
the weighted moving average coeff. Window length is :math:`1 / (1 - \\beta)`.
:param window:
the type of window from ``'flat'``, ``'hanning'``, ``'hamming'``,
``'bartlett'``, and ``'blackman'``.
Flat window will produce a moving average smoothing.
:return:
the smoothed signal.
Examples
--------
.. code-block:: python
t = linspace(-2, 2, .1)
x = sin(t) + randn(len(t)) * .1
y = smooth(x)
"""
x = np.array(x)
assert x.ndim == 1, 'smooth only accepts 1 dimension arrays'
assert 0 < beta < 1, 'Input vector needs to be bigger than window size'
window_len = int(1 / (1 - beta))
if window_len < 3 or x.shape[0] < window_len:
return x
s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
if isinstance(window, str):
assert window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman'], \
'Window is on of \'flat\', \'hanning\', \'hamming\', \'bartlett\', \'blackman\''
if window == 'flat': # moving average
w = np.ones(window_len, 'd')
else:
w = eval('np.' + window + '(window_len)')
else:
window = np.array(window)
assert window.ndim == 1, 'Window must be a 1-dim array'
w = window
y = np.convolve(w / w.sum(), s, mode='valid')
return y if y.shape[0] == x.shape[0] else y[(window_len // 2 - 1):-(window_len // 2)]
def is_outlier(x: np.ndarray, thresh=3.5):
"""
Returns a boolean array with True if points are outliers and False
otherwise. Adapted from https://stackoverflow.com/a/11886564/4591601.
:param x:
an ``nxd`` array of observations
:param thresh:
the modified z-score to use as a threshold. Observations with
a modified z-score (based on the median absolute deviation) greater
than this value will be classified as outliers.
:return:
a ``nx1`` boolean array.
References
----------
Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and
Handle Outliers", The ASQC Basic References in Quality Control:
Statistical Techniques, Edward F. Mykytka, Ph.D., Editor.
"""
if len(x) == 1:
return np.array([False])
if len(x.shape) == 1:
x = x[:, None]
median = np.median(x, axis=0)
diff = np.sum((x - median) ** 2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
| true |
19440e508de59f50f9f0ce99ee1d8391f96198d9 | Python | codename-rinzler/goldmine | /src/framework/rect.py | UTF-8 | 546 | 3.734375 | 4 | [] | no_license | class Rect:
def __init__(self, x, y, w, h):
self.x1 = x
self.y1 = y
self.x2 = x + w
self.y2 = y + h
def center(self):
cx = (self.x1 + self.x2) / 2
cy = (self.y1 + self.y2) / 2
return (cx, cy)
def intersect(self, other):
return (self.x1 <= other.x2 and self.x2 >= other.x1 and
self.y1 <= other.y2 and self.y2 >= other.y2)
def __str__(self):
return "(" + str(self.x1) + "," + str(self.y1) + ") to ("+ str(self.x2) + "," + str(self.y2) + ")"
| true |
ea6d5588690b45eeebf3125829089425411d3738 | Python | nikhilbhatewara/HackerRank | /Python/Day5_Normal_Distribution_II.py | UTF-8 | 294 | 3.65625 | 4 | [
"MIT"
] | permissive | import math
mean, std = 70, 10
def cdf(x): return 0.5 * (1 + math.erf((x - mean) / (std * (2 ** 0.5))))
# More than 80 => 1 - P(less than 80)
print('{:.2f}'.format(100*(1-cdf(80))))
# More than 60
print('{:.2f}'.format(100*(1 - cdf(60))))
# Less than 60
print('{:.2f}'.format(100*cdf(60)))
| true |
cb56b5fbfeebedc431bf1d16639c6062bbfe8bda | Python | HighLvRiver/PythonLearning | /Study/python_basic_01_calculator.py | UTF-8 | 3,197 | 4.1875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
#파이썬을 계산기로 사용하기
#파이썬 셸에서 할 수 있는 가장 간단한 프로그래밍은 파이썬을 계산기로 사용하는 것이다.
#정수의 연산
#셀에서 다음과 같이 입력해보자.
#1+1 #모두 붙여 쓰지 않는다
#1 +1 #너무 많이 띄우지 않는다. 한칸만 띄운다.
#더하기 : +
#빼기 : -
#곱하기 : *
#나누기 몫만 구하기 : //
#소숫점까지 나누기 : /
#나머지 : %
#제곱 : **
#2 + 4 -5 #1
#2 * 4 #8
#8 // 4 #2
#10 // 4 #2
#10 % 4 # 2
#2 ** 3 # 8
#Q1.
#1) 3×2−8÷4
#2) 25×6÷3+17
#3) 39021−276920÷12040
#4) 2^6−10%6
# % 는 나머지를 구하는 연산
#연산 순서와 괄호
#괄호가 있으면 괄호안을 먼저 계산한다. 하지만 파이썬은 소괄호, 중괄호, 대괄호를 구분하지 않고 모두 소괄호 기호를 사용한다.
#100 * (3 * (10 - (3 * 2)) + 8) #2000
#Q2.
#1) 12−(5×7+1)
#2) 5×{8+(10−6)÷2}
#3) 48320−{(365−5×9)÷16}×987
#4) ((3^4−3×7)%5+4)^2
#부등식의 참과 거짓 계산
#파이썬은 참과 거짓을 계산하는 부등식 연산도 가능하다. 파이썬에서는 참과 거짓을 True 또는 False 라는 값으로 나타낸다.
#등호 기호는 변수에 값을 할당하는데 사용되기 때문에 비교 연산을 할 때는 등호 기호가 두 개 반복되는 기호를 사용한다는 점에 주의한다.
# > : >
# < : <
# = : ==
# ≠ : !=
# ≥ : >=
# ≤ : <=
#Q3.
#1) 파이썬으로 계산기를 사용하여 답이 True인 부등식을 3개 만든다.
#1) 파이썬으로 계산기를 사용하여 답이 False인 부등식을 3개 만든다.
#불리언(Boolean) 대수
#파이썬에서는 참과 거짓에 대해 & (AND) 라는 연산과 | (OR) 라는 부울리언(Boolean) 대수 연산도 할 수 있다.
# & (AND) 연산은 두 값이 모두 참일 때만 답이 참이 된다. 즉, 하나라도 참이면 답은 거짓이다.
# | (OR) 연산은 두 값이 모두 거짓일 때만 답이 거짓이 된다. 즉, 하나라도 참이면 답은 참이다.
# True & True
# True & False
# False & True
# False & False
# True | True
# True | False
# False | True
# False | False
#부등식 연산과 부울리언 대수를 결합하면 다음과 같은 계산도 할 수 있다.
#(2 > 0) & (2 < 3)
#(2 > 2) | (2 < 3)
#Q4.
#1) (5≤6)&(3=4)
#2) (2≠1)|(3≥4)
#3) (5≤6)&((0=0)|(3<4))
#변수 사용하기
#어떤 값을 계속 사용하는 경우에는 그 값을 변수, 영어로 variable에 담아두었다가 사용할 수 있다.
#변수에 값을 넣는 것을 할당한다고 이야기하며 영어로는 assignment라고 한다.
#변수에 값을 할당할 때는 등호 기호를 사용하고 좌변에는 할당할 변수 이름을,우변에는 할당할 값을 쓴다.
#변수의 이름은 알파벳으로 시작하며 뒤에는 숫자가 올 수 있다.
#파이썬에서는 변수 이름의 대문자와 소문자를 구분하기 때문에 주의하여야 한다.
#즉, apple 과 Apple 과 APPLE 은 모두 서로 다른 변수이다.
#Q5.
#1) (2x−1)^2+1
#2) x^2y∗(z+10)
#3) ((j=0)&(0<k))|(i≤100)
| true |
358e45a6e8f9605e666d2dcdb0c5232139a6de18 | Python | Sabhijiit/Captcha-breaking-using-TensorFlow | /train_test_model.py | UTF-8 | 5,774 | 2.890625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu May 24 10:32:37 2018
@author: Sabhijiit
"""
import cv2 # to load the images
import numpy as np # to do matrix manipulations
from os.path import isfile, join # to manupulate file paths
from os import listdir # get list of all the files in a directory
from random import shuffle # shuffle the data (file paths)
import tensorflow as tf
import timeit
# sess = tf.InteractiveSession()
class DataSetGenerator:
def __init__(self, data_dir):
self.data_dir = data_dir
self.folder_names = self.get_folder_names()
self.file_names = self.get_data_paths()
def get_folder_names(self):
folder_names = []
for filename in listdir(self.data_dir):
if not isfile(join(self.data_dir, filename)):
folder_names.append(filename)
return folder_names
def get_data_paths(self):
data_paths = []
for label in self.folder_names:
img_lists=[]
path = join(self.data_dir, label)
for filename in listdir(path):
tokens = filename.split('.')
if tokens[-1] == 'png':
image_path=join(path, filename)
img_lists.append(image_path)
shuffle(img_lists)
data_paths.append(img_lists)
return data_paths
generator_object = DataSetGenerator(r"C:\Users\Sabhijiit\Desktop\captchured_project_directory\data_sets\TrainDataSetFinal") # Path of train data set
my_dict = {'a':0 , 'b':1 , 'c':2, 'd':3 , 'e': 4 , 'f':5 , 'g':6 , 'h':7 , 'i':8 , 'j':9 , 'k':10 , 'l':11 , 'm':12 , 'n':13 , 'o':14 , 'p':15 , 'q':16 , 'r':17 , 's':18 , 't':19 , 'u':20 , 'v':21 , 'w':22 , 'x':23, 'y':24 , 'z':25}
print("===STARTED===")
print()
n = 39000 # number of input images
input_images = np.zeros([n,784])
input_labels = np.zeros([n,26])
i = 0
for j in range(len(generator_object.folder_names)):
for path in generator_object.file_names[j]:
img = cv2.imread(path,0)
input_images[i] = np.reshape(img, 784)
input_labels[i,my_dict[generator_object.folder_names[j]]] = 1
i=i+1
print("processed" , generator_object.folder_names[j])
print()
print("===FINISHED===")
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
'''
Understand why they have taken the number of features as 32 - TODO, not mentioned in the tutorial
'''
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x = tf.placeholder(tf.float32, [None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 26])
b_fc2 = bias_variable([26])
y_ = tf.placeholder(tf.float32, [None, 26])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
sess = tf.InteractiveSession()
start = timeit.default_timer()
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Here, gradient descent optimizer is replaced with the more sophisticated ADAM optimizer.
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
"""to save model"""
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
sess.run(init_op)
n = 3001
for i in range(n):
index = np.random.choice(input_images.shape[0], 50, replace=False)
x_random = input_images[index]
y_random = input_labels[index]
if i%100 == 0:
train_accuracy = accuracy.eval(feed_dict={x: x_random, y_: y_random, keep_prob: 1.0})
print("step {0}, training accuracy {1} %".format(i, train_accuracy*100))
train_step.run(feed_dict={x: x_random, y_: y_random, keep_prob: 0.5})
finish = timeit.default_timer()
time = (finish-start)/60
print("Total time taken for {0} iterations: {1} mins".format(n, time))
save_path = saver.save(sess, r"C:\Users\Sabhijiit\Desktop\captchured_project_directory\CNNmodel\model.ckpt") # Give new path to save model
print("Model saved in path: %s" % save_path)
generator_object1 = DataSetGenerator(r"C:\Users\Sabhijiit\Desktop\captchured_project_directory\data_sets\TestDataSetFinal") # Path to test data set
print("===STARTED===")
print()
m = 13000 # number of input images
test_images = np.zeros([m,784])
test_labels = np.zeros([m,26])
l=0
for j in range(len(generator_object1.folder_names)):
for path in generator_object1.file_names[j]:
img = cv2.imread(path,0)
test_images[l] = np.reshape(img, 784)
test_labels[l,my_dict[generator_object1.folder_names[j]]] = 1
l=l+1
print("processed" , generator_object1.folder_names[j])
print()
print("===FINISHED===")
print()
print("==> Computing test accuracy")
accuracy = accuracy.eval(feed_dict={x: test_images, y_: test_labels, keep_prob: 1.0})
print("===> Testing accuracy is {0} %".format(accuracy*100))
| true |
89321f63eb27f750cf2a4a52add15d2e59d5bde7 | Python | yulifromchina/python-exercise | /Scrapy learn/Spider-Series/doubanTop250_spider/parse.py | UTF-8 | 1,753 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import re
import requests
import logging
import logging.config
import time
from bs4 import BeautifulSoup
from database import *
logging.config.fileConfig("logger.conf")
logger = logging.getLogger("spiderLogger")
def write_database(movie_title, score, judge_people,info, url):
insert_or_update_data(movie_title, float(score),int(judge_people), info, url)
def get_movie_link(url):
res = requests.get(url)
soup = BeautifulSoup(res.text, "lxml")
movies = soup.find_all("div",{"class":"item"})
for i in movies:
href = i.find("div",{"class":"pic"}).find("a").get("href")
title = i.find("span",{"class":"title"}).get_text()
score = i.find("span",{"class":"rating_num"}).get_text()
judge_people = i.find("div",{"class":"star"}).find_all("span")[-1].get_text()
judge_people = re.sub("\D","",judge_people)
try:
info = i.find("span",{"class":"inq"}).get.text()
except:
info = "无介绍"
logger.debug("href:{} title:{} score:{} judge_people:{} info:{}".format(href, title, score, judge_people, info))
write_database(movie_title=title, score=score, judge_people=judge_people,info=info,url=href)
time.sleep(0.5)
def main():
res = requests.get("https://movie.douban.com/top250")
soup = BeautifulSoup(res.text, 'lxml')
page_link = "https://movie.douban.com/top250?start={}&filter="
page = soup.find_all("div",{"class":"paginator"})
if len(page)>1:
return
a_content = page[0].find_all("a",recursive = False)
page_list = [1]
for i in a_content:
page_start = int(i.get("href").split("=")[1].split("&")[0])
page_list.append(page_start)
for i in page_list:
logger.debug(page_link.format(i))
get_movie_link(page_link.format(i))
if __name__ == "__main__":
main()
| true |
ecb479f44b66fab8b89861cb1c11d334e8a4dbc2 | Python | lty9520/crawlerDemo | /crawler/downurlHandler.py | UTF-8 | 2,066 | 2.59375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: ProxyHandler.py
Description :
Author : LHY
date: 2020/8/29
-------------------------------------------------
Change Activity:
2020/8/29:
-------------------------------------------------
"""
__author__ = 'LHY'
from helper.downurl import downUrl
from db.dbClient import DbClient
from handler.configHandler import ConfigHandler
class DownurlHandler(object):
""" downurl CRUD operator"""
def __init__(self):
self.conf = ConfigHandler()
self.db = DbClient(self.conf.dbConn)
self.db.changeTable(self.conf.tableName)
def get(self):
"""
return a down url item
:return:
"""
downurl = self.db.get()
if downurl:
return downUrl.createFromJson(downUrl)
return None
def pop(self):
"""
return and delete a down url item
:return:
"""
downurl = self.db.pop()
if downurl:
return downUrl.createFromJson(downurl)
return None
def put(self, downurl):
"""
put a down url item
:param downurl: down utl item
:return:
"""
self.db.put(downurl)
def delete(self, downurl):
"""
delete a down url item
:param downurl: down url item
:return:
"""
return self.db.delete(downurl.title)
def getAll(self):
"""
get all down url from db as list
:return:
"""
down_dict =self.db.getAll()
return [downUrl.createFromJson(value) for _, value in down_dict.items()]
def exists(self, downurl):
"""
check down url item exists
:param downurl:
:return:
"""
return self.db.exists(downurl.title)
def getCount(self):
"""
return down url count
:return:
"""
total_down_url = self.db.getCount()
return {'count' : total_down_url} | true |
90a9532d6908e530b19155aa8798a5b756a50fcf | Python | ZhaoYu1105/ElemeSpider | /main_spider.py | UTF-8 | 623 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2021/8/7 14:30
# @FileName: main_spider.py
# @Software: VSCode
# @Comments: 饿了么爬虫,爬取商品信息|主函数
import ele_login
import merchant_spider
import data_process
if __name__=="__main__":
# 自动登录并记录cookie
ele_login.login_and_cookie_get()
mycookie = ele_login.cookie_process()
# 手动输入商家ID
# ID格式为:E11239979582907458257
inputid = input("请输入商家ID:")
# 爬取信息
merchant_spider.merchant_spider(inputid,mycookie)
# 将爬取数据处理为excel格式
data_process.json_to_excel()
| true |
b88ff614e78d89ac3083f8b24f2953071fa712b1 | Python | HBlack09/ICTPRG-Python | /Reading and Writing Files/1.py | UTF-8 | 237 | 4.09375 | 4 | [] | no_license | #Request Input
x1 = int(input("Please write down any positive number: "))
y1 = int(input("Write down another number: "))
#Create output file
f = open("math.txt", "a")
#Print output to file
f.write(f"{x1 + y1}")
#Close file
f.close()
| true |
02a1921dbe2f76bd55d1dfe9913338fa42da4059 | Python | geoluengas/obd_tool | /read_data/read_data.py | UTF-8 | 3,587 | 2.515625 | 3 | [] | no_license | import random
import os
import obd
import time
import threading
from obd.OBDResponse import Monitor
from obd.utils import BitArray
def main():
time.strftime('%X %x %Z')
# obd.logger.setLevel(obd.logging.DEBUG)
connection = obd.OBD("/dev/ttys001") # auto-connects to USB or RF port
reader = DataReader(connection)
# t1 = threading.Thread(target=reader.read_cmds, args=(999, 1, True, 0.05))
# t1.start()
print(len(reader.cmds))
reader.read_cmds(999, 0.1, False, 1)
print(len(reader.cmds))
class DataReader():
def __init__(self, connection, f_dir="/tmp/", f_extension="obd_elm_data"):
for c in connection.supported_commands:
print(c.name)
self.connection = connection
self.cmds = connection.supported_commands
self.null_response_cmds = []
self.f_dir = f_dir
self.f_extension = f_extension
self.clean_files()
def clean_files(self):
test = os.listdir(self.f_dir)
for item in test:
if item.endswith(self.f_extension):
print("Deleting existing file" + str(os.path.join(self.f_dir, item)))
os.remove(os.path.join(self.f_dir, item))
def write_file(self, file, line, max_lines=100):
f = open(file, "a")
f.write(str(line) + "\n")
f.close()
with open(file, 'r') as fin:
data = fin.read().splitlines(True)
# print("lines = " + str(len(data)))
if len(data) >= max_lines:
with open(file, 'w') as fout:
fout.writelines(data[1:])
def read_cmd(self, cmd, randomize=False, rand_perc=0.1):
if self.connection.is_connected():
response = self.connection.query(cmd) # send the command, and parse the response
#print(response)
value = "unknown"
units = "unkown"
#print(len(self.null_response_cmds))
if isinstance(response.value, str):
# print("STRING!")
value = response.value
units = "string"
elif (response.value is None) \
or isinstance(response.value, BitArray) \
or isinstance(response.value, Monitor) \
or isinstance(response.value, list):
# TODO handle these types
# print("*** Unhandled Type : " + str(type(response.value)) + "***")
# print(response.value)
value = "unknown"
units = "unkown"
else:
value = response.value.magnitude
if randomize:
value = int(value)
value = random.randint(int(value - (value * rand_perc)), int(value + (value * rand_perc)))
units = response.value.units
# print(r_response)
f_name = str(response.command.name + "." + self.f_extension)
f_name = self.f_dir + f_name
# print(f_name)
self.write_file(f_name, value)
return response
else:
print("No connection to " + str(self.connection))
def read_cmds(self, iterations=999, interval=1, randomize=False, rand_perc=0.05):
i = 0
while i < iterations and self.connection.is_connected():
#print("Run : " + str(i))
for cmd in self.cmds:
# print("CMD " + cmd.name)
self.read_cmd(cmd, randomize, rand_perc)
i += 1
time.sleep(interval)
if __name__ == '__main__':
main()
| true |
e01eddcf7de4ce3fc20600a9ae5cc737160c338a | Python | tharvell/interview_questions | /questions/do_they_add_up.py | UTF-8 | 284 | 3.59375 | 4 | [] | no_license | # Do they add up ?
def do_they_add_up(numbers, number):
"""
Args:
Returns:
Raises:
"""
for i in range(len(numbers)):
for j in range(i, len(numbers)):
if numbers[i] + numbers[j] == number:
return True
return False
| true |
7cdecf6bfa378a499afdf9fb065d576ec5aeeeea | Python | joacomf/procesamiento_seniales | /guia01/ejercicio15.py | UTF-8 | 565 | 3.15625 | 3 | [] | no_license | import matplotlib.pyplot as plot
import numpy
from herramientas.signals.Signal import Signal
from herramientas.OddPart import OddPart
from herramientas.EvenPart import EvenPart
import random
class RandomFunction(Signal):
def value_at(self, time):
return random.randint(-2, 2)
time_collection = numpy.linspace(-10, 10).tolist()
signal = RandomFunction()
odd = OddPart(signal)
even = EvenPart(signal)
plot.stem(time_collection, odd.generate_function(time_collection))
plot.stem(time_collection, even.generate_function(time_collection), "-ro")
plot.show()
| true |
f484bdb4db327d1963ea3385f55dbabf3c15f3be | Python | zahybnaya/mnk | /scripts/calc_avg_time_predictions.py | UTF-8 | 3,299 | 2.546875 | 3 | [] | no_license | #!/bin/python
# Seems like a way to average rts data
from sys import argv
from numpy import std
def get_rts(rt_values_file):
rts = {}
with open(rt_values_file, 'r') as f:
for line in f:
if line[1].isalpha():
continue
fields=line.split(',')
key=(fields[6],fields[7])
val=fields[9].strip()
rts[key]=val
return rts
try:
switch_values_file = argv[1]
rt_values_file = argv[2]
except:
print "<time_values> <rt_values>"
exit(-1)
rts=get_rts(rt_values_file)
12.3683,0.319881,3,0,10.8121,1.94098,0,0,0
12.3683,0.319881,3,0,10.8121,1.94098,0,0,0
measures=['black','white','player','best_diff','entropy','tree_switch','count_evals','max_val', 'norm_best_diff', 'num_consecutive', 'num_nodes' , 'num_pieces', 'num_patterns']
boards={}
with open(switch_values_file, 'r') as f:
for line in f:
if line[1].isalpha():
continue
fields=line.split(',')
board=(fields[0],fields[1])
color=fields[2].strip()
measures_lists = boards.get(board,{})
bd = measures_lists.get('best_diff',[])
bd.append(float(fields[3].strip()))
measures_lists['best_diff']=bd
en = measures_lists.get('entropy',[])
en.append(float(fields[4].strip()))
measures_lists['entropy']=en
ts = measures_lists.get('tree_switch',[])
ts.append(float(fields[5].strip()))
measures_lists['tree_switch']=ts
mv = measures_lists.get('max_val',[])
mv.append(float(fields[7].strip()))
measures_lists['max_val']=mv
rbd = measures_lists.get('norm_best_diff',[])
rbd.append(float(fields[8].strip()))
measures_lists['norm_best_diff']=rbd
nc = measures_lists.get('num_consecutive',[])
nc.append(float(fields[9].strip()))
measures_lists['num_consecutive']=nc
np = measures_lists.get('num_nodes',[])
np.append(float(fields[10].strip()))
measures_lists['num_nodes']=np
npp = measures_lists.get('num_patterns',[])
npp.append(float(fields[11].strip()))
measures_lists['num_pieces']=npp
boards[board]=measures_lists
print "best_diff,entropy,tree_switch,max_val,norm_best_diff,num_consecutive,num_nodes,rt"
bd_std=[]
bd_means=[]
en_std=[]
en_means=[]
ts_std=[]
ts_means=[]
for b in boards:
bd=sum(boards[b]['best_diff'])/len(boards[b]['best_diff'])
bd_std.append(std(boards[b]['best_diff']))
bd_means.append(bd)
en=sum(boards[b]['entropy'])/len(boards[b]['entropy'])
en_std.append(std(boards[b]['entropy']))
en_means.append(en)
ts=sum(boards[b]['tree_switch'])/len(boards[b]['tree_switch'])
ts_std.append(std(boards[b]['tree_switch']))
ts_means.append(ts)
mvv=sum(boards[b]['max_val'])/len(boards[b]['max_val'])
nbd=sum(boards[b]['norm_best_diff'])/len(boards[b]['norm_best_diff'])
noc=sum(boards[b]['num_consecutive'])/len(boards[b]['num_consecutive'])
non=sum(boards[b]['num_nodes'])/len(boards[b]['num_nodes'])
print str(bd)+","+ str(en) +"," + str(ts) + "," + str(mvv) + " ," + str(nbd) + ',' + str(noc) +',' + str(non) +',' + rts[b]
#print zip(bd_means,bd_std)
#print zip(en_means, en_std)
#print zip(ts_means ,ts_std)
| true |
198bf6b09c1c37038bc5e392dfeebb096b350308 | Python | gSchool/dsi-prep-autochallenge-test | /images/makeplt.py | UTF-8 | 197 | 2.734375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
x_vals = np.linspace(-2 * np.pi, 2*np.pi, 25)
plt.figure(figsize=(6, 6))
plt.scatter(x_vals, np.sin(x_vals))
plt.savefig('correct_solution.png') | true |
0ebce30544f94bb971fe07365cbdb7ea7d6ae691 | Python | gp-learning/Python_educative_assignment | /LC409.Longest Palindrome.py | UTF-8 | 501 | 3.078125 | 3 | [] | no_license | from collections import Counter
def longestPalindrome( s):
"""
:type s: str
:rtype: int
"""
res = {}
ans=[]
out=[]
x=0
y=0
for i in s :
if i not in res:
res[i] = 1
else:
res[i] += 1
su = 0
for val in res.values():
if val %2== 0:
su+= val
else:
su = su+(val-1)
if len(s) > su:
return su+1
else:
return su
print(longestPalindrome(s = "bananas")) | true |
cf2b6a34e06d1100df151666d4a6a7452da15bf0 | Python | aquarion/lampstand | /lampstand/reactions/base.py | UTF-8 | 1,681 | 2.546875 | 3 | [] | no_license | import time
import ConfigParser
def __init__():
pass
class Reaction:
#canSay = ("Aquarion")
__name = 'Base'
cooldown_number = 6
cooldown_time = 360
uses = []
def __str__(self):
return self.__name
def __repr__(self):
return '<%s Reaction>' % self
def __init__(self, connection):
pass
def getconfig(self, connection):
try:
config = connection.config.items(self.__name)
self.config = {}
for item in config:
self.config[item[0]] = item[1]
except ConfigParser.NoSectionError:
print "no config for %s" % self.__name
pass
# def privateAction(self, connection, user, channel, message, matchindex =
# 0):
def overUsed(self, uses=False, number=False, cooldown=False):
if (uses):
print "Note: %s is still using the old overused syntax" % self.__name
if len(self.uses) >= self.cooldown_number:
first_use = int(self.uses[0])
use_allowed_again = first_use + self.cooldown_time
if time.time() < use_allowed_again:
print "Use Blocked. Try again in %s" % (int(use_allowed_again) - time.time())
return True
else:
print "Now %s, Limit at %s (%d)" % (time.time(), use_allowed_again, time.time() - int(use_allowed_again))
return False
def updateOveruse(self):
## Overuse Detectection ##
self.uses.append(int(time.time()))
if len(self.uses) > self.cooldown_number:
self.uses = self.uses[1:self.cooldown_number]
## Overuse Detectection ##
| true |
d16d2a8fe429ff67d3c8c7df475597403b776bc0 | Python | ChastityAM/Python | /Python-sys/HelloWorld.py | UTF-8 | 56 | 3 | 3 | [] | no_license | name = "Chuckie"
print (name + " says, 'Hello World'.") | true |
d0d0838accb0cd6e7491f1e69a9b528f167b8825 | Python | evamrom/messaging_system | /app.py | UTF-8 | 4,752 | 2.765625 | 3 | [] | no_license | from datetime import datetime
from time import strftime
from flask import Flask, jsonify, request
from users.current_user import current_user
from users.users import users
app = Flask(__name__)
@app.route('/register', methods=["POST"])
def register():
"""Register the user to system. If the user already exists it return me a message.
The user need to have a unique username and password.
"""
try:
data = request.get_json()
username = data.get("username")
password = data.get("password")
if users.get_user(username, password):
return jsonify({"status": "user already exists"})
else:
users.add_user(username, password)
return jsonify({"status": "succeed to register"})
except:
return jsonify({"status": "error"})
@app.route('/login', methods=["POST"])
def login():
"""Login to the system with username and password. If the user doest exists in the system or
the password or username incorrect the system will alert and the user can try again.
The system initialize a current user so from now it will be the user logged it.
"""
try:
data = request.get_json()
username = data.get("username")
password = data.get("password")
new_current_user = users.get_user(username, password)
if new_current_user:
current_user.set_current_user(new_current_user)
curr = current_user.get_current_user()
if not curr.get_logged_in():
curr.set_login()
return jsonify({"status": "successfully logged in"})
else:
return jsonify({"status": "user already logged in"})
else:
return jsonify({"status": "username or password not correct"})
except:
return jsonify({"status": "error"})
@app.route('/logout', methods=["GET"])
def logout():
"""Logout from the system. It means the current user return to be None
"""
try:
current_user.get_current_user().set_logout()
current_user.set_current_user(None)
except:
return jsonify({"status": "error"})
return jsonify({"status": "successfully logged out"})
@app.route('/write_message', methods=["POST"])
def write_message():
try:
curr = current_user.get_current_user()
data = request.get_json()
sender = curr.get_username()
receiver = data.get("receiver")
message = data.get("message")
subject = data.get("subject")
creation_date = datetime.now().strftime("%d/%m/%Y, %H:%M:%S")
if curr.get_username() == sender:
receiver_user = users.get_receiver(receiver)
if receiver_user:
curr.write_message(sender, receiver, message, subject, creation_date, receiver_user)
else:
return jsonify({"status": "the receiver not registered yet"})
else:
return jsonify({"status": "you are not the sender in the message"})
return jsonify({"status": "successfully written message"})
except:
return jsonify({"status": "error"})
@app.route('/get_all_messages', methods=["GET"])
def get_all_messages():
"""Get all messages from the user currently logged in
"""
try:
curr = current_user.get_current_user()
except:
return jsonify({"status": "error"})
return jsonify(curr.get_messages().output_json_all_messages())
@app.route('/get_all_unread_messages', methods=["GET"])
def get_all_unread_messages():
"""Get all the unread messages from the user currently logged in
"""
try:
curr = current_user.get_current_user()
except:
return jsonify({"status": "register error"})
return jsonify(curr.get_messages().output_json_all_unread_messages())
@app.route('/read_message', methods=["GET"])
def read_message():
"""Read the last message of the logged in user, no matter if its read or not.
"""
try:
curr = current_user.get_current_user()
message = curr.get_messages().get_one_message()
except:
return jsonify({"status": "register error"})
return jsonify(message.json_output())
@app.route('/delete_message', methods=["GET"])
def delete_message():
"""Delete the last message of the logged in user, no matter if its read or not.
"""
try:
curr = current_user.get_current_user()
curr.get_messages().delete_message()
except:
return jsonify({"status": "delete message error"})
return jsonify({"status": "message deleted successfully"})
if __name__ == '__main__':
app.run(host="localhost", port='8080', debug="true")
# app.run( port='8080', debug="true")
| true |
acb576a4524f3f53273e3a3e69351558cebcf7ec | Python | tedelon/py3Gat | /py3gat_demo/Gat/util/methodtracer.py | UTF-8 | 1,998 | 2.671875 | 3 | [] | no_license | # coding=utf-8
'''
Created on 2013-7-8
@author: tiande.zhang
'''
from Gat.util.logger4py.logger import Logger
from settings import GlobalConfig
import functools
import re
def MethodTracer(msg=None):
    """Decorator factory: log every invocation of the wrapped method.

    Args:
        msg: optional message template; ``${args[i][key]}$`` placeholders
            are substituted from the call's positional arguments before
            logging (see improve_msg).

    Returns:
        A decorator that wraps the target method with invocation tracing.
    """
    def MethodInvokeTracer(method):
        @functools.wraps(method)
        def tracer(*args, **kwargs):
            # Log before delegating; only positional args are traced,
            # as before, but keyword arguments are now forwarded so
            # decorated methods can be called with kwargs too.
            traceMessage(method, args, msg)
            return method(*args, **kwargs)
        return tracer
    return MethodInvokeTracer
def traceMessage(method, args, msg=None):
    """Write an invocation record (and an optional message) to the log."""
    if not GlobalConfig.IsDebug:
        return
    logger = Logger()
    if msg:
        # Expand ${args[i][key]}$ placeholders before logging the message.
        logger.traceloginfo(improve_msg(msg, args))
    logger.tracelog("%s.%s was invoked and parameters are %s"
                    % (method.__module__, method.__name__, str(args)))
def traceFrameMessage(msg=None, level="info"):
    """Log the caller's frame info plus an optional message.

    Args:
        msg: optional text to log alongside the invoker information.
        level: "info" or "debug"; selects which logger channel records msg.
    """
    if GlobalConfig.IsDebug:
        logger = Logger()
        logger.tracelog(logger.get_invoker_info())
        if msg is not None:  # identity comparison is the idiomatic None test
            if level == "info":
                logger.traceloginfo(msg)
            elif level == "debug":
                logger.tracelog(msg)
def improve_msg(msg, args):
    """Substitute ``${args[i][key]}$`` placeholders in *msg*.

    ``${args[i][]}$`` is replaced by ``args[i]`` itself, while
    ``${args[i][key]}$`` is replaced by ``args[i][key]``. Placeholders
    are resolved left to right.
    """
    placeholder_keys = re.findall("\\${args\\[(.*?)]\\[(.*?)]}\\$", msg)
    if placeholder_keys:
        for index_str, key in placeholder_keys:
            position = int(index_str)
            key = str(key)
            if key == "":
                value = args[position]
            else:
                value = args[position][key]
            # Locate the next unresolved placeholder and splice the value in.
            placeholder = re.search("\\${(.*?)}\\$", msg).group()
            msg = msg.replace(placeholder, str(value))
    return msg
@MethodTracer("ssssssss")
def exe():
    # Small demo target used to exercise the tracing decorator.
    print("aaaa")
if __name__ == "__main__":
    # Ad-hoc manual check: run the traced demo function, then call
    # traceMessage directly. The final msg argument is deliberately
    # non-ASCII to exercise the logger's unicode handling.
    print(exe.__module__)
    exe()
    traceMessage(method=exe,args="",msg="不不不不不不木")
59edbd18f7485d4a785f1ba275849b6af6abe217 | Python | vdomos/snips-chacon | /action-vdomos-setChacon.py | UTF-8 | 3,778 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
intent message hermes/intent/vdomos:setChaconOff: {"sessionId":"19b22b6f-4dcc-459c-88ef-fc7a26f3fe90","customData":null,"siteId":"default","input":"éteint la lumière du séjour","intent":{"intentName":"vdomos:setChaconOff","probability":1.0},"slots":[{"rawValue":"séjour","value":{"kind":"Custom","value":"séjour"},"range":{"start":21,"end":27},"entity":"room","slotName":"house_room"}]}
"""
import ConfigParser
from hermes_python.hermes import Hermes
from hermes_python.ontology import *
import io
import requests
CONFIGURATION_ENCODING_FORMAT = "utf-8"  # encoding used when reading config.ini
CONFIG_INI = "config.ini"  # skill configuration file, re-read on every intent
# Mapping from spoken room name (French) to the Domogik command id that
# switches the matching Chacon plug. Several names alias the same plug.
DMGROOMSID = {"salon": "5", "canapé": "6", "séjour": "7", "cuisine": "8", "extérieur": "9", "devant": "9", "panier": "10", "chien": "10"}
# Base REST endpoint of the Domogik server; command id and desired state
# are appended to build the final URL (e.g. .../id/7?state=1).
DMGCMDURL = "http://hermes:40406/rest/cmd/id/"
class SnipsConfigParser(ConfigParser.SafeConfigParser):
    """Config parser that can dump its entire contents as a plain dict."""

    def to_dict(self):
        # Build {section: {option: value}} with explicit loops instead of
        # a doubly-nested comprehension.
        sections = {}
        for section in self.sections():
            options = {}
            for option_name, option in self.items(section):
                options[option_name] = option
            sections[section] = options
        return sections
def read_configuration_file(configuration_file):
    """Parse *configuration_file* into a nested dict; return {} on error."""
    try:
        with io.open(configuration_file,
                     encoding=CONFIGURATION_ENCODING_FORMAT) as handle:
            parser = SnipsConfigParser()
            parser.readfp(handle)
            return parser.to_dict()
    except (IOError, ConfigParser.Error):
        # Missing or malformed config degrades to an empty configuration.
        return {}
def subscribe_intent_callback(hermes, intentMessage):
    # Hermes entry point: reload the skill configuration on every intent
    # so edits to config.ini are picked up without restarting the action.
    conf = read_configuration_file(CONFIG_INI)
    action_wrapper(hermes, intentMessage, conf)
def httpSetChacon(room, roomState):
    """Send the Domogik REST command switching *room* to *roomState*.

    Args:
        room: spoken room name; must be a key of DMGROOMSID.
        roomState: "1" to switch on, "0" to switch off.

    Returns:
        True on HTTP success (status 200 or 204); False for an unknown
        room, a network failure, or any other HTTP status.
    """
    if room not in DMGROOMSID:
        print("Erreur, pièce '%s' inconnue" % room)
        return False
    url = "%s%s?state=%s" % (DMGCMDURL, DMGROOMSID[room], roomState)
    print("url = %s" % url)
    try:
        response = requests.get(url)
    except requests.exceptions.RequestException as err:
        print("Erreur RequestException: '%s'" % err)
        return False
    if response.status_code not in (200, 204):
        print("Erreur RequestHttp: '%s'" % response.status_code)
        return False
    return True
def action_wrapper(hermes, intentMessage, conf):
    """Handle a recognized setChaconOn/setChaconOff intent.

    Extracts the room slot, drives the Chacon plug through Domogik, and
    ends the voice session with a spoken result sentence.

    Args:
        intentMessage: the recognized intent (slots + intent name).
        hermes: object used to communicate on the MQTT bus (hermes protocol).
        conf: dictionary holding the skill parameters (currently unused).

    Bug fix: the original left ``result_sentence`` unassigned when no
    house_room slot was present (`else: pass`), which made the
    publish_end_session call below crash with UnboundLocalError.
    """
    print("action-vdomos-setChacon.py running ...")
    if len(intentMessage.slots.house_room) > 0:
        # We extract the value from the slot "house_room/room".
        room = intentMessage.slots.house_room.first().value
        if intentMessage.intent.intent_name == 'vdomos:setChaconOn':
            roomState = "1"
        else:
            roomState = "0"
        if httpSetChacon(room, roomState):
            if roomState == "1":
                # The response that will be said out loud by the TTS engine.
                result_sentence = "Lumiere {} allumée".format(str(room))
            else:
                result_sentence = "Lumière {} éteinte".format(str(room))
        else:
            result_sentence = "Echec commande lumiere {}".format(str(room))
    else:
        # No room slot recognized: tell the user instead of crashing.
        result_sentence = "Aucune pièce reconnue"
    hermes.publish_end_session(intentMessage.session_id, result_sentence)
if __name__ == "__main__":
    # Connect to the local MQTT broker, route both on/off intents to the
    # same callback, then block forever serving intent messages.
    with Hermes("localhost:1883") as h:
        h.subscribe_intent("vdomos:setChaconOn", subscribe_intent_callback) \
            .subscribe_intent("vdomos:setChaconOff", subscribe_intent_callback) \
            .start()
| true |
9b1564c40850edf3088898dbc7989adb7086e928 | Python | khlin216/recurrent_portfolio | /rnn_portfolio/preprocessing.py | UTF-8 | 14,897 | 2.796875 | 3 | [
"MIT"
] | permissive | """Functions that load data from txt files, as well as clean
loaded data of NaNs, zeros, and other oddities.
"""
import glob
from datetime import datetime, timedelta
from sklearn.preprocessing import StandardScaler
import numpy as np
from . import NP_DTYPE
def get_n_batch(n_timesteps, horizon, val_period,
                n_sharpe, batch_size):
    """Calculate the number of batches per epoch of neural net training.

    Args:
        n_timesteps (int): total number of timesteps of available data.
        horizon (int): window length fed to the net per sample.
        val_period (int): number of timesteps reserved for validation
            (0 disables the validation split).
        n_sharpe (int): number of portfolio outputs used per sample.
        batch_size (int): samples per batch.

    Returns:
        int: number of whole batches that fit in one training epoch.
    """
    if val_period > 0:
        usable = n_timesteps - horizon - val_period - 2 * n_sharpe + 1
    else:
        usable = n_timesteps - horizon - n_sharpe + 1
    return int(np.floor(usable / batch_size))
def draw_timeseries_batch(all_data, market_data, horizon,
                          batch_size, batch_id, randseed=1):
    """Draw one batch of contiguous time windows from the data arrays.

    Window start positions come from a seeded permutation, so the same
    (randseed, batch_id) pair always yields the same windows.

    Args:
        all_data: data the neural net uses to output a portfolio.
        market_data: open/close/high/low data used to score a portfolio.
        horizon: length of each drawn window.
        batch_size: number of windows per batch.
        batch_id: which slice of the permutation to draw.
        randseed: seed for the permutation (e.g. the epoch number).

    Returns:
        (all_batch, market_batch): arrays shaped
        (batch_size, horizon, n_features).

    Raises:
        IndexError: if batch_id is too large for the available data.
    """
    # Draw a reproducible permutation of start positions without
    # disturbing the caller's global random state.
    saved_state = np.random.get_state()
    np.random.seed(randseed)
    start_pool = np.random.permutation(all_data.shape[0] - horizon + 1)
    np.random.set_state(saved_state)
    if (batch_id + 1) * batch_size > start_pool.size:
        raise IndexError('Cant make this many batches, not enough data!')
    all_batch = np.zeros(
        (batch_size, horizon, all_data.shape[1])).astype(NP_DTYPE)
    market_batch = np.zeros(
        (batch_size, horizon, market_data.shape[1])).astype(NP_DTYPE)
    chosen = start_pool[batch_id * batch_size:(batch_id + 1) * batch_size]
    for row, start in enumerate(chosen):
        all_batch[row] = all_data[start:start + horizon]
        market_batch[row] = market_data[start:start + horizon]
    return all_batch, market_batch
def split_val_tr(all_data, market_data, valid_period, horizon,
                 n_for_sharpe, batch_id, batch_size, randseed):
    """ Make batches of data, splitting it into validation and training sets.
    Args:
      all_data: Data which the neural net uses to output a portfolio.
      market_data: Data the neural net uses to score a portfolio, consisting
        of arrays of open, close, high and low prices.
      valid_period: Number of batches of validation data.
      horizon: Size of total horizon used to predict n_for_sharpe.
      n_for_sharpe: Number of portfolios output to use
        for gradient calculation.
      batch_size: Number of data per epoch.
      batch_id: Data id in the batch.
      randseed: Can be the epoch number, helps randomize between epochs.
    Returns:
      (all_val, market_val, all_batch, market_batch). For validation data,
      the batch_id is set to 0 and the randseed to 1, so the same
      validation data is always returned. For time indexing, all_data is
      indexed -1 from market_data so that positions predicted from
      all_data are scored against market_data.
    Raises:
      ValueError: If valid_period does not divide batch_size.
    """
    all_val = None
    market_val = None
    if valid_period > 0:
        all_val, market_val = draw_timeseries_batch(
            all_data=all_data[-valid_period-horizon-n_for_sharpe+1:-1],
            market_data=market_data[-valid_period-horizon-n_for_sharpe+2:],
            horizon=horizon+n_for_sharpe-1,
            batch_size=valid_period,
            batch_id=0, randseed=1)
        market_val = market_val[:, -n_for_sharpe:, :]
        if batch_size % valid_period != 0:
            # Python-3 compatible raise: the original used the removed
            # `raise ValueError, msg` form, a SyntaxError on Python 3.
            raise ValueError('valid_period must be a divisor of batch_size!')
        # Floor division: plain `/` yields a float repetition count on
        # Python 3, which np.tile rejects. The modulus check above
        # guarantees the division is exact.
        reps = batch_size // valid_period
        all_val = np.tile(all_val, [reps, 1, 1])
        market_val = np.tile(market_val, [reps, 1, 1])
    all_batch, market_batch = draw_timeseries_batch(
        all_data=all_data[:-valid_period-n_for_sharpe-1]
        if valid_period > 0 else all_data[:-1],
        market_data=market_data[1:-valid_period-n_for_sharpe]
        if valid_period > 0 else market_data[1:],
        horizon=horizon+n_for_sharpe-1,
        batch_size=batch_size,
        batch_id=batch_id, randseed=randseed)
    market_batch = market_batch[:, -n_for_sharpe:, :]
    return all_val, market_val, all_batch, market_batch
def load_nyse_markets(start_date, postipo=100):
    """ Return NYSE ticker symbols that start before start_date-postipo.

    Args:
        start_date: first date of data to consider, as '%Y%m%d'.
        postipo: a ticker must have started trading at least this many
            days before start_date to be included.
    """
    cutoff = (datetime.strptime(start_date, '%Y%m%d') -
              timedelta(days=postipo)).strftime('%Y%m%d')
    survivors = []
    for fname in glob.glob('tickerData/*nyse.txt'):
        lines = open(fname, 'r').readlines()
        # Line 0 is a header; line 1 holds the first trading date and the
        # last line holds the most recent one, both as the first CSV field.
        first_day = int(lines[1].split(',')[0])
        last_day = int(lines[-1].split(',')[0])
        if first_day < int(cutoff) and last_day > int(start_date):
            survivors.append(fname)
    assert len(survivors) > 0, 'No stocks returned.'
    # Strip the 'tickerData/' prefix and '.txt' suffix to get symbols.
    return [path.split('/')[1][:-4] for path in survivors]
def preprocess(settings, opens, closes, highs, lows, dates,
               postipo=100, filler=0.0000001, data_types=[]):
    """Preprocesses stock price data for use in our neural nets.
    Replaces nans in market price data imported using quantiacsToolbox.py using
    the following rules:
    1) If the first day of closing prices contains nans, it is assumed that
    the stock has not yet been listed on an index.
    2) For such a stock, the open, close, high, and low prices are replaced
    with a filler value if the last day of provided stock data is less than a
    specified number of days (given by the parameter postipo) after the final
    nan in the closing prices.
    3) If the last day of provided stock data is more than postipo days after
    the final nan in the closing prices, the initial nans are replaced with the
    closing price on the first non-nan day.
    4) The first non-nan day's open, high, and low prices are also replaced
    with the close on that day.
    5) Any other nans in the closing prices are filled with the previous valid
    closing price. This includes both delisted stocks and nans from errors in
    the initial data.
    6) After these steps, any remaining nans in opens, highs, and lows are
    replaced with the closing price on the same day.
    7) The closing prices of the market CASH are replaced by a constant value.
    8) All of the input data is normalized to make their values close to 1,
    to improve the performance of the neural net training.
    9) Any nans in other price data are replaced by zeros.
    10) Selects correct data for all_data from data_types.
    Args:
      markets (list): names of markets
      opens, ..., totalcaps (np arrays): market prices given stock index
        and day index, indices look like opens[day][stock]
      dates (np array): dates in yyyymmdd format
      postipo (int): number of days to wait to begin including prices
      filler (float): value to fill with
      data_types (list): list of selected features.
    Returns:
      filled_prices (np array): horizontally concatenated array of
        preprocessed o, c, h, l
      all_data (np array): horizontally concatenated array of all data
      should_retrain (np array): boolean array, length number of stocks,
        which indicates which stocks were postipo days after their initial
        non-nan closing price on the final day of price data
    """
    # NOTE(review): the `data_types` parameter is never read below — the
    # feature selection uses settings['data_types'] instead. (Being a
    # mutable default argument it would also be shared across calls, but
    # it is never mutated here.) Confirm whether the parameter is vestigial.
    # Normalize values by magic numbers to stay within reasonable ranges.
    n_markets = opens.shape[1]
    divide_prices_by = 50000.
    opens = opens / divide_prices_by
    closes = closes / divide_prices_by
    highs = highs / divide_prices_by
    lows = lows / divide_prices_by
    # Make list of stocks for which close starts as nan. We will assume these
    # are preipo stocks in the data.
    cnans = np.isnan(closes)
    preipo = cnans[0]
    # Copy prices to make sure not to clobber past prices when nanning things.
    closes_copy = np.array(closes)
    # Prices other than closes.
    prices = [opens, highs, lows]
    prices_copy = []
    for price in prices:
        prices_copy.append(np.array(price))
    # Compute the number of days after nans stop
    # for a particular stock in close.
    daysipo = np.logical_not(cnans).cumsum(0)
    # Loop throught the days in closes.
    last_close = closes[0]
    if 'CASH' in settings['markets']:
        cashindex = settings['markets'].index('CASH')
    for day, close in enumerate(closes):
        # Replace nans with previous close in closes and closes_copy.
        # (`close` is a view into `closes`, so these writes mutate
        # `closes` in place.)
        closes_copy[day, np.isnan(close)] = last_close[np.isnan(close)]
        close[np.isnan(close)] = last_close[np.isnan(close)]
        # Replace closes which don't have enough days after ipo with nans.
        tonan = np.logical_and(daysipo[day] < postipo, preipo)
        close[tonan] = np.nan
        # Do the same for the other prices.
        for price in prices:
            price[day, tonan] = np.nan
        # If enough days have passed since ipo, replace old nans with first
        # non-nan closing price.
        if day >= postipo:
            # A stock "turns on" exactly when its count of non-nan closes
            # reaches postipo on this day.
            enoughdays = daysipo[day] == postipo
            closes[:day, enoughdays] = np.vstack(
                (np.tile(closes_copy[day-postipo+1, enoughdays],
                         (day-postipo+1, 1)),
                 closes_copy[day-postipo+1:day, enoughdays]))
            # And for the other prices, replace old with first non-nan close,
            # but restore the infomation about the other prices except on the
            # first non-nan day, where we will replace everything with close.
            for count, price in enumerate(prices):
                price[:day+1, enoughdays] = np.vstack(
                    (np.tile(closes_copy[day-postipo+1, enoughdays],
                             (day-postipo+2, 1)),
                     prices_copy[count][day-postipo+2:day+1, enoughdays])
                )
        else:
            enoughdays = np.zeros((len(close)), dtype=bool)
        if 'CASH' in settings['markets']:
            # CASH is pinned to a constant price (1 before normalization).
            close[cashindex] = 1/divide_prices_by
        last_close = close
    # The last value of enoughdays will tell us whether we "turned on" a stock.
    # NOTE(review): this relies on the loop having run at least once;
    # an empty `closes` would leave `enoughdays` undefined (NameError).
    should_retrain = enoughdays
    # Fill remaining nans in close with filler. These should only be stocks
    # which have not had enough days since ipo.
    closes[np.isnan(closes)] = filler
    # Fill all remaining nans in price matrices with closes.
    for price in prices:
        price[np.isnan(price)] = closes[np.isnan(price)]
    # Construct price matrix to return.
    filled_prices = np.hstack((opens, closes, highs, lows))
    # Turn dates into a unit circle.
    y_date, x_date = circle_dates(dates)
    all_data = np.hstack((opens, closes, highs,
                          lows, x_date[:, None], y_date[:, None]))
    all_data = all_data.astype(NP_DTYPE)
    all_data[np.isnan(all_data)] = 0
    # Run backtester with preprocessing.
    if len(settings['data_types']) == 0:
        # If no data_types are chosen, uses standard scaler on OPEN data.
        all_data = all_data[:, :n_markets]
    else:
        # Otherwise select the datatypes required.
        data = np.hstack([all_data[:, n_markets * j: n_markets * (j + 1)]
                          for j in settings['data_types']])
        all_data = data
    # Returns check to make sure nothing crazy happens!
    assert np.isnan(filled_prices).sum() == 0
    assert np.isinf(filled_prices).sum() == 0
    assert np.isnan(all_data).sum() == 0
    assert np.isinf(all_data).sum() == 0
    # Re-fit the scaler only at retraining iterations; otherwise reuse the
    # scaler fitted at the last retrain so scaling stays consistent.
    if settings['iter'] % settings['retrain_interval'] == 0:
        settings['scaler'] = StandardScaler().fit(all_data)
        all_data = settings['scaler'].transform(all_data)
    else:
        all_data = settings['scaler'].transform(all_data)
    return filled_prices, all_data, should_retrain
def circle_dates(dates):
    '''
    Map calendar dates onto the unit circle so algorithms can learn
    seasonality. A date like 20161231 becomes the (x, y) point whose
    angle is the fraction of a 366-day year elapsed, times 2*pi.

    Args:
        dates: array of dates specified as %Y%m%d integers.

    Returns:
        (y_date, x_date): sine and cosine coordinates on the unit circle
        for a 366-day year.
    '''
    # Days per month in a leap (366-day) year.
    days_in_month = np.array([31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
    y_date = np.zeros(dates.shape[0])
    x_date = np.zeros(dates.shape[0])
    for ind, date in enumerate(dates.flatten()):
        stamp = str(date)
        month = int(stamp[4:6])
        day = int(stamp[6:8])
        # Total days in the months preceding this one, plus the day of month.
        month_days = (days_in_month[0:month - 1]).sum() if month > 1 else 0
        # Approximate fraction of the year, converted to radians.
        frac_of_year = (month_days + day) / float(366)
        angle = frac_of_year * 2 * np.pi
        y_date[ind] = np.sin(angle)
        x_date[ind] = np.cos(angle)
    return y_date, x_date
def fillnans(data):
    ''' Fill in (column-wise) value gaps with the most recent non-nan value.
    Leading nans remain in place. The gaps are filled-in
    only after the first non-nan entry.
    Args:
        data (iterable): 2-D array-like of floats.
    Returns:
        np.ndarray of the same size as data with the nan-values replaced
        by the most recent non-nan entry above them in the same column.
    '''
    data = np.array(data, dtype=float)
    # Bug fix: the original iterated `zip(np.where(...))`, which yields
    # the row-index array and the column-index array themselves rather
    # than (row, col) pairs, so the unpacking failed. `zip(*...)` pairs
    # them up. np.where returns positions in row-major order, so each
    # row is filled before a later row reads it (forward fill).
    for row, col in zip(*np.where(np.isnan(data))):
        if row > 0:
            data[row, col] = data[row - 1, col]
    return data
| true |
acbe96a34712a37464b34a95fc5a3c1fe50d4ded | Python | fakegit/mecache | /mecache/_file.py | UTF-8 | 1,175 | 2.515625 | 3 | [
"MIT"
] | permissive | import os
import time
import pickle
from .core import BaseCache
class File(BaseCache):
    """Pickle-on-disk cache backend: one file per (function, key) pair."""

    def __init__(self, path):
        # Root directory for all cache entries; created eagerly.
        self.__path = path
        os.makedirs(path, exist_ok=True)

    @property
    def path(self):
        """Root directory of the cache."""
        return self.__path

    def _get_path(self, qual, key):
        # Entry layout: <root>/<qualified function name>/<key>.
        entry = os.path.join(os.path.join(self.path, qual), key)
        return entry, os.path.exists(entry)

    def _get_modifiy_time(self, filepath):
        """Return the file's last-modification time (epoch seconds)."""
        return os.stat(filepath).st_mtime

    def get_cache(self, func, key, max_time):
        """Return the cached value for (func, key), or None when the entry
        is missing or older than *max_time* seconds."""
        entry, exists = self._get_path(func.__qualname__, key)
        if exists and self._get_modifiy_time(entry) >= time.time() - max_time:
            with open(entry, 'rb') as handle:
                return pickle.load(handle)
        return None

    def set_cache(self, result, func, key, max_time):
        """Serialize *result* into the entry for (func, key), creating the
        per-function directory on first use."""
        entry, exists = self._get_path(func.__qualname__, key)
        if not exists:
            os.makedirs(os.path.dirname(entry), exist_ok=True)
        with open(entry, "wb") as handle:
            pickle.dump(result, handle)
| true |
4c68c48d691c57e8b87cc2742655c3b39db91751 | Python | hassaanaliw/uofm-halal | /halal/models.py | UTF-8 | 3,931 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | """
Defines schema for all the tables in our database. All tables are related
Hall => Menu (One to Many)
Menu => Meal (One to Many)
Meal => Course (One to Many)
Course => MenuItem (One to Many)
Hassaan Ali Wattoo - <hawattoo@umich.edu>
"""
import json
import os
from halal import db
import datetime
class Hall(db.Model):
    """A dining hall; owns the menus served there (Hall => Menu, 1-to-many)."""

    hall_id = db.Column(db.Integer, primary_key=True, index=True)
    name = db.Column(db.String(250))
    menus = db.relationship("Menu", backref="hall")

    def __init__(self, name):
        self.name = name

    def get_folder_name_format(self):
        # Returns the name formatted as the folder path name
        # Bursley Dining Hall => bursley_dining_hall
        return self.name.lower().replace(" ", "_")

    def get_id(self):
        # Bug fix: the primary-key column is `hall_id`; the original read
        # the nonexistent attribute `self.id`, raising AttributeError.
        return str(self.hall_id)

    def add(self):
        db.session.add(self)

    def add_menu(self, menu):
        self.menus.append(menu)

    def delete(self):
        db.session.delete(self)
class Menu(db.Model):
    """One day's menu at a hall (Menu => Meal, 1-to-many)."""

    menu_id = db.Column(db.Integer, primary_key=True, index=True)
    date = db.Column(db.DateTime)
    hall_id = db.Column(db.Integer, db.ForeignKey("hall.hall_id"))
    meals = db.relationship("Meal", backref="menu")
    hours = db.Column(db.String)

    def __init__(self, date):
        self.date = date

    def get_id(self):
        # Bug fix: the primary-key column is `menu_id`; the original read
        # the nonexistent attribute `self.id`, raising AttributeError.
        return str(self.menu_id)

    def load_json(self):
        """Load the raw scraped JSON for this menu's hall and date.

        Reads dining_halls_json_data/<hall_folder>/<YYYY-MM-DD>.json
        relative to this module; raises IOError if the file is absent.
        """
        dir_path = (
            os.path.dirname(os.path.realpath(__file__)) + "/dining_halls_json_data/"
        )
        dir_path = os.path.join(dir_path, self.hall.get_folder_name_format())
        date = datetime.datetime.strftime(self.date, "%Y-%m-%d")
        file_name = os.path.join(dir_path, date + ".json")
        with open(file_name, "r") as file:
            return json.load(file)

    def add_meal(self, meal):
        self.meals.append(meal)

    def add(self):
        db.session.add(self)

    def delete(self):
        db.session.delete(self)
class Meal(db.Model):
    """A meal (e.g. lunch) on a menu (Meal => Course, 1-to-many)."""

    meal_id = db.Column(db.Integer, primary_key=True, index=True)
    name = db.Column(db.String(250))
    menu_id = db.Column(db.Integer, db.ForeignKey("menu.menu_id"))
    courses = db.relationship("Course", backref="meal")

    def __init__(self, name):
        self.name = name

    def get_id(self):
        # Bug fix: the primary-key column is `meal_id`; the original read
        # the nonexistent attribute `self.id`, raising AttributeError.
        return str(self.meal_id)

    def add(self):
        db.session.add(self)

    def add_course(self, course):
        self.courses.append(course)

    def delete(self):
        db.session.delete(self)
class Course(db.Model):
    """A course within a meal (Course => MenuItem, 1-to-many)."""

    course_id = db.Column(db.Integer, primary_key=True, index=True)
    name = db.Column(db.String(250))
    meal_id = db.Column(db.Integer, db.ForeignKey("meal.meal_id"))
    menuitems = db.relationship("MenuItem", backref="course")

    def __init__(self, name):
        self.name = name

    def get_id(self):
        # Bug fix: the primary-key column is `course_id`; the original read
        # the nonexistent attribute `self.id`, raising AttributeError.
        return str(self.course_id)

    def add(self):
        db.session.add(self)

    def add_menu_item(self, menuitem):
        self.menuitems.append(menuitem)

    def delete(self):
        db.session.delete(self)
class MenuItem(db.Model):
    """A single dish served in a course; `halal` flags dietary status."""

    menuitem_id = db.Column(db.Integer, primary_key=True, index=True)
    name = db.Column(db.String(250))
    course_id = db.Column(db.Integer, db.ForeignKey("course.course_id"))
    halal = db.Column(db.Boolean, default=False)

    def __init__(self, name):
        self.name = name

    def get_id(self):
        # Bug fix: the primary-key column is `menuitem_id`; the original
        # read the nonexistent attribute `self.id`, raising AttributeError.
        return str(self.menuitem_id)

    def add(self):
        db.session.add(self)

    def delete(self):
        db.session.delete(self)

    def __repr__(self):
        """
        Returns a nicely formatted string for printing out a menuitem to the console
        Useful for debugging

        Also demonstrates how we can move upwards through objects to relate a MenuItem
        all the way back to a Dining Hall
        :return: string
        """
        return "<MenuItem %s for %s at %s>" % (
            self.name,
            self.course.name,
            self.course.meal.menu.hall.name,
        )
| true |