blob_id
stringlengths
40
40
language
stringclasses
1 value
repo_name
stringlengths
5
133
path
stringlengths
2
333
src_encoding
stringclasses
30 values
length_bytes
int64
18
5.47M
score
float64
2.52
5.81
int_score
int64
3
5
detected_licenses
listlengths
0
67
license_type
stringclasses
2 values
text
stringlengths
12
5.47M
download_success
bool
1 class
8edf2482f1a249b4a156f4d5e08eb5e79a4b4422
Python
ssgalitsky/pymm
/pymm/access.py
UTF-8
11,266
3.390625
3
[ "MIT" ]
permissive
import re class ChildSetupVerify: """hold onto method to verify ChildSubset or SingleChild setup arguments """ @staticmethod def _verify_identifier_args(identifier): """verify that identifier dict keys contain valid (and only valid) identifiers. tag and tag_regex must contain string identifiers, while attrib_regex must be a dict with string identifiers. In the case of tag_regex and attrib_regex, the strings will be used for regex matching """ expected = {'tag': str, 'tag_regex': str, 'attrib_regex': dict} keys_expected = set(expected.keys()) keys_got = set(identifier.keys()) unexpected_keys = keys_got.difference(keys_expected) if not keys_got: raise KeyError('Expected either tag/tag_regex and/or attrib_regex') if unexpected_keys: raise KeyError('Unexpected keys found:' + str(unexpected_keys)) incompatible = set(('tag', 'tag_regex',)) if incompatible.issubset(keys_got): raise KeyError('Cannot specify both tag and tag_regex matching') for key in keys_got: val_type = expected[key] value = identifier[key] if not value or not isinstance(value, val_type): raise ValueError( str(key) + ' should be non-empty and have value of type ' + str(val_type) ) class ChildSubsetSimplified(ChildSetupVerify): """Provide simplified access to specific child elements through regex matching of descriptors such as tag, attributes, or a combination thereof. For example, if you want to simply match a tag (or tags), pass in a regular expression string that will fully match the desired tag(s). e.g. 'node|cloud' # matches any If you want to match a set of attributes, pass in a dictionary containing regexes to fully match the key(s) and value(s) of the element's attributes. For example: {'TEXT':'.*'} matches any element with a 'TEXT' attribute. {'.*': '.*flag.*'} matches any element with 'flag' in its value. {'COLOR': '.*'} matches anything with a 'COLOR' attribute. You can include any number of tag and attribute regexes, each separated by a comma. 
All descriptors will have to fully match in order for an element to qualify as part of this subset. Most useful for allowing access to child nodes. Provide access to slicing, removal, appending :param element: the linked element whose children will be available through ElementAccessor :param descriptor: the list of specific descriptor of elements to group and provide access to. """ def __init__(self, elementInstance, **identifier): self._verify_identifier_args(identifier) self.TAG = identifier.get('tag', None) self.TAG_REGEX = identifier.get('tag_regex', None) self.ATTRIB_REGEX = identifier.get('attrib_regex', {}) self.parent = elementInstance @classmethod def setup(cls, **identifier): """Return getter and setter methods for self, such that returned functions can be used in defining a property of an element """ self = cls(None, **identifier) def getter(parent): self.parent = parent return self def setter(parent, iterable): self.parent = parent self[:] = iterable return getter, setter def append(self, element): self.parent.children.append(element) def remove(self, element): self.parent.children.remove(element) def __len__(self): return len(self[:]) def __getitem__(self, index): if isinstance(index, int): # speed shortcut for i, elem in enumerate(self): if i == index: return elem raise IndexError('list index out of range') elements = [e for e in self] return elements[index] def __iter__(self): """Iterate through _parent's children, yielding children when they match tag/tag_regex and/or attrib_regex """ for elem in self.parent.children: if self._element_matches(elem): yield elem def _element_matches(self, elem): """return true if element matches all identifier criteria, which can include tag, tag_regex, and attrib_regex """ matches = lambda x, y, rx, ry: \ re.fullmatch(rx, x) and re.fullmatch(ry, y) if self.TAG: if self.TAG != elem.tag: return False if self.TAG_REGEX: if not re.fullmatch(self.TAG_REGEX, elem.tag): return False for regK, regV in 
self.ATTRIB_REGEX.items(): matching_attrib = [ key for key, val in elem.attrib.items() \ if matches(key, val, regK, regV) ] if not matching_attrib: return False return True def __setitem__(self, index, elem): """remove element(s), then re-appends after modification. Sloppy, but it works, and elements are reordered later anyways. What really matters is that the order of elements of the same tag are not altered. Note that this is very inefficient because the list is reconstructed each time a set-operation is applied """ if isinstance(index, int): e = self[index] i = self.parent.children.index(e) self.parent.children[i] = elem return subchildren = list(self) for element in subchildren: self.parent.children.remove(element) subchildren[index] = elem for element in subchildren: self.parent.children.append(element) def __delitem__(self, index): if isinstance(index, int): element = self[index] i = self.parent.children.index(element) del self.parent.children[i] elif isinstance(index, slice): indices = [] for element in self[index]: i = self.parent.children.index(element) indices.append(i) indices.sort() # delete indices from largest index to smallest for i in reversed(indices): del self.parent.children[i] class ChildSubsetCompare: """implement methods for comparing lists""" def _assert_other_is_comparable(self, other): if isinstance(other, ChildSubsetSimplified) or isinstance(other, list): return raise TypeError( 'cannot compare: ' + str(type(self)) + str(type(other)) ) def __lt__(self, other): self._assert_other_is_comparable(other) return list(self) < list(other) def __gt__(self, other): self._assert_other_is_comparable(other) return list(self) > list(other) def __le__(self, other): self._assert_other_is_comparable(other) return list(self) <= list(other) def __ge__(self, other): self._assert_other_is_comparable(other) return list(self) >= list(other) def __eq__(self, other): self._assert_other_is_comparable(other) return list(self) == list(other) def __ne__(self, other): 
self._assert_other_is_comparable(other) return list(self) != list(other) class ChildSubset(ChildSubsetSimplified, ChildSubsetCompare): """Provide access to specific elements within an element through matching of descriptor. Most useful for allowing access to child nodes. Provide access with indexing, slicing, removal, appending, etc. :param element: the linked element whose children will be available through ElementAccessor :param descriptor: the list of specific descriptor of elements to group and provide access to. """ def pop(self, index=-1): """Remove and return element in children list""" children = list(self) elem = children.pop(index) self.parent.children.remove(elem) return elem def extend(self, elements): self.parent.children.extend(elements) class SingleChild(ChildSetupVerify): """Provide access to a single child within an element's children. It does not directly store the child, but rather provides functions for getting, setting, and deleting the specified child from a parent element's children attribute. This is meant to be instantiated as a class property. Pass the setup fxn a tag_regex or attrib_regex in the same fashion as specifying a ChildSubset, and pass the returned values to property(). You can look at an example in Node.cloud. """ @classmethod def setup(cls, **identifier): cls._verify_identifier_args(identifier) def getter(parent): return parent.find(**identifier) def deleter(parent): deleteable = parent.find(**identifier) if deleteable is not None: parent.children.remove(deleteable) def setter(parent, child): """replace or remove child. If child passed is None, will delete first matching child. 
Otherwise will replace existing child with passed child or append to end of children """ if child is None: deleter(parent) return replaceable = parent.find(**identifier) if replaceable is None: parent.children.append(child) return i = parent.children.index(replaceable) parent.children[i] = child return getter, setter, deleter class SingleAttrib: """property-instantiated class to provide get/set/del access for a single attrib value within an element. For example, Node provides a .text property which accesses its .attrib['TEXT']. If del node.text were called, it would replace the attrib value an empty string: ''. In this example, attrib_name = 'TEXT', and default_value = '' Init this within a class as a property like: text = property(*SingleAttrib(attrib_name, default_value)) """ @staticmethod def setup(attrib_name, default_value): def getter(element): return element.attrib.get(attrib_name, default_value) def setter(element, value): element.attrib[attrib_name] = value def deleter(element): element.attrib[attrib_name] = default_value return getter, setter, deleter class Link: """link for a node. Sets and gets attrib['LINK'] for attached node. If user links a node, set attrib['LINK'] = node.attrib['ID'] """ @staticmethod def setup(ElementClass): def getter(parent): return parent.attrib.get('LINK') def setter(parent, url): if isinstance(url, ElementClass): url = url.attrib.get('ID') parent.attrib['LINK'] = url def deleter(parent): parent.attrib['LINK'] = None del parent.attrib['LINK'] return getter, setter, deleter
true
ca26b74cefb845b55fc2cff0d80250a6ad6627c0
Python
sitesh221b/learningPythonAcadView
/assignment7.py
UTF-8
1,410
4.53125
5
[]
no_license
# QUESTION 1
def circle_area(radius):
    """Return the area of a circle with the given radius (pi approximated as 3.14)."""
    return 3.14 * radius ** 2


# QUESTION 2
def perfect(num):
    """Print whether `num` equals the sum of its proper divisors (a perfect number)."""
    total = sum(i for i in range(1, num) if num % i == 0)
    if total == num:
        print('It is a Perfect Number')
    else:
        print('It is NOT a Perfect Number')


# QUESTION 3
def table(x):
    """Recursively print the 12-times table from x through 10, then a closing line."""
    if x >= 11:  # was `== 11`: a start value above 11 recursed forever
        print('Table Over!')
    else:
        print('12 * ', x, ' = ', 12 * x)
        table(x + 1)


# QUESTION 4
def power(x, y):
    """Return x**y for non-negative integer y, computed recursively.

    The original accumulated into a shared global `p`, so a second call
    (or a later factorial() call) produced wrong results; this version
    is pure and always correct.
    """
    if y == 0:
        return 1
    return x * power(x, y - 1)


# QUESTION 5
def factorial(x):
    """Return x! for x >= 1, computed recursively (no shared global state)."""
    if x == 1:
        return 1
    return x * factorial(x - 1)


def main():
    """Interactive driver for the exercises (questions 1, 2 and 4)."""
    r = int(input('Enter a radius: '))
    print('Area is: ', circle_area(r))

    print('Following are the perfect numbers from 1-1000: ')
    for i in range(1, 1001):
        if sum(j for j in range(1, i) if i % j == 0) == i:
            print(i, end=" ")
    # Was `n = input(...)`: the raw string then crashed perfect() with a TypeError.
    n = int(input('Enter a number: '))
    perfect(n)

    x = int(input('Enter the base number x: '))
    y = int(input('Enter the power y: '))
    print('x^y = ', power(x, y))


if __name__ == '__main__':
    # Guarded so importing this module no longer prompts for input.
    main()
true
64958df313a8d1f1b38de80636078b6e95082f67
Python
wmporter/advent2019
/day11/police.py
UTF-8
5,580
3.09375
3
[]
no_license
import sys input_file = 'input' rel_base = 0 # Return current panel color or black if panel has not been painted def get_input(): try: input_value = panels[current] except KeyError: input_value = 0 return input_value # Addition and multiplication operations # Opcodes 1 and 2 def add_or_mult(memory, oper, left, right, result): if oper // 100 % 10 == 0: left = memory[left] elif oper // 100 % 10 == 2: left = memory[left + rel_base] if oper // 1000 % 10 == 0: right = memory[right] elif oper // 1000 % 10 == 2: right = memory[right + rel_base] if oper // 10000 % 10 == 2: result = result + rel_base if oper % 100 == 1: memory[result] = left + right else: memory[result] = left * right # Save input to a memory location # Opcode 3 def input_and_save(memory, oper, dest, input_value): if oper // 100 % 10 == 2: dest = dest + rel_base memory[dest] = input_value # Read memory location and output # Opcode 4 def output(memory, oper, src): if oper // 100 % 10 == 0: src = memory[src] elif oper // 100 % 10 == 2: src = memory[src + rel_base] output_buffer.append(src) # Jump if true and jump if false operations # Return the jump destination if condition met # Otherwise, return the current instruction pointer position # advanced by 3 # Opcodes 5 and 6 def jmp_if_bool(memory, pos, oper, cond, dest): if oper // 100 % 10 == 0: cond = memory[cond] elif oper // 100 % 10 == 2: cond = memory[cond + rel_base] if oper // 1000 % 10 == 0: dest = memory[dest] elif oper // 1000 % 10 == 2: dest = memory[dest + rel_base] # Opcode 5 is jump if true, opcode 6 is jump if false if (oper % 100 == 5 and cond) or (oper % 100 == 6 and not cond): return dest else: return pos + 3 # "Less than" and "equal to" operations # Opcodes 7 and 8 def compare(memory, oper, left, right, dest): if oper // 100 % 10 == 0: left = memory[left] elif oper // 100 % 10 == 2: left = memory[left + rel_base] if oper // 1000 % 10 == 0: right = memory[right] elif oper // 1000 % 10 == 2: right = memory[right + rel_base] if oper // 10000 % 
10 == 2: dest = dest + rel_base if oper % 100 == 7 and left < right: memory[dest] = 1 elif oper % 100 == 8 and left == right: memory[dest] = 1 else: memory[dest] = 0 # Adjust relative base # Opcode 9 def rel_base_adjust(memory, oper, value): global rel_base if oper // 100 % 10 == 0: value = memory[value] elif oper // 100 % 10 == 2: value = memory[value + rel_base] rel_base += value # This function is our advanced intcode computer # It executes the specified program and returns the output def computer(program): global output_buffer, current, direction # Start at position zero pos = 0 # Copy the "program" into "memory" # Since it's a list, making changes to program will affect the one in caller too memory = program[:] while True: oper = memory[pos] opcode = oper % 100 if opcode in [1,2]: add_or_mult(memory, *memory[pos:pos+4]) pos += 4 elif opcode == 3: # Modification of input to use the current panel color as input input_value = get_input() input_and_save(memory, *memory[pos:pos+2], input_value) pos += 2 elif opcode == 4: output(memory, *memory[pos:pos+2]) # Modification of output to process the output buffer when it has two values # Paint the square, change directions, and move if len(output_buffer) == 2: panels[current] = output_buffer[0] if output_buffer[1] == 0: direction = (direction - 1) % 4 else: direction = (direction + 1) % 4 move = directions[direction] current = (current[0] + move[0], current[1] + move[1]) output_buffer = [] pos += 2 elif opcode in [5, 6]: pos = jmp_if_bool(memory, pos, *memory[pos:pos+3]) elif opcode in [7, 8]: compare(memory, *memory[pos:pos+4]) pos += 4 elif opcode == 9: rel_base_adjust(memory, *memory[pos:pos+2]) pos += 2 elif opcode == 99: break else: print(f'Error, invalid opcode: {opcode}', file=sys.stderr) exit(1) return memory[0] def part_one(): # The program instructions are stored as text # Read it in and convert to list of ints with open(input_file) as f: commands = list(map(lambda x: int(x), f.read().split(','))) extra_mem = 
[0 for x in range(5000)] commands.extend(extra_mem) computer(commands) return len(panels.keys()) # Print all the panels, visited and unvisited def part_two(): rows = [r for r,c in panels.keys()] cols = [c for r,c in panels.keys()] row_min = min(rows), row_max = max(rows) col_min = min(cols), col_max = max(cols) symbols = ['.', '#'] for r in range(row_min, row_max+1): for c in range(col_min, col_max+1): try: print(symbols[panels[(r, c)]], end='') except KeyError: print('.', end='') print() print(part_one()) # Must run part_one() before running part_two() part_two()
true
bfcb3495299c13c77a55eec49aa061aa976f823d
Python
shen-huang/selfteaching-python-camp
/19100401/shense01/1001S01E05_array.py
UTF-8
923
4.46875
4
[]
no_license
# Reverse the array [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], join it into a string,
# slice out the 3rd through 8th characters (inclusive), reverse that slice,
# convert the result to an int, and print it in binary, octal and hexadecimal.
shu = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
shu.reverse()  # flip the array in place
print(shu)
# Join the reversed digits into a single string.
shu = [str(digit) for digit in shu]
zifuchuan = ''.join(shu)
print(zifuchuan)
# Slice out characters 3..8 (1-based, inclusive).
r1 = zifuchuan[2:8]
print(r1)
# Reverse the slice and convert it to an integer.
zhengshu = int(r1[::-1])
print(zhengshu)
print(bin(zhengshu))  # binary
print(oct(zhengshu))  # octal
print(hex(zhengshu))  # hexadecimal
true
5395634db3b488f4e2b5afbd594315b286771961
Python
ymwondimu/PracticeAlgos
/binary_search.py
UTF-8
716
3.8125
4
[]
no_license
def binary_search(arr, low, high, x):
    """Return True if x occurs in the sorted list arr within [low, high).

    `high` is exclusive, matching the original call convention
    binary_search(arr, 0, len(arr), x).

    The previous version recursed on slices (arr[low:mid]) while also
    passing the old absolute indices into the re-based slice, silently
    dropping elements and producing false negatives (e.g. 12 in the
    sample array was reported missing). This iterative version searches
    in place.
    """
    while low < high:
        mid = get_midpoint(low, high)
        if arr[mid] == x:
            return True
        if arr[mid] < x:
            low = mid + 1   # discard left half including mid
        else:
            high = mid      # discard right half including mid
    return False


def get_midpoint(low, high):
    """Return the index midway between low and high.

    The previous version returned the half-width (high - low) // 2,
    which is only a valid index when low == 0.
    """
    return (low + high) // 2


def main():
    """Demo: search for 12 in a small sorted array and print the result."""
    arr = [2, 3, 5, 8, 12, 16, 18]
    x = 12
    n = len(arr)
    b = binary_search(arr, 0, n, x)
    print(b)


if __name__ == "__main__":
    main()
true
4bab9b09ca28dbace0505b32bc85dce2436ce1d1
Python
ray-tracer96024/ProjectEulerProblems
/problem_44_pent_nums.py
UTF-8
834
3.875
4
[]
no_license
import math


def is_pentagonal_number(n):
    """Return True if n is a pentagonal number P_k = k(3k-1)/2 for some k >= 1.

    n is pentagonal iff (1 + sqrt(24n + 1)) / 6 is a positive integer.
    Uses math.isqrt for an exact integer test; the previous float-sqrt
    check loses precision for large n and raised on negative input.
    Integral floats are accepted (callers may compute candidates with /2).
    """
    m = int(n)
    if m != n or m < 1:  # reject non-integers and non-positive values
        return False
    disc = 24 * m + 1
    root = math.isqrt(disc)
    return root * root == disc and (root + 1) % 6 == 0


def main():
    """Project Euler 44: print the smallest difference D = P_i - P_j such that
    both the sum and the difference of the two pentagonal numbers are
    themselves pentagonal (first hit in the original scan order)."""
    i = 1
    while True:
        num1 = i * ((3 * i) - 1) // 2  # P_i, kept in exact integer arithmetic
        for j in range(1, i):
            num2 = j * ((3 * j) - 1) // 2  # P_j
            if is_pentagonal_number(num1 + num2) and is_pentagonal_number(num1 - num2):
                print(num1 - num2)
                return
        i += 1


if __name__ == '__main__':
    main()
true
4b3689f705ffa501b5f881e41e92546ae16f9761
Python
JinYeJin/algorithm-study
/November,2020~July,2021/2020-11-13/2671_최민영_잠수함식별.py
UTF-8
468
3.140625
3
[]
no_license
import re

# Accepted "submarine" signal grammar: one or more repetitions of either
# "100+1+" (a 1, at least two 0s, then at least one 1) or "01".
_SIGNAL = re.compile('(100+1+|01)+')


def solution(str):
    """Classify a 0/1 signal string (Baekjoon 2671).

    Returns "SUBMARINE" when the whole string matches (100+1+|01)+,
    otherwise "NOISE". Equivalent to the original fullmatch-then-replace
    logic, but without the bare `except` that was used as control flow
    (fullmatch simply returns None on failure).

    Note: the parameter keeps its original name `str` (shadowing the
    builtin) for call compatibility.
    """
    return "SUBMARINE" if _SIGNAL.fullmatch(str) else "NOISE"


if __name__ == '__main__':
    # Read from stdin only when run as a script, so importing this module
    # no longer blocks on input().
    print(solution(input()))
true
7ca59c01eef19d14b17b59836e18e8e11156622c
Python
nelimee/qtoolkit
/qtoolkit/data_structures/nearest_neighbour_structure.py
UTF-8
8,293
2.59375
3
[ "BSD-3-Clause", "CECILL-B", "MIT", "LicenseRef-scancode-cecill-b-en" ]
permissive
# ====================================================================== # Copyright CERFACS (October 2018) # Contributor: Adrien Suau (adrien.suau@cerfacs.fr) # # This software is governed by the CeCILL-B license under French law and # abiding by the rules of distribution of free software. You can use, # modify and/or redistribute the software under the terms of the # CeCILL-B license as circulated by CEA, CNRS and INRIA at the following # URL "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided # only with a limited warranty and the software's author, the holder of # the economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards # their requirements in conditions enabling the security of their # systems and/or data to be ensured and, more generally, to use and # operate it in the same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-B license and that you accept its terms. # ====================================================================== """Contains a wrapper on nearest-neighbours structures available in Python. Currently, the wrapper is quite inefficient because it initialise and query sequentially 2 different data-structures: #. An instance of :py:class:`annoy.Annoy` specialised in performing **approximate** nearest-neighbour (ANN) queries. #. 
An instance of :py:class:`scipy.spatial.cKDTree` specialised in performing **exact** nearest-neighbour (ENN or NN) queries. For the moment the 2 data-structures are kept like this. The reason is that I need more insights on several factors to know which one I should use: #. The maximal number of qubits on which the Solovay-Kitaev algorithm will be used. The size of the search space grows exponentially with this factor, and this size may become too large to perform exact nearest-neighbour searches. #. The error that the Solovay-Kitaev algorithm can handle. The paper `The Solovay-Kitaev algorithm, Christopher M. Dawson, Michael A. Nielsen, \ 2005 <https://arxiv.org/abs/quant-ph/0505030>`_ computed a maximum allowable error of 1/32. This needs to be investigated. #. The computational time gain: are the ANN queries faster than NN queries for our dataset (high dimensionality, reasonably high number of data). """ import copy import os.path import pickle import typing import annoy import numpy import scipy.spatial import qtoolkit.data_structures.quantum_circuit.quantum_circuit as qcirc import qtoolkit.utils.constants.others as qconsts import qtoolkit.utils.types as qtypes class NearestNeighbourStructure: """A generic and efficient data structure for nearest-neighbour requests.""" def __init__(self, data_size: int) -> None: """Initialise the NearestNeighbourStructure instance. :param data_size: Length of item vector that will be indexed """ self._annoy_index = annoy.AnnoyIndex(data_size, metric="euclidean") self._scipy_kdtree = None self._scipy_data = list() self._quantum_circuits = list() self._tree_number = -1 def add_item(self, index: int, quantum_circuit: qcirc.QuantumCircuit) -> None: """Add the given `quantum_circuit` to the indexed items. :param index: Unique index for the given quantum circuit. See the `Annoy documentation \ <https://github.com/spotify/annoy#full-python-api>`_ for more information. 
:param quantum_circuit: The quantum circuit that needs to be added to the search space. """ matrix = quantum_circuit.matrix vector = numpy.concatenate( (numpy.real(matrix).reshape((-1, 1)), numpy.imag(matrix).reshape((-1, 1))) ) self._annoy_index.add_item(index, vector) self._scipy_data.append(vector) self._quantum_circuits.append(copy.copy(quantum_circuit).compress()) def build(self, tree_number: int = 10) -> None: """Build the nearest-neighbour structure. This method should be called only once and only when all the item composing the search space have been added with :py:meth:`~.NearestNeighbourStructure.add_item`. Once this method has been called, items can no longer be added to the search space. :param tree_number: Parameter controlling the precision of the ANN and the computational cost of each ANN query. See the `Annoy \ documentation <https://github.com/spotify/annoy#full-python-api>`_ for more information. """ self._tree_number = tree_number self._annoy_index.build(tree_number) self._scipy_data = numpy.array(self._scipy_data).reshape( (len(self._quantum_circuits), -1) ) self._scipy_kdtree = scipy.spatial.cKDTree(self._scipy_data) def save(self, filename: str) -> None: """Save the underlying NN structures on disk. This method will create 2 files: 1. "`filename`": save of the Annoy data structure. 2. "`filename`.circ": save of the compressed :py:class:`~.QuantumCircuit`. :param filename: The filename used to save the data. """ filepath = os.path.join(qconsts.data_dir, filename) self._annoy_index.save(filepath) with open(filepath + ".circ", "wb") as of: pickle.dump(self._quantum_circuits, of) def load(self, filename: str) -> None: """Reconstruct the underlying NN structures from a file on disk. :param filename: The filename used to save the data. 
""" filepath = os.path.join(qconsts.data_dir, filename) self._annoy_index.load(filepath) with open(filepath + ".circ", "rb") as input_file: self._quantum_circuits = pickle.load(input_file) n = len(self._quantum_circuits) dim = 2 ** self._quantum_circuits[0].qubit_number m = 2 * dim ** 2 self._scipy_data = numpy.zeros((n, m)) for idx, circuit in enumerate(self._quantum_circuits): matrix = circuit.matrix vector = numpy.concatenate( ( numpy.real(matrix).reshape((1, -1)), numpy.imag(matrix).reshape((1, -1)), ), axis=1, ) self._scipy_data[idx] = vector self._scipy_kdtree = scipy.spatial.cKDTree(self._scipy_data) def query( self, matrix: qtypes.UnitaryMatrix ) -> typing.Tuple[float, qcirc.QuantumCircuit]: """Query the underlying data structure for nearest-neighbour of matrix. .. warning:: For the moment this method perform 2 queries: one NN query and one ANN query. See :py:mod:`~.nearest_neighbour_structure` docstring for more information. :param matrix: The matrix we are searching an approximation for. :return: The distance of the found approximation along with the index of the approximation. """ vector = numpy.concatenate( (numpy.real(matrix).reshape((-1, 1)), numpy.imag(matrix).reshape((-1, 1))) ) # Nearest neighbours nns, dists = self._annoy_index.get_nns_by_vector( vector, 1, include_distances=True ) dists_scipy, nns_scipy = self._scipy_kdtree.query(vector.reshape((-1,)), 1) # # BRUTEFORCE # distances = numpy.linalg.norm( # self._bruteforce_data - su2_trans.su2_to_so3(matrix), axis=1) # idx = numpy.argmin(distances) return dists_scipy, self._quantum_circuits[nns_scipy].uncompress()
true
91ca566714b759acf5ad09e212889f596f867215
Python
365sec/texam
/llh329/20190329/2-合并数组.py
UTF-8
253
3.125
3
[]
no_license
# Read two bracketed lists from stdin — one of ints, one of quoted strings —
# and pair them positionally into a list of single-entry {int: str} dicts.
import sys

nums_line = input("")
# Strip the surrounding brackets and parse the comma-separated integers.
num = [int(tok) for tok in nums_line[1:-1].split(",")]

strs_line = input("")  # e.g. "['a', 'b', 'c']"
# Strip brackets, then trim whitespace and the surrounding quote characters.
str1 = [tok.strip()[1:-1] for tok in strs_line[1:-1].split(",")]

# One {number: string} dict per position. Indexing (rather than zip) keeps
# the original IndexError behaviour when the string list is shorter.
dest = [{num[k]: str1[k]} for k in range(len(num))]
print(dest)
true
0eb1788fa0a45e4e71a5e8f42370ad7c8ecf1108
Python
Haslas/encryption_entry
/simulations/sim6.py
UTF-8
2,481
3.390625
3
[]
no_license
import hashlib

# Banner art. The original line breaks were lost in transit, so this is a
# best-effort reconstruction of the "PopQuiz" figlet banner.
title = """
  _____             ____        _
 |  __ \\           / __ \\      (_)
 | |__) |__  _ __ | |  | |_   _ _ ____
 |  ___/ _ \\| '_ \\| |  | | | | | |_  /
 | |  | (_) | |_) | |__| | |_| | |/ /
 |_|   \\___/| .__/ \\___\\_\\\\__,_|_/___|
            | |
            |_|
"""

# The password is the combination of the un-hashed answers; only the
# SHA-512 digests of the answers are stored in the source.
hashedAnswers = [
    '3787aa805615d57b2f511dc1af9102a0b8164a8ae8d2159692a9d4cc9e033a13d45516ba789bbfe1b9455b1a3641e87ed1101c8b70574591f63e4a0e74983b46',
    'e50f1295375dc64b15cb533251d6fbe3e868eb01091e223b8f864285f6b045ce8004ffaf678eb89af689e9b8599461f140486308b19deedbc1f1910d421ad6de',
    'b89ac1aaf8abeffd1449785a9cfce633ab6fb331dd672cfa1ea212bdb01533fe917ec7ad9d9139ad7021dfb09cab964751588f3bf2f7ea3a73107fa4a9cd8e96',
    '99f97d455d5d62b24f3a942a1abc3fa8863fc0ce2037f52f09bd785b22b800d4f2e7b2b614cb600ffc2a4fe24679845b24886d69bb776fcfa46e54d188889c6f',
]

questions = [
    "Who wanted steam to do their maths for them?",
    "What is the earliest known cipher?",
    "In what century was the Vigenere Cipher developed? (in the form ____eenth )",
    "What is the second most frequent letter in the English Alphabet? (in the form _ )",
]


def main():
    """Run the quiz, then reveal the password built from the answers."""
    print(title)
    print("\n\nAll of the answers have been stated in this program.")
    print("But you'll probably still end up googling them.")
    input("Press enter to start quiz...")
    print("\n")
    answers = quizLoop()
    password = ""
    for answer in answers:
        password += answer.replace(" ", "")  # pesky babbage
    print("YOU FINISHED THE POP QUIZ")
    print("password: " + password)
    print("\nCommands: exit()")
    running = True
    while running:
        command = input("Command: ")
        if command == "exit()":
            running = False
        else:
            print("Did not understand that")


def quizLoop():
    """Ask each question until its hashed answer matches; return the answers."""
    answers = []
    # Was a bare `except:` around the whole loop, which also swallowed
    # KeyboardInterrupt; narrowed to Exception while keeping the original
    # "carry on anyway" behaviour.
    try:
        for index in range(0, len(questions)):
            correct = False
            while not correct:
                print(questions[index])
                answer = input("Answer: ")
                if hashAnswer(answer) == hashedAnswers[index]:
                    correct = True
                    answers.append(answer)
                    print("Correct!")
                else:
                    print("Incorrect")
    except Exception:
        print("There was an error but it's all okay now")
    return answers


def hashAnswer(answer):
    """Return the SHA-512 hex digest of the lower-cased, UTF-8 encoded answer."""
    answer = answer.lower()
    answer = answer.encode("UTF-8")
    hashObject = hashlib.sha512(answer)
    return hashObject.hexdigest()


if __name__ == "__main__":
    # Guarded so that importing this module no longer launches the quiz
    # (the original called main() unconditionally at import time).
    main()
true
c8f3d26eef0aa116651848190cc9644105b825e2
Python
maulberto3/netw
/zOthers/ftp_standard.py
UTF-8
1,061
2.75
3
[]
no_license
from ftplib import FTP
from random import randint

# SIMPLE FTP standard — probe random addresses for anonymous FTP servers.


def rand_adr():
    """Return a random dotted-quad address with each octet in [0, 223]."""
    return f'{randint(0,223)}.{randint(0,223)}.{randint(0,223)}.{randint(0,223)}'


def main():
    """Try 1000 random hosts and print any anonymous FTP listing found."""
    # Create/truncate the results file once. The original opened it twice
    # and called file.write(randint()), which raised TypeError immediately
    # (randint requires two arguments and write requires a str).
    with open('ftp_ok_hosts.txt', 'w+'):
        pass

    for _ in range(1000):
        adr = rand_adr()
        print(f'Trying {adr}...')
        try:
            # Connect to the host on the default port with a short timeout.
            with FTP(adr, timeout=1) as ftp_conn:
                ftp_conn.login()  # user anonymous, passwd anonymous@
                ftp_conn.dir()    # list root directory contents
        except Exception as e:  # was BaseException; keep scanning on failure
            print()
            print(e)


if __name__ == '__main__':
    # Guarded so importing this module does not start the network scan.
    main()
true
8ad095d09a0258fbfd16e38c1508157484d14f77
Python
haok61bkhn/Motion_detection
/test.py
UTF-8
591
2.65625
3
[]
no_license
import imutils
import cv2
import numpy as np
from motion_detection import Motion_Detection

# Seed the detector with the first frame captured from the webcam.
cap = cv2.VideoCapture(0)
_, frame = cap.read()
mtd = Motion_Detection(first_frame=frame)
font = cv2.FONT_HERSHEY_SIMPLEX

while True:
    ret, frame = cap.read()
    # Label each frame according to whether motion was detected.
    text = "movement" if mtd.detect(frame) else "No_movement"
    cv2.putText(frame, str(text), (10, 35), font, 0.75, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow("image", frame)
    # Quit when the user presses 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.waitKey(0)
cv2.destroyAllWindows()
cap.release()
true
fa69fa198e5f56668b5be43e1c9b8291166010f6
Python
Blowoffvalve/OpenCv
/DL4CV/utilities/preprocessing/imagetoarraypreprocessor.py
UTF-8
596
2.984375
3
[]
no_license
from keras.preprocessing.image import img_to_array


class ImageToArrayPreprocessor:
    """Reorder an image's axes using Keras' img_to_array.

    `dataFormat` may be 'channels_first' (d x h x w), 'channels_last'
    (h x w x d), or None — in which case the Keras default specified in
    ~/.keras/keras.json is used.
    """

    def __init__(self, dataFormat=None):
        # Remember the requested layout for later preprocess() calls.
        self.dataFormat = dataFormat

    def preprocess(self, image):
        """Return `image` with its dimensions arranged per self.dataFormat."""
        layout = self.dataFormat
        return img_to_array(image, data_format=layout)
true
ed5e27a5e27d9913d5a00067e3ce6f7e7f2cfba1
Python
NKcell/leetcode
/108.Convert Sorted Array to Binary Search Tree/108.py
UTF-8
1,245
3.734375
4
[]
no_license
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def sortedArrayToBST(self, nums):
        """
        :type nums: List[int]
        :rtype: TreeNode

        Build a height-balanced BST by repeatedly rooting each subtree at
        the (floor) midpoint of its index range.
        """
        if not nums:
            return None
        first, last = 0, len(nums) - 1
        mid = (first + last) // 2
        root = TreeNode(nums[mid])
        self.creatLtree(first, mid - 1, root, nums)
        self.creatRtree(mid + 1, last, root, nums)
        return root

    def creatLtree(self, first, last, root, nums):
        # Attach the midpoint of nums[first..last] as root's LEFT child,
        # then recurse into both halves; empty ranges attach nothing.
        if first > last:
            return
        mid = (first + last) // 2
        root.left = TreeNode(nums[mid])
        self.creatLtree(first, mid - 1, root.left, nums)
        self.creatRtree(mid + 1, last, root.left, nums)

    def creatRtree(self, first, last, root, nums):
        # Attach the midpoint of nums[first..last] as root's RIGHT child,
        # then recurse into both halves; empty ranges attach nothing.
        if first > last:
            return
        mid = (first + last) // 2
        root.right = TreeNode(nums[mid])
        self.creatLtree(first, mid - 1, root.right, nums)
        self.creatRtree(mid + 1, last, root.right, nums)
true
f42826c5f0df2f0ae259a1d40a8269b3cb92aca1
Python
wdbronac/cooking
/code/naive_bayes_classification.py
UTF-8
7,737
3.234375
3
[]
no_license
# Naive Bayes cuisine classifier over the Kaggle "What's Cooking" JSON data.
# NOTE(review): this module is Python 2 (print statements, backtick repr,
# dict.keys()[0], iterkeys) — it will not run under Python 3 as-is.
import os.path
import json
import numpy as np
import pandas as pd

def load_data(path, proportion): #proportion = 1: all training set is used
    json_data=open(path)
    data = json.load(json_data)
    #implement a prediction of the class with the naive bayes method
    #Divides between the training set and the validation set
    train = data[0:(int)(proportion*len(data))]
    #valid = data[1001:2000]
    valid = data[len(train)::]
    return train, valid

#--------------------------------------building the model--------------------------------------------
#1: find all the cuisines
#2: find all the ingredients
#3: build an "empty" total_probs dictionary
#4: fill this dictionary thanks to the naive Bayes method

#constructs an empty dictionary
# Maps each cuisine name to an (n_ingredients x 2) numpy array of
# [ingredient_name, probability] rows, all probabilities pre-filled with a
# tiny epsilon so later products never hit exactly zero.
def init_total_probs(train, overwrite = False):
    fill = 1.0/1000000.0 #we will fill the matrix with this probability: this is made not to multiply by 0 in the probability evaluation
    if(overwrite == False and os.path.isfile('total_probs_init.npy')): #if there is already an initial model and we do not want to overwrite it
        print 'Loading the initial total probabilities matrix...'
        total_probs_init = np.load('total_probs_init.npy').item()
        print 'Matrix loaded.\n\n'
        return total_probs_init
    print 'Building an empty dictionary of all the cuisines and ingredients.'
    total_probs_init = dict()
    sample = train[0]
    ingredient = sample['ingredients'][0]# for the initialization of the array
    total_probs_init[sample['cuisine']] = np.array([[ingredient, 0]])
    counter = 0
    done = 0
    for sample in train[0::]:
        # `percent`/`done` implement a coarse 10%-step progress printout.
        percent = (int)(100*((float)(counter)/len(train)))
        counter+=1
        if (percent)%10 == 0:
            if done == 0:
                print `percent`+'%'
                done = 1
        else:
            done = 0
        if sample['cuisine'] not in total_probs_init.keys(): #if the cuisine is not present yet in the total_probs_init dict
            # NOTE(review): this aliases the FIRST cuisine's current array
            # rather than copying it; later np.append calls rebind each key
            # to a fresh array, but the new cuisine inherits whatever values
            # the first entry holds at this moment — confirm intended.
            total_probs_init[sample['cuisine'] ] = total_probs_init[total_probs_init.keys()[0]] # Adds the new cuisine in the total_probs_init keys
        for ingredient in sample['ingredients']:
            if ingredient not in total_probs_init[total_probs_init.keys()[0]][0:, 0]: #if the ingredient is not in the list of ingredients of the first entry of the dict
                for cuisine in total_probs_init.keys(): #append it to the list of ingredients of every entry of the dictionary
                    total_probs_init[cuisine] = np.append(total_probs_init[cuisine],[[ingredient, fill]], axis = 0)
    print '100% \nEmpty dictionary built.\n'
    print 'Saving it...'
    np.save('total_probs_init.npy', total_probs_init)
    print 'Saved.\n\n'
    return total_probs_init

#Computes the matrix of the probabilities of the cuisine
# Returns (total_probs, cuisine_probs): per-cuisine conditional ingredient
# probabilities p(xi|C) and the class priors p(C).
def build_model(train, total_probs, overwrite = False):
    print 'Computing the matrix of p(C):'
    #TODO: I can optimize it by melting it with the previous step and I should not need to redo it every time but anyways I will do it later
    nb_tot =len(train)
    cuisine_probs = dict()
    for sample in train:
        if sample['cuisine'] not in cuisine_probs.keys():
            cuisine_probs[sample['cuisine']]= 1.0/nb_tot
        else:
            cuisine_probs[sample['cuisine'] ]+=1.0/nb_tot
    print 'Matrix computed.\n\n'
    if(overwrite == False and os.path.isfile('total_probs.npy')): #if there is already a model and we do not want to overwrite it
        print 'Loading the total probabilities matrix of p(xi knowing C)...'
        total_probs= np.load('total_probs.npy').item()
        print 'Matrix loaded.\n\n'
        return total_probs, cuisine_probs
    print 'Computing the matrix of p(xi knowing C):'
    #TODO: save the model so that it doesn't need to be recomputed after
    counter = 0
    done = 0
    for cuisine in total_probs.keys():
        percent = (int)(100*((float)(counter)/len(total_probs.keys())))
        counter+=1
        if (percent)%10 == 0:
            if done == 0:
                print `percent`+'%'
                done = 1
        else:
            done = 0
        for idx, ing_prob in enumerate(total_probs[cuisine]):
            # Empirical frequency of this ingredient among samples of this cuisine.
            total = 0.0
            found = 0.0
            for sample in train:
                if cuisine == sample['cuisine']:
                    total +=1.0
                    if ing_prob[0] in sample['ingredients']:
                        found +=1.0
            total_probs[cuisine][idx,1] = (float)(found)/(float)(total)
    print 'Matrix computed.'
    print 'Saving it...'
    np.save('total_probs.npy', total_probs)
    print 'Saved.\n\n'
    return total_probs, cuisine_probs

#then for every vector [ x1, x2, x3, x4], compute the score for each class, and label the vector with the most probable class.
#-------------------------------------------predicting the class---------------------------------------------------------

def predict_class(sample, total_probs, cuisine_probs):
    #sample to test, e.g.: {'id':1324134, 'ingredients': [baking powder, requin]}
    #score has to be of this form: score = {'tex_mex': 1, 'chines': 1} ----TODO: create it of this form---------
    #initiallize the score dictionary
    score = dict()
    for cuisine in total_probs.keys():
        score[cuisine]=1.0
    for cuisine in total_probs.keys():
        for ingredient in total_probs[cuisine]:
            #total_probs has to be of the form {'tex mex':[['tomato sauce ', 0.8], ['pepper', 0.6] ], 'chinese':[['tomato sauce ', 0.3], ['pepper', 0.5] ]} here the probability is the prob of having 'pepper' knowing 'tex_mex' for instance
            if ingredient[0] in sample['ingredients']: #because the 0 elem contains the name of the ingredient and the 1 elem contains the probs to be or not to be in class C {
                score[cuisine] *= (float)(ingredient[1])
            else:
                score[cuisine] *= (1.0-(float)(ingredient[1]))
        # Multiply in the class prior p(C) once per cuisine.
        score[cuisine] *= cuisine_probs[cuisine]
    #return the cuisine the most probable for this ingredients
    cuisine_more_prob = max(score.iterkeys(), key=(lambda key: score[key]))
    return cuisine_more_prob

def test(valid, total_probs, cuisine_probs):
    #initializing the tab for the results
    list_cuisines = cuisine_probs.keys()
    # Confusion matrix: rows = true cuisine, columns = predicted cuisine.
    tab_results = pd.DataFrame(0, columns = list_cuisines, index = list_cuisines)
    print 'Computing empirical risk on the validation set.'
    total = 0.0
    good = 0.0
    counter = 0.0
    done = 0
    for sample in valid:
        l = float(len(valid))
        percent = (int)(100*((float)(counter)/l))
        counter+=1
        if (percent)%10 == 0:
            if done == 0:
                print `percent`+'%'
                done = 1
        else:
            done = 0
        total += 1.0
        cuisine_more_prob = predict_class(sample, total_probs, cuisine_probs)
        tab_results[cuisine_more_prob][ sample['cuisine']] +=1 #add the result in the tab
        if str(cuisine_more_prob) == sample['cuisine']:
            good +=1.0
    result = 1.0-(good/total)
    print 'The empirical risk on the validation set is '+ `result`+'\n\n'
    print 'The tab of the repartition of results is: (rows: predicted cuisine, columns: real cuisine)'
    print tab_results
    return tab_results, result

if __name__ == '__main__':
    #launch the program with splitting the training set into test set and validation set, and computing the risk
    train, valid = load_data('../data/train.json', 1.0)
    total_probs_init = init_total_probs(train, overwrite = True)
    total_probs, cuisine_probs = build_model(train, total_probs_init, overwrite = True)
    #tab_results, result = test(valid, total_probs, cuisine_probs)
true
f183d24102f16c778b5521865007fbf24cf5a8e6
Python
mchao409/KeyMathPy
/Combinatorics/tests/PermutationTest.py
UTF-8
140
2.71875
3
[]
no_license
def main():
    """Smoke-test the permutation helper by printing example nPr values."""
    # examples
    # NOTE(review): `permutation` is never imported in this file — presumably
    # it comes from the package's Permutation module; confirm and add the
    # import, otherwise running this raises NameError.
    print(permutation(8,3))
    print(permutation(5,5))
    print(permutation(12,2))

if __name__== "__main__":
    main()
true
7356710da3c8a1ff87cdd1c89be190d1c97b9543
Python
dtbinh/swarm-simulator
/tests/vectors_tests.py
UTF-8
6,546
3.125
3
[ "MIT" ]
permissive
# tests.vectors_tests.py
# Tests for the vectors package
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Thu Apr 24 09:57:20 2014 -0400
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: vectors_tests.py [] benjamin@bengfort.com $

"""
Tests for the vectors package
"""

##########################################################################
## Imports
##########################################################################

import math
import unittest
import numpy as np

from swarm.vectors import *

##########################################################################
## Vectors Test Case
##########################################################################

class VectorsTests(unittest.TestCase):
    # Exercises the read-only Vector ndarray subclass: constructors,
    # derived properties (unit, length, orthogonal), angles, distance,
    # equality, and copying.

    def assertArrayNotWritable(self, arr):
        """
        Ensure that an array is not writable.
        """
        # NOTE(review): assertRaisesRegexp is a deprecated alias of
        # assertRaisesRegex on Python 3 — consider updating.
        with self.assertRaisesRegexp(ValueError, "assignment destination is read-only"):
            arr[0] = 1.0

    def test_arr_view(self):
        """
        Test vector contstruction from a np.array
        """
        vec = Vector.arr(np.array([10, 10]))
        self.assertTrue(isinstance(vec, Vector))
        self.assertTrue(isinstance(vec, np.ndarray))
        self.assertEqual(vec.x, 10)
        self.assertEqual(vec.y, 10)
        self.assertArrayNotWritable(vec)

    def test_zero_view(self):
        """
        Test the zero vector constuction
        """
        vec = Vector.zero()
        self.assertTrue(isinstance(vec, Vector))
        self.assertTrue(isinstance(vec, np.ndarray))
        self.assertEqual(vec.x, 0)
        self.assertEqual(vec.y, 0)
        self.assertArrayNotWritable(vec)

    def test_arrp_view(self):
        """
        Test the python arr vector construction
        """
        vec = Vector.arrp(10,0)
        self.assertTrue(isinstance(vec, Vector))
        self.assertTrue(isinstance(vec, np.ndarray))
        self.assertEqual(vec.x, 10)
        self.assertEqual(vec.y, 0)
        self.assertArrayNotWritable(vec)

    def test_rand_high_view(self):
        """
        Test the random vector constructor with high limit
        """
        vec = Vector.rand(12)
        self.assertTrue(isinstance(vec, Vector))
        self.assertTrue(isinstance(vec, np.ndarray))
        self.assertLess(vec.x, 12)
        self.assertLess(vec.y, 12)
        self.assertGreaterEqual(vec.x, 0)
        self.assertGreaterEqual(vec.y, 0)
        self.assertArrayNotWritable(vec)

    def test_rand_range_view(self):
        """
        Test the random vector constructor with range
        """
        vec = Vector.rand(6, 12)
        self.assertTrue(isinstance(vec, Vector))
        self.assertTrue(isinstance(vec, np.ndarray))
        self.assertLess(vec.x, 12)
        self.assertLess(vec.y, 12)
        self.assertGreaterEqual(vec.x, 6)
        self.assertGreaterEqual(vec.y, 6)
        self.assertArrayNotWritable(vec)

    def test_unit(self):
        """
        Test the computation of the unit vector
        """
        cases = (
            (Vector.arrp(0, 10), Vector.arrp(0,1)),
            (Vector.arrp(10, 0), Vector.arrp(1,0)),
            (Vector.arrp(10, 10), Vector.arrp( 0.70710678, 0.70710678)),
            (Vector.zero(), Vector.zero()),
        )

        for case, expected in cases:
            self.assertEqual(expected, case.unit)

    def test_length(self):
        """
        Test computation of the vector length
        """
        cases = (
            (Vector.arrp(0, 10), 10),
            (Vector.arrp(10, 0), 10),
            (Vector.arrp(10, 10), 14.142135623730951),
            (Vector.zero(), 0.0)
        )

        for case, expected in cases:
            self.assertEqual(expected, case.length)

    def test_orthogonal(self):
        """
        Test the computation of the orthogonal vector
        """
        cases = (
            (Vector.arrp(0, 10), Vector.arrp(-1,0)),
            (Vector.arrp(10, 0), Vector.arrp(0,1)),
            (Vector.arrp(10, 10), Vector.arrp(-0.70710678, 0.70710678)),
            (Vector.arrp(-10, -10), Vector.arrp(0.70710678, -0.70710678)),
        )

        for case, expected in cases:
            self.assertEqual(expected, case.orthogonal)

    def test_angle_degrees(self):
        """
        Test computation of the angle in degrees

        Are these angles correct?
        """
        A = Vector.arrp(10, 0)
        B = Vector.arrp(0, 10)
        E = Vector.arrp(10, 10)
        C = Vector.arrp(-10, 0)
        D = Vector.arrp(0, -10)
        F = Vector.arrp(-10,-10)

        cases = (
            (A.angle(B), 90.0),
            (B.angle(A), 90.0),
            (A.angle(E), 45.0),
            (E.angle(F), 180.0),
            (E.angle(C), 135.0),
            (E.angle(D), 135.0),
            (B.angle(B), 0.0)
        )

        for case, expected in cases:
            self.assertAlmostEqual(case, expected, places=4)

    def test_angle_radians(self):
        """
        Test computation of the angle in radians
        """
        A = Vector.arrp(10, 0)
        B = Vector.arrp(0, 10)
        E = Vector.arrp(10, 10)
        C = Vector.arrp(-10, 0)
        D = Vector.arrp(0, -10)
        F = Vector.arrp(-10,-10)

        cases = (
            (A.angle(B, False), 0.5*np.pi),
            (B.angle(A, False), 0.5*np.pi),
            (A.angle(E, False), 0.25*np.pi),
            (E.angle(F, False), np.pi),
            (E.angle(C, False), .75*np.pi),
            (E.angle(D, False), .75*np.pi),
            (B.angle(B, False), 0.0)
        )

        for case, expected in cases:
            self.assertAlmostEqual(case, expected, places=4)

    def test_distance(self):
        """
        Test vector distance computations
        """
        # Each case is at Euclidean distance 5 (3-4-5 triangles) from A.
        A = Vector.arrp(23, 7)
        cases = (
            Vector.arrp(27, 10),
            Vector.arrp(27, 4),
            Vector.arrp(19, 4),
            Vector.arrp(19, 10),
        )

        for case in cases:
            self.assertEqual(A.distance(case), 5.0)

    def test_equality(self):
        """
        Test two vectors are equal or not equal
        """
        A = Vector.arrp(42.0000000000000, 13.000000000000)
        B = Vector.arrp(42.0000000000001, 12.999999999999)
        C = Vector.arrp(7.0, 7.0)

        self.assertIsNot(A, B)
        self.assertEqual(A, B)
        self.assertNotEqual(A, C)

    def test_copy(self):
        """
        Check that you can copy a readonly vector
        """
        A = Vector.arrp(23, 52)
        B = A.copy()

        self.assertIsNot(A,B)
        self.assertEqual(A,B)
true
4a6ac517994a89e4fcc572d6956d276ac86fd795
Python
jack-t/monkey-interpreter
/ast.py
UTF-8
1,847
3.125
3
[]
no_license
from typing import List
from dataclasses import dataclass
from enum import Enum

# AST node definitions for the Monkey interpreter.
#
# Bug fix: the original declared nodes as `class X(NamedTuple, Statement)`.
# CPython's typing module forbids multiple inheritance with NamedTuple
# (TypeError at class-creation time), so this module could not even be
# imported. Frozen dataclasses preserve the field names/order, positional
# construction, immutability, and value equality of NamedTuple while
# allowing the Statement/Expr marker bases.


# unlike exprs, statements don't have value
class Statement:
    pass


@dataclass(frozen=True)
class Program:
    stmts: List[Statement]


class Expr:
    pass


@dataclass(frozen=True)
class ExprStmt(Statement):
    expr: Expr


# both are optional: if you have only an expr, then you execute an expression and return its value; only a statement, execute it and return Void
# if you've got both, then you execute the statements, then the expression
# this is basically how Rust works
@dataclass(frozen=True)
class Block(Expr):
    stmts: List[Statement]
    return_expr: Expr


@dataclass(frozen=True)
class LValue:
    identifier: str


@dataclass(frozen=True)
class LetStmt(Statement):
    binding: LValue
    expr: Expr


@dataclass(frozen=True)
class AssignExpr(Expr):
    lvalue: LValue
    rvalue: Expr


class BinaryOp(Enum):
    MULT = 0
    DIV = 1
    ADD = 2
    SUB = 3
    EQUALS = 4
    NOT_EQUALS = 5
    GREATER = 6
    GREATER_EQ = 7
    LESS = 8
    LESS_EQ = 9
    AND = 10
    OR = 11


@dataclass(frozen=True)
class BinaryExpr(Expr):
    lhs: Expr
    op: BinaryOp
    rhs: Expr


class UnaryOp(Enum):
    NEGATION = 0
    NOT = 1  # boolean, but isn't this just the same as negation?


@dataclass(frozen=True)
class UnaryExpr(Expr):
    expr: Expr
    op: UnaryOp


@dataclass(frozen=True)
class SymbolReferenceExpr(Expr):
    identifier: str


@dataclass(frozen=True)
class FuncApplicationExpr(Expr):
    func: Expr
    arguments: List[Expr]


@dataclass(frozen=True)
class FuncLiteralExpr(Expr):
    param_names: List[str]
    expr: Expr


class LiteralExpr(Expr):
    """Integer or string literal; any other payload type is rejected."""

    def __init__(self, value):
        if isinstance(value, int) or isinstance(value, str):
            self.value = value
        else:
            raise Exception("literals can only be ints or strings")

    def __repr__(self):
        if isinstance(self.value, int):
            return "literal {" + str(self.value) + "}"
        else:
            return "literal {\"" + self.value + "\"}"


# Fields default to None so the original no-argument construction style
# (create, then assign attributes) keeps working; the original bare class
# annotations never actually created instance attributes.
@dataclass
class ConditionalExpr(Expr):
    condition: Expr = None
    true: Expr = None
    false: Expr = None


@dataclass
class LoopExpr(Expr):
    condition: Expr = None
    body: Expr = None
true
49f8b075506a09ceb2286be4fa42736cedcbc2fa
Python
bonly/exercise
/2009/20091217_regex.py
UTF-8
1,937
2.6875
3
[]
no_license
#!/usr/bin/python #-*-coding:utf-8-*- import subprocess import re import urllib2 def get_second_by_strip(): #res = subprocess.Popen(["time /home/bonly/worksp/mysql/Debug/mysql 0"],stderr=subprocess.PIPE,stdout=subprocess.PIPE,shell=True) res = subprocess.Popen(["time ls -l"], stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True) t0 = res.stderr.read().strip() ind = t0.split('\t') ind = ind[1].split('\n') print ind[0] def get_second_by_re(): #res = subprocess.Popen(["time /home/bonly/worksp/mysql/Debug/mysql 0"],stderr=subprocess.PIPE,stdout=subprocess.PIPE,shell=True) res = subprocess.Popen(["time ls -l"], stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True) t0 = res.stderr.read().strip() re_obj = re.compile(r'.m.*s') result = re_obj.findall(t0) print result #返回的列表,可通过result[0]访问第一个 def get_iter_by_re(): #res = subprocess.Popen(["time /home/bonly/worksp/mysql/Debug/mysql 0"],stderr=subprocess.PIPE,stdout=subprocess.PIPE,shell=True) res = subprocess.Popen(["time ls -l"], stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True) t0 = res.stderr.read().strip() #print t0 re_obj = re.compile(r'(?P<num>\b.m.*s\b)', re.VERBOSE | re.IGNORECASE ) iter = re_obj.finditer(t0) print type(iter) #print map(str,iter) #print "%s: %s" % (iter.start(),iter.end(0)) for match in iter: print "%s: %s" % (match.start(), match.group("num")) def get_url_re(): html = urllib2.urlopen('http://www.google.com.hk/search?hl=zh-CN&newwindow=1&safe=strict&q=python+finditer+compile&btnG=Google+%E6%90%9C%E7%B4%A2&aq=f&aqi=&aql=&oq=').read() pattern = r'\b(the\s+\w+)\s+' regex = re.compile(pattern, re.IGNORECASE) for match in regex.finditer(html): print "%s: %s" % (match.start(), match.group(1)) if __name__ == "__main__": #get_second_by_strip() #get_second_by_re() get_iter_by_re() #get_url_re()
true
670ea0994f13e2d87ab58273ba0260fcc632df7a
Python
stanfordnlp/stanza
/stanza/utils/datasets/ner/conll_to_iob.py
UTF-8
2,128
3.015625
3
[ "Apache-2.0" ]
permissive
""" Process a conll file into BIO Includes the ability to process a file from a text file or a text file within a zip Main program extracts a piece of the zip file from the Danish DDT dataset """ import io import zipfile from zipfile import ZipFile from stanza.utils.conll import CoNLL def process_conll(input_file, output_file, zip_file=None, conversion=None, attr_prefix="name", allow_empty=False): """ Process a single file from DDT zip_filename: path to ddt.zip in_filename: which piece to read out_filename: where to write the result label: which attribute to get from the misc field """ if not attr_prefix.endswith("="): attr_prefix = attr_prefix + "=" doc = CoNLL.conll2doc(input_file=input_file, zip_file=zip_file) with open(output_file, "w", encoding="utf-8") as fout: for sentence_idx, sentence in enumerate(doc.sentences): for token_idx, token in enumerate(sentence.tokens): misc = token.misc.split("|") for attr in misc: if attr.startswith(attr_prefix): ner = attr.split("=", 1)[1] break else: # name= not found if allow_empty: ner = "O" else: raise ValueError("Could not find ner tag in document {}, sentence {}, token {}".format(input_file, sentence_idx, token_idx)) if ner != "O" and conversion is not None: if isinstance(conversion, dict): bio, label = ner.split("-", 1) if label in conversion: label = conversion[label] ner = "%s-%s" % (bio, label) else: ner = conversion(ner) fout.write("%s\t%s\n" % (token.text, ner)) fout.write("\n") def main(): process_conll(zip_file="extern_data/ner/da_ddt/ddt.zip", input_file="ddt.train.conllu", output_file="data/ner/da_ddt.train.bio") if __name__ == '__main__': main()
true
c874a697e28816c1584dfabebc9820002c347646
Python
fafafariba/coding_challenges
/python/two_characters.py
UTF-8
1,638
4.28125
4
[]
no_license
# String t always consists of two distinct alternating characters. For example, if string t's two distinct characters are x and y, then t could be 'xyxyx' or 'yxyxy' but not 'xxyy' or 'xyyx'. # You can convert some string s to string t by deleting characters from s. When you delete a character from s, you must delete all occurrences of it in s. For example, if s = 'abaacdabd' and you delete the character 'a', then the string becomes 'bcdbd'. # Given s, convert it to the longest possible string t. Then print the length of string t on a new line; if no string t can be formed from s, print 0 instead. def two_characters(s): length = 0 explored1 = [] for letter1 in s: if letter1 not in explored1: explored1.append(letter1) explored2 = [] for letter2 in s: if letter2 not in explored2 and letter2 not in explored1: explored2.append(letter2) t = two_char_str(s, letter1, letter2) t_length = valid_t(t) if t_length > 0 and t_length > length: length = t_length return length def two_char_str(s, x, y): new_str = "" for l in s: if l == x or l == y: new_str += l return new_str def valid_t(t): if len(t) < 2: return 0 else: for i in range(1, len(t)): if t[i] == t[i - 1]: return 0 return len(t) s1 = 'cobmjdczpffbxputsaqrwnfcweuothoygvlzugazulgjdbdbarnlffzpogdprjxvtvbmxjujeubiofecvmjmxvofejdvovtjulhhfyadr' print(two_characters(s1) == 8) s2 = '' print(two_characters(s2) == 0) s3 = 'a' print(two_characters(s3) == 0) s4 = 'fd' print(two_characters(s4) == 2)
true
e7ef3354a253015573adb897f9fb8e37615c7ff6
Python
Arkleseisure/old-chess-bot
/Bits_and_Pieces.py
UTF-8
11,988
3.453125
3
[]
no_license
# Utility helpers for the chess GUI: board/coordinate conversions, piece
# lookups, check detection, algebraic notation, and clock display.
# Board representation (per find_pieces): board[file][rank] holds a
# two-character string like "Pw"/"Kb" (piece letter + colour) or " ".
import time
import Global_variables as Gv
from Button import Button
import pygame
import pygame.freetype
pygame.freetype.init()


# loads an image
def load_image(name):
    f = pygame.image.load(name + ".png")
    return f


# turns a letter coordinate into a numerical x coordinate on the board
def un_translate(letter):
    # 'a' -> 0, 'b' -> 1, ... ('a' is ord 97)
    return ord(letter) - 97


# changes white to black and black to white
def opposite(col):
    if col == "w":
        return "b"
    else:
        return "w"


# prints to the screen
def print_screen(surface, text, x, y, size, colour, left_align=True, font_type="Calibri"):
    """Render `text` onto `surface` at (x, y); returns the rendered size.
    When left_align is False, (x, y) is treated as the text centre."""
    # turns the text into a pygame surface
    font = pygame.freetype.SysFont(font_type, size, True)
    print_image, rect = font.render(text, colour)

    # blits the new text surface onto the given surface and updates the screen
    if not left_align:
        text_width, text_height = print_image.get_size()
        surface.blit(print_image, (x - text_width//2, y - text_height//2))
    else:
        surface.blit(print_image, (x, y))
    return print_image.get_size()


# returns a list of the locations of the pieces with the given name on the board.
def find_piece(name, board):
    squares = []
    for col in range(len(board)):
        for row in range(len(board)):
            if name == board[col][row]:
                squares.append([col, row])
    return squares


# returns a list of the locations of all the pieces on the board
def find_pieces(board):
    squares = {"Pw": [], "Pb": [], "Bw": [], "Bb": [], "Nw": [], "Nb": [], "Rw": [], "Rb": [],
               "Qw": [], "Qb": [], "Kw": [], "Kb": []}
    for col in range(len(board)):
        for row in range(len(board)):
            if board[col][row] != " ":
                squares[board[col][row]].append([col, row])
    return squares


# Waits for the user to click then returns the location of the click.
def wait_for_click(clocks=None):
    """Block until a mouse click; while waiting, tick and redraw the chess
    clock if one is supplied. Returns (mouse_x, mouse_y, lost_on_time,
    minutes_left, seconds_left); on timeout returns (0, 0, True, 0, 0)."""
    start_time = time.time()
    i = 1
    time_group = pygame.sprite.Group()
    lost_on_time = False
    while True:
        # clocks[0] is mins, clocks[1] is secs
        # clocks[2] is the location of the clock ("top" or "bottom") and clocks[3] is the screen
        if clocks is not None:
            # updates the clock's value
            if time.time() - start_time > i:
                i += 1
                if clocks[1] != 0:
                    clocks[1] = clocks[1] - 1
                elif clocks[0] != 0:
                    clocks[1] = 59
                    clocks[0] -= 1
                else:
                    # Both minutes and seconds exhausted: lost on time.
                    return 0, 0, True, 0, 0

            # draws the ticking clock to the screen
            x = Gv.board_top_left_x - Gv.square_size*1.1
            y = 0.5*(Gv.board_top_left_y - Gv.square_size*0.4)
            width = Gv.square_size
            height = Gv.square_size * 0.4
            text = str(clocks[0]) + ":" + ("0" if clocks[1] < 10 else "") + str(clocks[1])
            font_size = Gv.square_size//4
            if clocks[2] == "top":
                clock = Button(x, y, width, height, text=text, font_size=font_size)
            else:
                clock = Button(x, Gv.screen_height - y - height, width, height, text=text, font_size=font_size)
            for item in time_group:
                time_group.remove(item)
            time_group.add(clock)
            # the screen is passed into the function via the clocks list
            time_group.draw(clocks[3])
            pygame.display.flip()

        # checks if the user has clicked
        for event in pygame.event.get():
            if event.type == pygame.MOUSEBUTTONUP:
                mouse_x, mouse_y = pygame.mouse.get_pos()
                if clocks is None:
                    clocks = [0, 0]
                return mouse_x, mouse_y, lost_on_time, clocks[0], clocks[1]


def castling_legality(game):
    """Return [queenside_legal, kingside_legal] for the side to move,
    based on castling rights, empty squares, and attacked squares."""
    if game.to_play() == 'w':
        colour_key = 0
        y = 0
    else:
        colour_key = 1
        y = 7
    castling = [game.castle_queenside[colour_key], game.castle_kingside[colour_key]]
    # Cannot castle out of check, or if no rights remain.
    if in_check(game.to_play(), game.board, [[4, y], [4, y]])[0] or castling == [False, False]:
        return [False, False]

    # checks kingside castling
    if castling[1] and game.board[5][y] == game.board[6][y] == ' ':
        # Cannot castle through an attacked square.
        if in_check(game.to_play(), game.board, [[5, y], [5, y]])[0] or \
                in_check(game.to_play(), game.board, [[6, y], [6, y]])[0]:
            castling[1] = False
    else:
        castling[1] = False

    # checks queenside castling
    if castling[0] and game.board[1][y] == game.board[2][y] == game.board[3][y] == ' ':
        if in_check(game.to_play(), game.board, [[2, y], [2, y]])[0] or \
                in_check(game.to_play(), game.board, [[3, y], [3, y]])[0]:
            castling[0] = False
    else:
        castling[0] = False
    return castling


# turns a move from the form "original square destination square" (e.g e2e4) to the more widely recognised
# "piece destination square" (e.g Qe4) which is easier for humans
def proper_notation(move, game, piece_captured, prom, game_over):
    """Return the algebraic notation for `move` (e.g. 'Nxf3+', 'O-O',
    'e8=Q'). NOTE: expects the move to have already been played, since it
    reads the moved piece from the destination square."""
    notation_move = ""
    int_move = [un_translate(move[0]), int(move[1]) - 1, un_translate(move[2]), int(move[3]) - 1]
    # checks for castling
    if game.board[int_move[2]][int_move[3]][0] == "K" and abs(int_move[2] - int_move[0]) == 2:
        if int_move[2] - int_move[0] == 2:
            return "O-O"
        elif int_move[0] - int_move[2] == 2:
            return "O-O-O"
    # accounts for all other non-pawn moves
    elif game.board[int_move[2]][int_move[3]][0] != "P" and prom == "":
        notation_move += game.board[int_move[2]][int_move[3]][0]
    # for if a pawn has captured a piece
    elif piece_captured is True:
        notation_move += move[0] + "x" + move[2] + move[3]
        if prom != "":
            notation_move += "=" + prom
        return notation_move
    # any other pawn scenario
    else:
        notation_move += move[2] + move[3]
        if prom != "":
            notation_move += "=" + prom
        return notation_move

    '''
    Finds other pieces which have the same name. 
    If another piece of the same type and colour can move to the same destination square, 
    then a coordinate to specify the moving piece is added
    '''
    p = game.piece_dict[move[2:4]]
    # Temporarily vacate the destination square so get_moves sees the
    # pre-move board; restored just below.
    game.board[int_move[2]][int_move[3]] = " "
    for p2 in game.piece_list:
        if p2.name == p.name and (p.x != p2.x or p.y != p2.y):
            for action in p2.get_moves(game.board):
                if action[2] == move[2] and action[3] == move[3]:
                    if un_translate(action[0]) != un_translate(move[0]):
                        notation_move += move[0]
                    else:
                        notation_move += move[1]
    game.board[int_move[2]][int_move[3]] = p.name

    # "x" signifies that the move was a capturing move
    if piece_captured is True:
        notation_move += "x"
    # adds the destination square
    notation_move += move[2] + move[3]

    # "+" signifies that the opponent is now in check, "#" signifies that the move checkmated the opponent
    is_in_check, checking_piece = in_check(game.to_play(), game.board, game.king_loc)
    if is_in_check:
        if game_over:
            notation_move += "#"
        else:
            notation_move += "+"
    return notation_move


# verifies whether one colour is in check
def in_check(colour, board, king_loc):
    """Return (bool, move-string of the checking piece or None).
    Scans the 8 sliding/step directions from the king, then knight squares."""
    loc = king_loc[0 if colour == "w" else 1]
    directions = [{0: 1, 1: 1}, {0: 1, 1: 0}, {0: 1, 1: -1}, {0: 0, 1: 1},
                  {0: 0, 1: -1}, {0: -1, 1: 1}, {0: -1, 1: 0}, {0: -1, 1: -1}]
    # all directions are from white's perspective
    for direction in directions:
        i = 1
        looping = True
        x = loc[0] + direction[0]
        y = loc[1] + direction[1]
        while 8 > x > -1 and 8 > y > -1 and looping:
            if board[x][y] != " ":
                # First piece along the ray blocks everything behind it.
                looping = False
                if board[x][y][-1] == opposite(colour):
                    piece = board[x][y][0]
                    if piece == "Q" or (piece == "B" and direction[0] != 0 and direction[1] != 0) \
                            or (piece == "R" and (direction[0] == 0 or direction[1] == 0)) \
                            or (piece == "K" and i == 1) \
                            or (piece == "P" and i == 1 and direction[0] != 0 and direction[1] == (1 if colour == "w" else -1)):
                        return True, translate_coordinates(x, y, loc[0], loc[1])
            i += 1
            x += direction[0]
            y += direction[1]

    # looks for knight moves
    squares = [{0: 2, 1: 1}, {0: 2, 1: -1}, {0: 1, 1: 2},
               {0: 1, 1: -2}, {0: -1, 1: 2}, {0: -1, 1: -2}, {0: -2, 1: 1}, {0: -2, 1: -1}]
    # removes knight moves which would end up off the board
    i = 0
    while i < len(squares):
        if not 8 > loc[0] + squares[i][0] > -1 or not 8 > loc[1] + squares[i][1] > -1:
            squares.pop(i)
        else:
            i += 1
    # does the actual checking of moves
    for square in squares:
        if board[loc[0] + square[0]][loc[1] + square[1]] == "N" + opposite(colour):
            return True, translate_coordinates(loc[0] + square[0], loc[1] + square[1], loc[0], loc[1])
    return False, None


# function used to improve efficiency when finding legal moves.
# It checks whether any checks which have been given in the past are still check
def still_in_check(previous_checks, board, current_player, king_loc):
    """Return True if any move in `previous_checks` (moves that previously
    gave check) still delivers check on the current board."""
    # gives the location of the king
    loc = king_loc[0 if current_player == "w" else 1]
    # loops through previous moves that have given check.
    for move in previous_checks:
        int_move = [un_translate(move[0]), int(move[1]) - 1, un_translate(move[2]), int(move[3]) - 1]
        vertical = 1 if int_move[3] > int_move[1] else -1
        horizontal = 1 if int_move[2] > int_move[0] else -1
        move_is_check = True
        # Only relevant if the checking piece is still the enemy's and the
        # king is still on the targeted square.
        if board[int_move[0]][int_move[1]][-1] == opposite(current_player) and \
                loc[0] == int_move[2] and loc[1] == int_move[3]:
            # checks for vertical blocking
            if move[0] == move[2]:
                for i in range(int_move[1] + vertical, int_move[3], vertical):
                    if board[int_move[2]][i] != " ":
                        move_is_check = False
            # checks for horizontal blocking
            elif move[1] == move[3]:
                for i in range(int_move[0] + horizontal, int_move[2], horizontal):
                    if board[i][int_move[3]] != " ":
                        move_is_check = False
            # checks for blocking along the diagonals
            elif abs(int_move[2] - int_move[0]) == abs(int_move[3] - int_move[1]):
                for i in range(abs(int_move[3] - int_move[1]) - 1):
                    if board[int_move[0] + horizontal * (i + 1)][int_move[1] + vertical * (i + 1)] != " ":
                        move_is_check = False
        else:
            move_is_check = False
        if move_is_check:
            return True
    return False


# given two sets of coordinates, starting and finishing positions, returns a move
def translate_coordinates(x1, y1, x2, y2):
    return chr(x1 + 97) + str(y1 + 1) + chr(x2 + 97) + str(y2 + 1)


# returns the coordinates of a given square of the form a3, b4, ...
def translate(x, y):
    return chr(x + 97) + str(y + 1)


# formats a duration in seconds as "Hh Mmin Ss"
def to_time(secs):
    secs = round(secs)  # ensures that 59.6s doesn't get displayed as 60s for instance
    return '{0}h {1}min {2}s'.format(int(secs//3600), int((secs % 3600)//60), int(secs % 60))
true
cace1fa90e2f146d2f4fb06fa3c7dfc98a6bfe3d
Python
oftensmile/indra
/indra/sources/trips/drum_reader.py
UTF-8
3,034
2.609375
3
[ "BSD-2-Clause" ]
permissive
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import os
import sys
import random
import logging
try:
    from kqml import KQMLModule, KQMLPerformative, KQMLList
    have_kqml = True
except ImportError:
    # Fall back to a bare base class so the module still imports without
    # pykqml; instantiating DrumReader will then raise (see __init__).
    KQMLModule = object
    have_kqml = False

logger = logging.getLogger('drum_reader')


class DrumReader(KQMLModule):
    """Agent which processes text through a local TRIPS/DRUM instance.

    This class is implemented as a communicative agent which sends and
    receives KQML messages through a socket. It sends text (ideally in
    small blocks like one sentence at a time) to the running DRUM
    instance and receives extraction knowledge base (EKB) XML responses
    asynchronously through the socket. To install DRUM and its
    dependencies locally, follow instructions at:
    https://github.com/wdebeaum/drum
    Once installed, run `drum/bin/trips-drum -nouser` to run DRUM
    without a GUI. Once DRUM is running, this class can be instantiated as
    `dr = DrumReader(to_read=text_list)`, at which point it attempts to
    connect to DRUM via the socket and send the texts for reading.
    Receiving responses can be started as `dr.start()` which waits for
    responses from the reader and returns when all responses were received.
    Once finished, the list  of EKB XML extractions can be accessed via
    `dr.extractions`.

    Parameters
    ----------
    to_read : list[str]
        A list of text strings to read with DRUM.

    Attributes
    ----------
    extractions : list[str]
        A list of EKB XML extractions corresponding to the input text list.
    """
    def __init__(self, **kwargs):
        if not have_kqml:
            raise ImportError('Install the `pykqml` package to use ' +
                              'the DrumReader')
        self.to_read = kwargs.pop('to_read', None)
        super(DrumReader, self).__init__(name='DrumReader')
        # Random starting id so message ids don't collide across runs.
        self.msg_counter = random.randint(1, 100000)
        self.ready()
        self.extractions = []
        # NOTE(review): len(self.to_read) assumes to_read was supplied;
        # a missing kwarg leaves it None and raises TypeError here — confirm
        # callers always pass to_read.
        self.reply_counter = len(self.to_read)
        # Fire off all read requests up front; replies arrive asynchronously.
        for text in self.to_read:
            self.read_text(text)

    def read_text(self, text):
        # Send one run-text request to DRUM over the KQML socket.
        print('Reading %s' % text)
        msg_id = 'RT000%s' % self.msg_counter
        kqml_perf = _get_perf(text, msg_id)
        self.send(kqml_perf)
        self.msg_counter += 1

    def receive_reply(self, msg, content):
        # Callback invoked by KQMLModule for each DRUM reply; collect the
        # EKB XML and exit once every request has been answered.
        extractions = content.gets(':extractions')
        self.extractions.append(extractions)
        self.reply_counter -= 1
        if self.reply_counter == 0:
            self.exit(0)


def _get_perf(text, msg_id):
    # Build the KQML REQUEST performative asking DRUM to run `text`,
    # tagged with msg_id so the reply can be matched up.
    text = text.encode('utf-8')
    msg = KQMLPerformative('REQUEST')
    msg.set('receiver', 'DRUM')
    content = KQMLList('run-text')
    content.sets('text', text)
    msg.set('content', content)
    msg.set('reply-with', msg_id)
    return msg


if __name__ == '__main__':
    # Simple smoke test against a locally running DRUM instance.
    to_read = ['MEK phosphorylates ERK1.', 'BRAF phosphorylates MEK1.']
    dr = DrumReader(to_read=to_read)
    dr.start()
true
824c61844696aaebb6999ed46efb28b8c50e2823
Python
bgoonz/UsefulResourceRepo2.0
/MY_REPOS/Lambda-Resource-Static-Assets/2-resources/_External-learning-resources/02-pyth/python-ds-master/data_structures/graphs/Adjacency_matrix.py
UTF-8
828
3.46875
3
[ "MIT" ]
permissive
class Graph: def __init__(self, vertices, directed: bool): self.V = vertices self.e = 0 self.d = directed self.graph = [] for i in range(self.V): lst = [0] * self.V self.graph.append(lst) def add_edge(self, ver1, ver2): if self.d: self.graph[ver1][ver2] = 1 else: self.graph[ver1][ver2] = 1 self.graph[ver2][ver1] = 1 def remove_edge(self, ver1, ver2): if self.d[ver1][ver2] == 0: print("No edge between %d and %d" % (ver1, ver2)) return if self.d: self.graph[ver1][ver2] = 0 else: self.graph[ver1][ver2] = 0 self.graph[ver2][ver1] = 0 def print_graph(self): for i in self.graph: print(i)
true
73e07aa4f6ca294155783fb2041d7034c753d3db
Python
RJHughes/TwitterSentiment
/app/sentiment.py
UTF-8
3,422
3.296875
3
[]
no_license
import ast def get_sentiment(query): """ Given a query, this function returns the sentiment for the items in that query and returns the dates and sentiment for each date """ print('Num entries:' + str(len(query))) # Now we're going to extract the sentiment and date information and get the average sentiment on a particular date sentiment_array = [] pos_sent_dict = {} neg_sent_dict = {} pos_count_dict = {} neg_count_dict = {} hash_dict = {} hash_list = [] fav_max = 0 for entry in query: sentiment_array.append(entry.sentiment) hashtags = ast.literal_eval(entry.hashtags) if len(hashtags)!=0: for tags in hashtags: tags = dict(tags) if tags['text'] in hash_dict: hash_dict[tags['text']] = hash_dict[tags['text']] + 100 else: hash_dict[tags['text']] = 100 # We need to remove the timezone, day and hour data temp_date = entry.created_at.split() temp_date.pop(0) temp_date.pop(2) temp_date.pop(2) # This makes it a datetime object for easier working # formatted_date = datetime.datetime.strptime(' '.join(temp_date), '%b %d %Y') formatted_date = ' '.join(temp_date) # We now take the average of the sentiment by keeping a running average # Positive sentiment if entry.sentiment > 0: if formatted_date in pos_sent_dict: pos_count_dict[formatted_date] = pos_count_dict[formatted_date] + 1 pos_sent_dict[formatted_date] = (pos_sent_dict[formatted_date] + (entry.sentiment - pos_sent_dict[formatted_date]) / pos_count_dict[formatted_date]) else: pos_count_dict[formatted_date] = 1 pos_sent_dict[formatted_date] = 1 else: if formatted_date in neg_sent_dict: neg_count_dict[formatted_date] = neg_count_dict[formatted_date] + 1 neg_sent_dict[formatted_date] = (neg_sent_dict[formatted_date] + (entry.sentiment - neg_sent_dict[formatted_date]) / neg_count_dict[formatted_date]) else: neg_count_dict[formatted_date] = 1 neg_sent_dict[formatted_date] = 0 date_list = [] pos_sentiment_list = [] neg_sentiment_list = [] total_search = [] for key in sorted(pos_sent_dict): date_list.append(key), \ 
pos_sentiment_list.append(pos_sent_dict[key]), \ for key in sorted(neg_sent_dict): if key not in date_list: date_list.append(key) neg_sentiment_list.append(neg_sent_dict[key]), \ for key in sorted(date_list): pos_cont = 0 neg_cont = 0 if key in pos_count_dict: pos_cont = pos_count_dict[key] if key in neg_count_dict: neg_cont = neg_count_dict[key] total_search.append(pos_cont+neg_cont) for items in hash_dict: hash_list.append([items,hash_dict[items]]) print(total_search) print(neg_count_dict) print(pos_count_dict) print(sorted(date_list)) return sorted(date_list), pos_sentiment_list, neg_sentiment_list, total_search, hash_list
true
8f45354095106802a32c4f2b53ca8aee3524f982
Python
ANoonan93/Python_code
/Euler_7.py
UTF-8
382
3.90625
4
[ "MIT" ]
permissive
import math def isprime(number): if number <= 1: return False if number == 2: return True if number %2 == 0: return False for i in range(3, int(math.sqrt(number))+1): if number %i == 0: return False return True num = 0 prime = 0 while prime < 10001: if isprime(num) == True: prime += 1 num += 1 print(num, "is the 10001st prime")
true
ebc65190e47b7c044429939955bd0ac4e094030b
Python
wlstjdpark/EffectivePython
/Chapter29/Chapter29.py
UTF-8
4,224
3.578125
4
[]
no_license
# 메타클래스와 속성 # 메타클래스를 이용하면 파이썬 class 문을 가로채서 클래스가 정의될 때마다 특별한 동작을 제공할 수 있다. # 속성 접근을 동적으로 사용자화하는 파이썬의 내장 기능이 있다. # 동적 속성을 오버라이드 하다가 예상치 못한 부작용을 일으킬 수 있고, # 메타클래스는 내부적으로 동작하는게 많기 때문에 최소한으로 사용하는 것이 좋다. # 게터와 세터 메서드 대신에 일반 속성을 사용하자. class OldResistor(object): def __init__(self, ohms): self._ohms = ohms def get_ohms(self): return self._ohms def set_ohms(self, ohms): self._ohms = ohms print('OldResister') r0 = OldResistor(3) print('Before: %5r' % r0.get_ohms()) r0.set_ohms(10) print('After: %5r' % r0.get_ohms()) # 아래와 같이 사용하기엔 불편하다. r0.set_ohms(r0.get_ohms() + 10) # 아래와 같이 속성을 사용하면 편하다. class Resistor(object): def __init__(self, ohms): self.ohms = ohms self.voltage = 0 self.current = 0 print('Resister') r1 = Resistor(3) print('Before: %5r' % r1.ohms) r1.ohms = 20 print('After: %5r' % r1.ohms) # 속성을 이용하여 += 연산도 쉽게 사용 r1.ohms += 20 print('After adding: %5r' % r1.ohms) # 상위 클래스의 속성에 대해 property를 제공하고 하위 클래스에서 _voltage 속성을 사용한다. class VoltageResistance(Resistor): def __init__(self, ohms): super().__init__(ohms) self._voltage = 3 @property def voltage(self): return self._voltage @voltage.setter def voltage(self, voltage): self._voltage = voltage self.current = self._voltage / self.ohms # 속성을 @property로 사용하여 원하는 동작을 할 수 있다. (current 자동 셋팅) print('VoltageResistance') r2 = VoltageResistance(3) print('Before: %5r amps' % r2.current) r2.voltage = 10 print('After: %5r amps' % r2.current) # property의 setter를 이용하여 범위 체크도 가능하다. class BoundedResistance(Resistor): def __init__(self, ohms): super().__init__(ohms) @property def ohms(self): return self._ohms @ohms.setter def ohms(self, ohms): if ohms <= 0: raise ValueError('%f ohms must be > 0' % ohms) self._ohms = ohms r3 = BoundedResistance(3) #r3.ohms = 0 # 생성자에도 잘못된 값이 오는 순간 바로 캐치가 가능하다. #r3 = BoundedResistance(-3) # super().__init__(ohms) -> self.ohms = ohms -> ohms.setter 호출 #property setter 속성을 사용하여 omhs 속성에 대해 immutable 설정이 가능하다. 
class FixedResistance(Resistor): def __init__(self, ohms): super().__init__(ohms) self.__ohms = 0 @property def ohms(self): return self._ohms @ohms.setter def ohms(self, ohms): if hasattr(self, '_ohms'): raise AttributeError("Can't set attribute") self._ohms = ohms r4 = FixedResistance(3) #r4.ohms = 5 # property에서 다른 속성의 값을 변경하는 것은 사용자 입장에서 의도하지 않는 일이라 볼 수도 있다. class MysteriousResistor(Resistor): def __init__(self, ohms): super().__init__(ohms) @property def ohms(self): self.voltage = self._ohms * self.current return self._ohms @ohms.setter def ohms(self, ohms): self._ohms = ohms print('MysteriousResistor') r7 = MysteriousResistor(20) r7.current = 0.01 print('Before: %5r' % r7.voltage) r7.ohms print('After: %5r' % r7.voltage) class testClass(object): _private_test = 0 pass hi = testClass() hi._private_test = 0 class testClass2(testClass): __metaclass__ = 3 print(testClass) print(testClass()) print(testClass2) print(testClass2()) # 객체의 상태를 수정하는 일은 setter에서 수행하자. # 모듈을 동적으로 임포트하거나, 느린 헬퍼 함수를 실행하거나, 비용이 많이 드는 작업처럼 # 호출부에서 에측하지 못할만한 부작용은 피하자. # 느리거나 복잡한 작업은 메서드가 하도록하자.
true
cd825a14a0684cc33991992235bda855781e5de3
Python
bingli8802/leetcode
/0030_HARD_Substring_with_Concatenation_of_All_Words.py
UTF-8
3,331
3.234375
3
[]
no_license
class Solution(object): def findSubstring(self, s, words): """ :type s: str :type words: List[str] :rtype: List[int] """ from collections import Counter if not s or not words: return [] one_word = len(words[0]) all_len = len(words) * one_word n = len(s) words = Counter(words) res = [] for i in range(0, n - all_len + 1): tmp = s[i:i+all_len] c_tmp = [] for j in range(0, all_len, one_word): c_tmp.append(tmp[j:j+one_word]) if Counter(c_tmp) == words: res.append(i) return res def findSubstring(self, s, words): """ :type s: str :type words: List[str] :rtype: List[int] """ dic = dict(collections.Counter(words)) len_of_each_word = len(words[0]) len_of_words = len_of_each_word * len(words) len_of_s = len(s) valid = 0 res = [] if len_of_words > len_of_s: return [] for i in range(len_of_s - len_of_words + 1): mapping = dic.copy() valid = 0 for j in range(i, len_of_s - len_of_each_word + 1, len_of_each_word): tmp = s[j:j+len_of_each_word] if tmp not in mapping: break elif tmp in mapping: mapping[tmp] -= 1 if mapping[tmp] < 0: break if mapping[tmp] == 0: valid += 1 if valid == len(set(words)): res.append(i) break return res def findSubstring(self, s, words): """ :type s: str :type words: List[str] :rtype: List[int] """ wordLen = len(words[0]) wordCount = len(words) windowSize = wordLen * wordCount freqMap = dict(collections.Counter(words)) res = [] for i in range(wordLen): start = i #sliding window technique while start + windowSize - 1 < len(s): substring = s[start : start+windowSize] j = wordCount temp = {} while j > 0: # for each word in the window # get the word, check its freq tempword = substring[wordLen*(j-1) : wordLen*j] count = temp[tempword] + 1 if tempword in temp else 1 '''if tempword's freq in temp != tempword's freq in freqMap, it means tempword does not appear as many times in the substring as it does in the original string, based on tempword's freq in the words list''' if tempword not in freqMap or count > freqMap[tempword]: break temp[tempword] = count j -= 1 
if j == 0: res += [start] start += wordLen * max(j, 1) return res
true
00fe4b6659968c8697a6ae19ebf6ae514d718481
Python
zhudaxia666/shuati
/左神算法课代码/day2/2荷兰国旗.py
UTF-8
1,095
3.96875
4
[]
no_license
''' 给定一个数组arr,和一个数num,请把小于nums的数放在数组的左边,等于nums的数放在数组的中间,大于nums的数放在数组的右边 要求时间空间复杂度为o(1),时间复杂度o(n) ''' ''' 思路和第一个相似。只不过要设置两个指针,前指针less和后指针more,less在从前面开始,more从后面开始,0-less局域表示小于nums的区域,初始值less为-1,more为n 如果当前遍历的元素cur小于nums,将less后一个元素与cur交换,less加1 如果当前元素等于nums,将继续遍历 如果当前遍历的元素cur大于nums,将more-1后与cur交换,然后在判断交换后的cur值与nums的关系 ''' def code(arr,l,r,num): less=l-1 more=r+1 cur=l while cur<more: if arr[cur]<num: less+=1 arr[cur],arr[less]=arr[less],arr[cur] cur+=1 elif arr[cur]>num: more-=1 arr[cur],arr[more]=arr[more],arr[cur] else: cur+=1 return less+1,more-1#返回的是等于区域的左右边界
true
845ac7a4ee4f535f673fd658b59e61ff4ea72a44
Python
shiwuhao/python
/再谈抽象/demo7.py
UTF-8
490
3.203125
3
[]
no_license
# /usr/bin/env python3 from abc import ABC, abstractmethod class Talker(ABC): @abstractmethod def talk(self): pass class knigget(Talker): def talk(self): print(111) k = knigget() k.talk() print(isinstance(k, Talker)) class Herring: def talk(self): print('Blub') h = Herring() h.talk() print(isinstance(h, Talker)) print('------------------------') Talker.register(Herring) print(isinstance(h, Talker)) print(issubclass(Herring, Talker))
true
f96e0663119ba51d645fe13698cb35187728f4fb
Python
kylemede/SMODT
/sandbox/pythonSandbox/OpticalIRProblemSet3.py
UTF-8
6,177
3.015625
3
[]
no_license
import math as m H = 0.2#m T = 273.0#K p = 18.5e-6#m d = 37.888e-3#m F = 12.0#unitless f = 12.0 #m h = 6.626e-34 #Js c = 299792458 #m/s print "PROBLEM SET 3 ANSWERS\n" ## PROBLEM 3-1 print "\nAnswers to problem 3-1:\n" # First for Ks-band Lambda = 2.15 ##microns deltaLambda = 0.3 ##microns Na = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*H*h*c*m.pow(Lambda,4.0)*(m.exp(14387.7/(Lambda*T))-1.0)) Nb = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) N = Na*Nb print "Number of photons/second in Ks-band= ",N # Second for K-band Lambda = 2.2 ##microns deltaLambda = 0.35 ##microns Na = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*H*h*c*m.pow(Lambda,4.0)*(m.exp(14387.7/(Lambda*T))-1.0)) Nb = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) N = Na*Nb print "Number of photons/second in K-band= ",N #--------------------------------------------------------------------------------- ## PROBLEM 3-2 print "\nAnswers to problem 3-2:\n" # Dividing the Ks-band up into 3 sections [1.85-2.05], [2.05-2.25] and [2.25-2.45] # These corresponds to deltaLambda = 0.1 and Lambda = 1.95, 2.25 and 2.35. 
# Calculating N for each band: Lambda = 1.95 ##microns deltaLambda = 0.1 ##microns Na = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*H*h*c*m.pow(Lambda,4.0)*(m.exp(14387.7/(Lambda*T))-1.0)) Nb = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) N = Na*Nb print "Number of photons/second for [1.85-2.05] of Ks-band = ",N Lambda = 2.25 ##microns deltaLambda = 0.1 ##microns Na = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*H*h*c*m.pow(Lambda,4.0)*(m.exp(14387.7/(Lambda*T))-1.0)) Nb = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) N = Na*Nb print "Number of photons/second for [2.05-2.25] of Ks-band = ",N Lambda = 2.35 ##microns deltaLambda = 0.1 ##microns Na = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*H*h*c*m.pow(Lambda,4.0)*(m.exp(14387.7/(Lambda*T))-1.0)) Nb = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) N = Na*Nb print "Number of photons/second for [2.25-2.45] of Ks-band = ",N #--------------------------------------------------------------------------------- # Dividing up the K-band into 4 sections [1.85-2.025], [2.025-2.2], [2.20-2.375] and [2.375-2.55] # These correspond to deltaLambda = 0.0875 and Lambda = 1.9375, 2.1125, 2.2875 and 2.4625 # Calculating N for each band: Lambda = 1.9375 ##microns deltaLambda = 0.0875 ##microns Na = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*H*h*c*m.pow(Lambda,4.0)*(m.exp(14387.7/(Lambda*T))-1.0)) Nb = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) N = Na*Nb print "\nNumber of photons/second for [1.85-2.025] of K-band = ",N Lambda = 2.1125 ##microns deltaLambda = 0.0875 ##microns Na = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*H*h*c*m.pow(Lambda,4.0)*(m.exp(14387.7/(Lambda*T))-1.0)) Nb = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) N = Na*Nb print "Number of photons/second for [2.025-2.2] of K-band = ",N Lambda = 2.2875 ##microns deltaLambda = 0.0875 ##microns Na = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*H*h*c*m.pow(Lambda,4.0)*(m.exp(14387.7/(Lambda*T))-1.0)) Nb = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) N = Na*Nb print "Number of photons/second for 
[2.20-2.375] of K-band = ",N Lambda = 2.4625 ##microns deltaLambda = 0.0875 ##microns Na = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*H*h*c*m.pow(Lambda,4.0)*(m.exp(14387.7/(Lambda*T))-1.0)) Nb = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) N = Na*Nb print "Number of photons/second for [2.375-2.55] of K-band = ",N #********************************************************************************** ## PROBLEM 2 # Using the same band sections of problem 3-2 print "\nAnswers to problem 3-3:\n" N = 10.0#K # First for the Ks-band: Lambda = 1.95 ##microns deltaLambda = 0.1 ##microns insideA = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*N*H*h*c*m.pow(Lambda,4.0)) insideB = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) #NOTE: m.log1p calculates the natural logarithm of x+1, thus the +1 of the derived equation is dropped Tout = 14387.7/(Lambda*m.log1p(insideA*insideB)) print "Temperature where only 10 photons/second are received for [1.85-2.05] of Ks-band = ",Tout Lambda = 2.25 ##microns deltaLambda = 0.1 ##microns insideA = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*N*H*h*c*m.pow(Lambda,4.0)) insideB = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) Tout = 14387.7/(Lambda*m.log1p(insideA*insideB)) print "Temperature where only 10 photons/second are received for [2.05-2.25] of Ks-band = ",Tout Lambda = 2.35 ##microns deltaLambda = 0.1 ##microns insideA = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*N*H*h*c*m.pow(Lambda,4.0)) insideB = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) Tout = 14387.7/(Lambda*m.log1p(insideA*insideB)) print "Temperature where only 10 photons/second are received for [2.25-2.45] of Ks-band = ",Tout #--------------------------------------------------------------------------------- # Second for the K-band: Lambda = 1.9375 ##microns deltaLambda = 0.0875 ##microns insideA = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*N*H*h*c*m.pow(Lambda,4.0)) insideB = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) Tout = 14387.7/(Lambda*m.log1p(insideA*insideB)) print "\nTemperature where only 
10 photons/second are received for [1.85-2.025] of K-band = ",Tout Lambda = 2.1125 ##microns deltaLambda = 0.0875 ##microns insideA = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*N*H*h*c*m.pow(Lambda,4.0)) insideB = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) Tout = 14387.7/(Lambda*m.log1p(insideA*insideB)) print "Temperature where only 10 photons/second are received for [2.025-2.2] of K-band = ",Tout Lambda = 2.2875 ##microns deltaLambda = 0.0875 ##microns insideA = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*N*H*h*c*m.pow(Lambda,4.0)) insideB = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) Tout = 14387.7/(Lambda*m.log1p(insideA*insideB)) print "Temperature where only 10 photons/second are received for [2.20-2.375] of K-band = ",Tout Lambda = 2.4625 ##microns deltaLambda = 0.0875 ##microns insideA = (d*m.pow(p,2.0)*deltaLambda*(1.191e8))/(8.0*N*H*h*c*m.pow(Lambda,4.0)) insideB = ((d/H)+(m.sqrt(2.0)/F))*m.pow(10.0,-6.0) Tout = 14387.7/(Lambda*m.log1p(insideA*insideB)) print "Temperature where only 10 photons/second are received for [2.375-2.55] of K-band = ",Tout
true
640ee16dbf96e97f0b65d0023a55f54011321afe
Python
schnitzelbub/bocadillo
/bocadillo/views.py
UTF-8
4,556
3.125
3
[ "MIT" ]
permissive
import inspect from functools import partial, wraps from typing import List, Union, Any, Dict from .app_types import Handler from .compat import call_async, camel_to_snake from .constants import ALL_HTTP_METHODS MethodsParam = Union[List[str], all] class HandlerDoesNotExist(Exception): # Raised to signal that no handler exists for a requested HTTP method. pass class View: """This class defines how all HTTP views are represented internally. ::: warning Views objects should not be created directly. Instead, use [from_handler](#from-handler) or [from_obj](#from-obj). ::: HTTP methods are mapped to **handlers**, i.e. methods of a `View` object. The following handlers are supported: - `.get(req, res, **kwargs)` - `.post(req, res, **kwargs)` - `.put(req, res, **kwargs)` - `.patch(req, res, **kwargs)` - `.delete(req, res, **kwargs)` - `.head(req, res, **kwargs)` - `.options(req, res, **kwargs)` - `.handle(req, res, **kwargs)` ::: tip `.handle()` is special: if defined, it overrides all others. ::: # Attributes name (str): the name of the view. """ def __init__(self, name: str): self.name = name get: Handler post: Handler put: Handler patch: Handler delete: Handler head: Handler options: Handler @classmethod def create(cls, name: str, docstring: str, handlers: dict) -> "View": # Create a view object. view = cls(name) view.__doc__ = docstring # Convert handlers to async if necessary for method, handler in handlers.items(): if not inspect.iscoroutinefunction(handler): handler = wraps(handler)(partial(call_async, handler)) handlers[method] = handler # Set head handler if not given but get is given. 
if "get" in handlers and "head" not in handlers: handlers["head"] = handlers["get"] for method, handler in handlers.items(): setattr(view, method, handler) return view def _get_handler(self, req): if hasattr(self, "handle"): return self.handle return getattr(self, req.method.lower()) async def __call__(self, req, res, **kwargs): try: handler: Handler = self._get_handler(req) except AttributeError as e: raise HandlerDoesNotExist from e else: await handler(req, res, **kwargs) def from_handler(handler: Handler, methods: MethodsParam = None) -> View: """Convert a handler to a `View` instance. # Parameters handler (function or coroutine function): Its name and docstring are copied onto the view. It used as a handler for each of the declared `methods`. For example, if `methods=["post"]` then the returned `view` object will only have a `.post()` handler. methods (list of str): A list of supported HTTP methods. The `all` built-in can be used to support all HTTP methods. Defaults to `["get"]`. # Returns view (View): a `View` instance. # See Also - The [constants](./constants.md) module for the list of all HTTP methods. """ if methods is None: methods = ["get"] if methods is all: methods = ["handle"] else: methods = [m.lower() for m in methods] handlers = {method: handler for method in methods} return View.create(handler.__name__, handler.__doc__, handlers) def from_obj(obj: Any) -> View: """Convert an object to a `View` instance. # Parameters obj (any): its handlers, snake-cased class name and docstring are copied onto the view. # Returns view (View): a `View` instance. """ handlers = get_handlers(obj) name = camel_to_snake(obj.__class__.__name__) return View.create(name, obj.__doc__, handlers) def get_handlers(obj: Any) -> Dict[str, Handler]: """Return all `View` handlers declared on an object. # Parameters obj (any): an object. # Returns handlers (dict): A dict mapping an HTTP method to a handler. 
""" return { method: getattr(obj, method) for method in ("handle", *map(str.lower, ALL_HTTP_METHODS)) if hasattr(obj, method) } def view(methods: MethodsParam = None): """Convert the decorated function to a proper `View` object. This decorator is a shortcut for [from_handler](#from-handler). """ return partial(from_handler, methods=methods)
true
688a13e792aa6360f053a959a75465c995a4e141
Python
ivenpoker/Python-Projects
/online-workouts/codewars/python/who_likes_it.py
UTF-8
1,268
3.671875
4
[ "MIT" ]
permissive
#!/usr/bin/env python3 ####################################################################################### # # # Program purpose: Recreation of facebook feature on post likes. # # # # Program Author : Happi Yvan <ivensteinpoker@gmail.com> # # Creation Date : September 10, 2020 # # # ####################################################################################### def likes(persons: list) -> str: if len(persons) == 1: return f"{persons[0]} likes this" elif len(persons) == 2: return f"{' and '.join(persons)} like this" elif len(persons) == 3: return f"{', '.join(persons[0:2])} and {persons[2]} like this" else: return f"{', '.join(persons[0:2])} and {len(persons[2:])} others like this" if __name__ == "__main__": print(likes(["Peter"])) print(likes(["Jacob", "Alex"])) print(likes(["Max", "John", "Mark"])) print(likes(["Alex", "Jacob", "Mark", "Max"]))
true
ce3a22ea0da51d3ee2b1497d6c9012e11cf479ab
Python
Acheros/Dotfiles
/python_exercise/exercise24.py
UTF-8
594
3.921875
4
[]
no_license
#!/usr/bin/python3 import math contents = [] print("please input a set of instruction:") while True: try: a = input("") except EOFError: break contents.append(a) x = 0 y = 0 distant = 0 for value in contents: new_value = value.split(" ") if new_value[0] == "UP": y += int(new_value[1]) elif new_value[0] == "DOWN": y -= int(new_value[1]) elif new_value[0] == "RIGHT": x += int(new_value[1]) elif new_value[0] == "LEFT": x -= int(new_value[1]) distant = math.hypot(x, y) print("the distant is:", distant)
true
89923f713cce1ccab80a71c540ffe74d730a8bb5
Python
gavinconran/ArtNet
/ArtNet_Supporting_Documentation/05_Descartes/Codes/06b_FourierSeries_SawToothWave.py
UTF-8
2,064
3.25
3
[]
no_license
# Plotting Code for Saw Tooth Wave import numpy as np import math import matplotlib.pyplot as plt import matplotlib as mpl from scipy import signal import scipy.fftpack mpl.style.use('classic') def FS_SawTooth(k, tt): ''' k, waveNumbers, is a list of integer wave numbers tt is a list of time stamps returns the aggregated saw tooth wave ''' Cn = 0 # constant term return Cn + np.sum([((2.0 * (-1)**(k+1)) / k) * np.sin(k*t) for t in tt], axis=1) # sum of waves tt = np.arange(0, 2*np.pi + 0.01, 0.01) # time scale tt_fs = np.arange(-np.pi, np.pi + 0.01, 0.01) # time scale # Plot saw tooth wave y_saw = signal.sawtooth(tt) #(tt) y_square = signal.square(tt) plt.figure(1) plt.title("Saw Tooth Wave") plt.ylabel("Magnitude") plt.xlabel("Time (seconds)") plt.plot(tt, y_saw, 'black') plt.ylim(-2, 2) plt.xlim(-1, 7) plt.show() # Fourier Series k_max3 = 3 k_all3 = np.arange(1,k_max3+1) # list of consecutive wave numbers y_saw3 = FS_SawTooth(k_all3, tt_fs) k_max5 = 5 k_all5 = np.arange(1,k_max5+1) # list of consecutive wave numbers y_saw5 = FS_SawTooth(k_all5, tt_fs) k_max7 = 7 k_all7 = np.arange(1,k_max7+1) # list of consecutive wave numbers y_saw7 = FS_SawTooth(k_all7, tt_fs) k_max9 = 9 k_all9 = np.arange(1,k_max9+1) # list of consecutive wave numbers y_saw9 = FS_SawTooth(k_all9, tt_fs) plt.figure(2) plt.title("Saw Tooth Wave" "\n" "Fourier Series") plt.ylabel("Magnitude") plt.xlabel("Time") plt.plot(tt, [y/np.pi for y in y_saw3]) plt.plot(tt, [y/np.pi for y in y_saw7]) plt.plot(tt, [y/np.pi for y in y_saw9]) plt.plot(tt, y_saw, 'black') plt.ylim(-2, 2) plt.xlim(-1, 7) plt.show() # Plot Fourier Transform # Number of samplepoints N = 600 # sample spacing T = 1.0 / 800.0 x = np.linspace(0.0, N*T, N) yf = scipy.fftpack.fft(y_saw) xf = np.linspace(0.0, 1.0/(2.0*T), N/2) plt.figure(3) plt.title("Saw Tooth Wave" "\n" "Fourier Transform") plt.ylabel("Magnitude") plt.xlabel("Frequency (Hz)") plt.plot(xf, 2.0/N * np.abs(yf[:N//2])) plt.ylim(0, 0.6) plt.show()
true
dfe1438ad6dd0a609c88ec2a488a0cebf0475796
Python
DrDABBAD/Raspberry-pico-tetris-st7735
/test/graphistestremote.py
UTF-8
9,711
2.75
3
[ "MIT" ]
permissive
# Our supplier changed the 1.8" display slightly after Jan 10, 2012 # so that the alignment of the TFT had to be shifted by a few pixels # this just means the init code is slightly different. Check the # color of the tab to see which init code to try. If the display is # cut off or has extra 'random' pixels on the top & left, try the # other option! # If you are seeing red and green color inversion, use Black Tab # If your TFT's plastic wrap has a Black Tab, use the following: # tft.initR(INITR_BLACKTAB); // initialize a ST7735S chip, black tab # If your TFT's plastic wrap has a Red Tab, use the following: # tft.initR(INITR_REDTAB); // initialize a ST7735R chip, red tab # If your TFT's plastic wrap has a Green Tab, use the following: # tft.initR(INITR_GREENTAB); // initialize a ST7735R chip, green tab from ST7735 import TFT from sysfont import sysfont from machine import SPI, Pin import time import math spi = SPI(1, baudrate=20000000, polarity=0, phase=0, sck=Pin(10), mosi=Pin(11), miso=None) # def __init__( self, spi, aDC, aReset, aCS) : tft = TFT(spi, 3, 2, 4) tft.initg() tft.rgb(True) def testlines(color): tft.fill(TFT.BLACK) for x in range(0, tft.size()[0], 6): tft.line((0, 0), (x, tft.size()[1] - 1), color) for y in range(0, tft.size()[1], 6): tft.line((0, 0), (tft.size()[0] - 1, y), color) tft.fill(TFT.BLACK) for x in range(0, tft.size()[0], 6): tft.line((tft.size()[0] - 1, 0), (x, tft.size()[1] - 1), color) for y in range(0, tft.size()[1], 6): tft.line((tft.size()[0] - 1, 0), (0, y), color) tft.fill(TFT.BLACK) for x in range(0, tft.size()[0], 6): tft.line((0, tft.size()[1] - 1), (x, 0), color) for y in range(0, tft.size()[1], 6): tft.line((0, tft.size()[1] - 1), (tft.size()[0] - 1, y), color) tft.fill(TFT.BLACK) for x in range(0, tft.size()[0], 6): tft.line((tft.size()[0] - 1, tft.size()[1] - 1), (x, 0), color) for y in range(0, tft.size()[1], 6): tft.line((tft.size()[0] - 1, tft.size()[1] - 1), (0, y), color) def testfastlines(color1, color2): 
tft.fill(TFT.BLACK) for y in range(0, tft.size()[1], 5): tft.hline((0, y), tft.size()[0], color1) for x in range(0, tft.size()[0], 5): tft.vline((x, 0), tft.size()[1], color2) # def testgamerects(color): # tft.fill(TFT.WHITE) ## How is this screen not GameEngine # for i in range(game.height): # for j in range(game.width): # OK need shapes in gameEngine # TFT.rect(screen, GRAY, [ # game.x + game.zoom * j, game.y + game.zoom * i, game.zoom, game.zoom], 1) # if game.field[i][j] > 0: # TFT.rect(screen, colors[game.field[i][j]], # [game.x + game.zoom * j + 1, game.y + game.zoom * i + 1, game.zoom - 2, game.zoom - 1]) def testdrawrects(color): tft.fill(TFT.BLACK) for x in range(0, tft.size()[0], 6): tft.rect((tft.size()[0]//2 - x//2, tft.size() [1]//2 - x/2), (x, x), color) def testfillrects(color1, color2): tft.fill(TFT.BLACK) for x in range(tft.size()[0], 0, -6): tft.fillrect((tft.size()[0]//2 - x//2, tft.size()[1]//2 - x/2), (x, x), color1) tft.rect((tft.size()[0]//2 - x//2, tft.size() [1]//2 - x/2), (x, x), color2) def testfillcircles(radius, color): for x in range(radius, tft.size()[0], radius * 2): for y in range(radius, tft.size()[1], radius * 2): tft.fillcircle((x, y), radius, color) def testdrawcircles(radius, color): for x in range(0, tft.size()[0] + radius, radius * 2): for y in range(0, tft.size()[1] + radius, radius * 2): tft.circle((x, y), radius, color) def testtriangles(): tft.fill(TFT.BLACK) color = 0xF800 w = tft.size()[0] // 2 x = tft.size()[1] - 1 y = 0 z = tft.size()[0] for t in range(0, 15): tft.line((w, y), (y, x), color) tft.line((y, x), (z, x), color) tft.line((z, x), (w, y), color) x -= 4 y += 4 z -= 4 color += 100 def testroundrects(): tft.fill(TFT.BLACK) color = 100 for t in range(5): x = 0 y = 0 w = tft.size()[0] - 2 h = tft.size()[1] - 2 for i in range(17): tft.rect((x, y), (w, h), color) x += 2 y += 3 w -= 4 h -= 6 color += 1100 color += 100 def tftprinttest(): tft.fill(TFT.BLACK) v = 30 tft.text((0, v), "Hello World!", TFT.RED, sysfont, 1, 
nowrap=True) v += sysfont["Height"] tft.text((0, v), "Hello World!", TFT.YELLOW, sysfont, 2, nowrap=True) v += sysfont["Height"] * 2 tft.text((0, v), "Hello World!", TFT.GREEN, sysfont, 3, nowrap=True) v += sysfont["Height"] * 3 tft.text((0, v), str(1234.567), TFT.BLUE, sysfont, 4, nowrap=True) time.sleep_ms(1500) tft.fill(TFT.BLACK) v = 0 tft.text((0, v), "Hello World!", TFT.RED, sysfont) v += sysfont["Height"] tft.text((0, v), str(math.pi), TFT.GREEN, sysfont) v += sysfont["Height"] tft.text((0, v), " Want pi?", TFT.GREEN, sysfont) v += sysfont["Height"] * 2 tft.text((0, v), hex(8675309), TFT.GREEN, sysfont) v += sysfont["Height"] tft.text((0, v), " Print HEX!", TFT.GREEN, sysfont) v += sysfont["Height"] * 2 tft.text((0, v), "Sketch has been", TFT.WHITE, sysfont) v += sysfont["Height"] tft.text((0, v), "running for: ", TFT.WHITE, sysfont) v += sysfont["Height"] tft.text((0, v), str(time.ticks_ms() / 1000), TFT.PURPLE, sysfont) v += sysfont["Height"] tft.text((0, v), " seconds.", TFT.WHITE, sysfont) tft.fill(TFT.BLACK) tft.text((0, 0), "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur adipiscing ante sed nibh tincidunt feugiat. Maecenas enim massa, fringilla sed malesuada et, malesuada sit amet turpis. Sed porttitor neque ut ante pretium vitae malesuada nunc bibendum. Nullam aliquet ultrices massa eu hendrerit. Ut sed nisi lorem. In vestibulum purus a tortor imperdiet posuere. ", TFT.WHITE, sysfont, 1) time.sleep_ms(1000) #def rect( self, aStart, aSize, aColor ) : '''Draw a hollow rectangle. 
aStart is the smallest coordinate corner and aSize is a tuple indicating width, height.''' def testdrawarects(color): tft.fill(TFT.BLACK) tft.rect((tft.size()[0]//2 , tft.size() [1]//2) , (50, 100), color) tft.rect(( 1 ,1) , (127, 159), TFT.RED) print(str(tft.size()[0]//2)) def testgrid(): x= 4 y= 10 tft.fill(TFT.BLACK) for i in range(0,10): for j in range(0,20): # tft.rect(( x + 12 *i ,y+ 7*j -1 ),(120//10,155//20),TFT.RED ) # if (j % 2) == 1 and (i % 2) == 1 : time.sleep_ms(4000) for i in range(0,10): for j in range(0,20): tft.fillrect(( x + 12 *i +1 ,y+ 7* j ),(120//10-2,155//20-2),TFT.BLUE) time.sleep_ms(4000) def testblockColor(): vPos=80 fSize=2 aColor = TFT.RED tft.fill(TFT.BLACK) tft.text((8, vPos), "TFT.BLACK", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) aColor = TFT.BLACK tft.fill(TFT.RED) tft.text((8, vPos), "TFT.RED", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) tft.fill(TFT.MAROON) tft.text((8, vPos),"TFT.MAROON", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) tft.fill(TFT.GOLD) tft.text((8, vPos), "TFT.GOLD", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) tft.fill(TFT.GREEN) tft.text((8, vPos), "TFT.GREEN", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) tft.fill(TFT.FOREST) tft.text((8, vPos), "TFT.FOREST", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) tft.fill(TFT.BLUE) tft.text((8, vPos), "TFT.BLUE", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) tft.fill(TFT.NAVY) tft.text((8, vPos), "TFT.NAV", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) tft.fill(TFT.CYAN) tft.text((8, vPos), "TFT.CYAN", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) tft.fill(TFT.YELLOW) tft.text((8, vPos), "TFT.YELLOW", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) tft.fill(TFT.PURPLE) tft.text((8, vPos), "TFT.PURPLE", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) tft.fill(TFT.WHITE) tft.text((8, vPos), "TFT.WHITE", aColor, sysfont, fSize, nowrap=True) 
time.sleep_ms(1000) tft.fill(TFT.GRAY) tft.text((8, vPos), "TFT.GRAY", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) tft.fill(TFT.ORANGE) tft.text((8, vPos), "TFT.ORANGE", aColor, sysfont, fSize, nowrap=True) time.sleep_ms(1000) def test_main(): time.sleep_ms(1000) testgrid() testblockColor() # def test_main_org(): print("Start main") tft.fill(TFT.BLACK) tftprinttest() time.sleep_ms(4000) print("test Y line") testlines(TFT.YELLOW) time.sleep_ms(500) print("fast line") testfastlines(TFT.RED, TFT.BLUE) time.sleep_ms(500) testdrawrects(TFT.GREEN) time.sleep_ms(500) testfillrects(TFT.YELLOW, TFT.PURPLE) time.sleep_ms(500) tft.fill(TFT.BLACK) testfillcircles(10, TFT.BLUE) testdrawcircles(10, TFT.WHITE) time.sleep_ms(500) testroundrects() time.sleep_ms(500) testtriangles() time.sleep_ms(500) testdrawarects(TFT.GREEN) time.sleep_ms(500) testgrid() test_main()
true
71d0579d1eaab787e267103f0bc329a022ebfa02
Python
maayan20-meet/FinalProjBenandJerrys
/database.py
UTF-8
1,040
2.890625
3
[]
no_license
from model import Base, Store from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker engine = create_engine('sqlite:///stores.db?check_same_thread=False') Base.metadata.create_all(engine) DBSession = sessionmaker(bind=engine) session = DBSession() def add_store(name, city, street, phone): """Add a store to the DB.""" store = Store(name=name, city=city, street=street, phone=phone) session.add(store) session.commit() def query_all(): print('b') return session.query(Store).all() def get_store(name): """Find the first store in the DB, by thr name.""" return session.query(Store).filter_by(name=name).first() def query_by_city(city): return session.query(Store).filter_by(city=city).all() def remove_store(name): session.query(Store).filter_by(name=name).first().remove() session.commit() # add_store('abcd', '05000', 'Jerusalem', 'gfds') # add_store('1234', '12365', 'Jerusalem', 'sd') # add_store('asdf', '05000', 'Haifa', 'ds') # add_store('abcd', '05000', 'Haifa', 'qw')
true
484bcd15785f9ae4feb9afb36c3c2107434527dd
Python
dungmv56/Xierpa3
/xierpa3/components/container.py
UTF-8
3,212
2.640625
3
[ "MIT" ]
permissive
# -*- coding: UTF-8 -*- # ----------------------------------------------------------------------------- # xierpa server # Copyright (c) 2014+ buro@petr.com, www.petr.com, www.xierpa.com # # X I E R P A 3 # Distribution by the MIT License. # # ----------------------------------------------------------------------------- # # container.py # from xierpa3.components import Component from xierpa3.attributes import Perc, Margin from xierpa3.descriptors.blueprint import BluePrint from xierpa3.descriptors.media import Media # Include type of Style that holds @media parameters. class Container(Component): u"""The *Container* is the generic component that holds most other components on a page. Containers are always two-layered: a container @div@ to position on a page with a row @div@ inside that handles the responsive behavior of the content.""" # Get Constants->Config as class variable, so inheriting classes can redefine values. C = Component.C BLUEPRINT = BluePrint( # Page/Column paddingLeft=10, doc_paddingLeft=u'Padding left of main container.', paddingRight=10, doc_paddingRight=u'Padding right of main container.', # Style backgroundColor=('#fff'), doc_backgroundColor=u'Background color of the container.', # Row rowWidth=Perc(100), doc_rowWidth=u'Default width of a row inside a container.', rowMargin=Margin(0, C.AUTO), doc_margin=u'Row margin. This makes the main container (page) center on maxwidth.', rowMinWidth=C.M_MOBILE_MAX, doc_rowMinWidth=u'Minimum width of the row inside a container. Default is %d.' % C.M_MOBILE_MAX, rowMinWidthMobile=0, doc_rowMinWidthMobile=u'Minimum width of the row inside a container for mobile.', rowMaxWidth=C.MAXWIDTH, doc_rowMaxWidth=u'Maximum width of the row inside a container. Default is %d.' 
% C.MAXWIDTH, rowMaxWidthMobile=Perc(100), doc_rowMaxWidthMobile=u'Maximum width of the row inside a container for mobile.', rowOverflow=C.HIDDEN, doc_rowOverflow=u'Default overflow hidden inside a row inside a container.', rowFloat=C.NONE, doc_rowFloat=u'Default float none inside a row inside a container.', ) def buildBlock(self, b): u"""Build the container-div with a row-div inside.""" s = self.style b.div(class_=self.getClassName(), paddingleft=s.paddingLeft, paddingright=s.paddingRight, backgroundcolor=s.backgroundColor, media=Media(max=self.C.M_MOBILE_MAX, paddingleft=0, paddingright=0) ) b.div(class_=self.C.CLASS_ROW, width=s.rowWidth, margin=s.rowMargin, float=s.rowFloat, overflow=s.rowOverflow, maxwidth=s.rowMaxWidth, minwidth=s.rowMinWidth, media= # Container row has width 100% in media! Media(max=self.C.M_MOBILE_MAX, width=Perc(100), minwidth=s.rowMinWidthMobile, maxwidth=s.rowMaxWidthMobile, float=s.rowFloat), ) for component in self.components: component.build(b) b._div(comment='.'+self.C.CLASS_ROW) # Comment class at end of row b._div(comment='.'+self.getClassName()) # Comment the class at end of container
true
58f9367caeff97d697790683664d377696e5e3c3
Python
farmergirl13/PGSS-Team-Project
/hw3_stroopTest.py
UTF-8
3,823
3.4375
3
[]
no_license
#################################################### # 2017 PGSS CS HW3 #################################################### # Instructions: # https://docs.google.com/document/d/17fCC9mQ5j4UiGi-BQv4h8f1pQsZODcZr8lFZQOCwBrA # More colors: # https://wiki.tcl.tk/37701 #################################################### # # Fill in this section! # Author: Elizabeth Farmer # Collaborated with: # Sammi, # <GROUP MEMBER'S NAME>, # <GROUP MEMBER'S NAME> # (A reminder that there is a 4-person group limit.) # #################################################### from tkinter import * import random import copy #################################################### # Helper functions #################################################### # Utilize this function below to draw. (It is called for you already.) # Hint: You will want to pass in other parameters than the default. def getRootAndCanvas(width=300, height=300): root = Tk() canvas = Canvas(root, width=width, height=height) canvas.pack() return (root, canvas) #################################################### # stroopTest #################################################### def stroopTest(availableColors, rows, cols): winWidth = cols*100 winHeight = rows*50 (root, canvas) = getRootAndCanvas(winWidth, winHeight) randomWord = [] randomColor = [] randomWordTuple = [] #create the words and colors for row in range(rows): for col in range(cols): x = col*100 + 50 y = row*50 + 25 word = random.choice(availableColors) color = random.choice(availableColors) while(word==color): word = random.choice(availableColors) canvas.create_text(x, y, text=word, fill=color) randomWord.append(word) randomColor.append(color) randomTupleAddition = (word, color) randomWordTuple.append(randomTupleAddition) print(randomColor) result = randomWordTuple # Keep this line at the end! It makes your drawing actually display. root.mainloop() # Keep the return at the very end! 
return result #################################################### # Testing stroopTest (only what we can test...) #################################################### def testTupleListResult(testResult, availableColors, rows, cols): # check dimensions correctLength = rows * cols assert(len(testResult) == correctLength) for (wordText, colorOfWord) in testResult: # check that we only used the available colors assert(wordText in availableColors) assert(colorOfWord in availableColors) # check that the wordText and colorOfWord don't match assert(wordText != colorOfWord) def testStroopTest(): # simple test print("Testing simple test...") availableColors = ["red", "green", "blue"] rows = 3 cols = 4 testResultSimple = stroopTest(availableColors, rows, cols) testTupleListResult(testResultSimple, availableColors, rows, cols) print("Simple test passed!") print("(Make sure to check visual and printed results as well.)") # hard test print("\nTesting hard test...") availableColors = ["red", "orange", "yellow", "green", "blue", "purple", "gray", "black", "pink"] rows = 7 cols = 5 testResultHard = stroopTest(availableColors, rows, cols) testTupleListResult(testResultHard, availableColors, rows, cols) print("Hard test passed!") print("(Make sure to check visual and printed results as well.)") # silly test print("\nTesting silly test...") availableColors = ["cyan", "tomato", "khaki", "firebrick"] rows = 10 cols = 10 testResultSilly = stroopTest(availableColors, rows, cols) testTupleListResult(testResultSilly, availableColors, rows, cols) print("Silly test passed!") print("(Make sure to check visual and printed results as well.)") testStroopTest()
true
b0cb1da10148411775c82108eaef94e252751e3d
Python
virginiah894/python_codewars
/7KYU/get_factorial.py
UTF-8
138
3.21875
3
[ "MIT" ]
permissive
# from math import factorial as fact def factorial(n: int) -> int: return 1 if n <= 1 else n * factorial(n - 1) # return fact(n)
true
5818529d7f174496b19e559756ba3e5f3cc99ef6
Python
CronoxAU/Euler
/python/Problem18/problem18.py
UTF-8
723
3.796875
4
[]
no_license
#Solution to Project Euler problem 18 - https://projecteuler.net/problem=18 #Maximum path sum #Work from the bottom to the top #work through each position taking the highest number from the two below positions and adding that to the current position to produce the maximum sum for that position. #Once we work through the whole triangle the number in the top position should be the highest possible sum def maxSumTriangle(triData): #loop through the rows starting at the second bottom row for row in range(len(triData)-2, -1, -1): for position in range(0, len(triData[row])): triData[row][position] = max(triData[row+1][position], triData[row+1][position+1]) + triData[row][position] return triData[0][0]
true
f4d68c3f36ec30883f01e9453e0d9b021664f992
Python
neilb14/cryptotracker
/parsers/sample_parser.py
UTF-8
389
2.71875
3
[]
no_license
import pprint,re from datetime import datetime def parse(row): result = {'valid':True} date = datetime.strptime(row[0], '%d-%m-%y') result['date'] = date result['from_currency'] = row[1] result['to_currency'] = row[3] result['amount'] = float(row[4]) rate = re.sub('[",]', '', row[5]) result['rate'] = float(rate) result['charge'] = 0 return result
true
27f37b4b8b0de90a1ecea355261d3b064c64b05c
Python
jfinocchiaro/long-term-fair-mdps
/political-influence-refactored/src/platform_opt.py
UTF-8
6,356
2.859375
3
[]
no_license
#!/usr/bin/env python # coding: utf-8 """ Plartform optimizations, fair, half and unconstrained """ #############################################Required libraries#################################################### import numpy as np import cvxpy as cp from scipy.special import betainc import sims #############################################End#################################################################### # utility the platform has for user g clicking on article s # indexed by (article, user) unit_util = {(1,1) : 1., (1,-1) : 1., (-1,1) : 1., (-1,-1) : 1.} def psi(c, v, F): """ Set up ------ input :param c: dictionary indexed (g,s), cost for clicking by group and article :param v: dictionary indexed (g,s), value for sharing by group and article :param F: dictionary indexed (g,s), alpha and beta parameters for beta distribution output :return ps: dictionary indexed (g,s), """ ps = {} for g in [-1,1]: for s in [-1,1]: alpha, beta = F[(g,s)] c_ind = c[(g,s)] v_ind = v[(g,s)] ps[(g,s)] = 1 - betainc(alpha, beta, float(c_ind) / v_ind) return ps def l(g,s,t, pi, theta, q, c,v,F): """ Set up definition of l from the paper #currently done by recursion; seems to be more efficient in closed form, but want to confirm it is correct before I switch code. 
------ input :param g: int, group affliation 1 or -1 :param s: int, affliation of the article source 1, -1 :param t: int, time step :param pi: dictionary, proportion of users in each group :param theta: dictionary, indexed by g, probability of shown a user in group g article s :param q: int, total number of iterations :param c: dictionary indexed (g,s), cost for clicking by group and article :param v: dictionary indexed (g,s), value for sharing by group and article :param F: dictionary indexed (g,s), alpha and beta parameters for beta distribution """ ps = psi(c,v,F) ti = (g+1) / 2 if t >= 2: return ((q[g] * l(g,s,t-1, pi, theta, q,c,v,F)) + (1 - q[-g]) * l(-g,s,t-1, pi, theta, q,c,v,F)) * ps[(g,s)] else: if s == -1: return pi[g] * theta[ti] * ps[(g,-1)] if s == 1: return pi[g] * (1 - theta[ti]) * ps[(g,1)] def opt(policy, pi, q, T, c,v,F, epsilon = 0.1, exposure_e = 0.0, delta_low = 0.5, delta_high=2, u=unit_util): """ Set up the optimization function ------ input :param policy: string, whether optimization is unconstrained or fair: additive, ratio :param pi: dictionary, proportion of users in each group :param q: dictionary, the homophily variable :param T: int, max total number of timesteps :param epsilon: double, level of fairness violation allowed :param c: dictionary indexed (g,s), cost for clicking by group and article :param v: dictionary indexed (g,s), value for sharing by group and article :param F: dictionary indexed (g,s), alpha and beta parameters for beta distribution :param u: dictionary, platform's utility for a click which can be thought of as a price charged. 
output :return ps: 1d-array of shape (n_features, 1), weight of the classifier """ # varaible theta_A, theta_B theta = cp.Variable(2) # define the optimization objective objective = cp.Maximize(cp.sum([u[(1,1)] * l(1,1,t, pi, theta, q, c,v,F) + u[(-1,1)] * l(-1,1,t,pi, theta, q, c,v,F) \ + u[(1,-1)] * l(1,-1,t, pi, theta, q, c,v,F) + u[(-1,-1)] * l(-1,-1,t, pi, theta, q, c,v,F) for t in range(T)])) # define the unconstrained constraint constraints_theta = [exposure_e <= theta[0], theta[0] <= 1 - exposure_e, exposure_e <= theta[1], theta[1] <= 1 - exposure_e] constraints = [] if policy == 'additive': #generate eta, used as constraints. eta = {} for s in [-1,1]: for g in [-1,1]: ti = (g+1) / 2 #theta index.... moving from -1 to 0 and 1 to 1 if s == 1: eta[(s,g)] = pi[g] * theta[ti] + sum([ l(g,s,t, pi, theta,q,c,v,F) * q[g] + l(-g,s,t, pi, theta,q,c,v,F) * (1 - q[-g]) for t in range(1,T)]) else: eta[(s,g)] = pi[g] * (1-theta[ti]) + sum([ l(g,s,t, pi, theta,q,c,v,F) * q[g] + l(-g,s,t, pi, theta,q,c,v,F) * (1 - q[-g]) for t in range(1,T)]) constraints_eta = [eta[(1,1)] - eta[(-1,-1)] <= epsilon, eta[(-1,-1)] - eta[(1,1)] <= epsilon] constraints = constraints_theta+constraints_eta elif policy == 'ratio': constraints_ratio = [] constraints_ratio.append(delta_low * sum([l(-1,-1,t, pi, theta, q, c,v,F) for t in range(1,T+1)]) <= sum([l(1,1,t, pi, theta, q, c,v,F) for t in range(1,T+1)])) constraints_ratio.append(sum([l(1,1,t, pi, theta, q, c,v,F) for t in range(1,T+1)]) <= delta_high * sum([l(-1,-1,t, pi, theta, q, c,v,F) for t in range(1,T+1)])) constraints_ratio.append(delta_low * sum([l(-1,1,t, pi, theta, q, c,v,F) for t in range(1,T+1)])<= sum([l(1,-1,t, pi, theta, q, c,v,F) for t in range(1,T+1)])) constraints_ratio.append(sum([l(1,-1,t, pi, theta, q, c,v,F) for t in range(1,T+1)])<= delta_high * sum([l(-1,1,t, pi, theta, q, c,v,F) for t in range(1,T+1)])) constraints = constraints_theta + constraints_ratio else: constraints = constraints_theta prob = 
cp.Problem(objective, constraints) prob.solve() if prob.solve is not None and theta.value is not None: th = {} th[-1] = max(min(theta.value[1], 1.), 0.) th[1] = max(min(theta.value[0], 1.), 0.) else: th = {1:0, -1:0} print("Constraints not feasible") # quit() return th #test code if __name__ == '__main__': dataset_name = 'twitter_abortion' T = 5 pi,beta_dist,P,v,c,q = sims.get_params(dataset_name) print(opt('additive',pi, q, T, c,v,beta_dist))
true
17b6588d08291512ecb362fe018036601a931364
Python
srcole/sdburritodash
/create_df.py
UTF-8
2,942
2.953125
3
[]
no_license
import pandas as pd import geocoder import numpy as np # Get data from Google Sheet url = 'https://docs.google.com/spreadsheet/ccc?key=18HkrklYz1bKpDLeL-kaMrGjAhUM6LeJMIACwEljCgaw&output=csv' df = pd.read_csv(url) # Make lower df.Location = df.Location.str.lower().str.strip() df.Reviewer = df.Reviewer.str.lower().str.strip() df.Burrito = df.Burrito.str.lower().str.strip() # Delete unreliable ratings df = df[(df.Unreliable != 'x') & (df.Unreliable != 'X')] # Delete ratings outside of San Diego df = df[(df.NonSD != 'x') & (df.NonSD != 'X')] df.reset_index(drop=True, inplace=True) # Only keep columns of interest cols_keep = ['Location', 'Burrito', 'Date', 'URL', 'Yelp', 'Google', 'Address', 'Neighborhood', 'Cost', 'Volume', 'Tortilla', 'Temp', 'Meat', 'Fillings', 'Meat:filling', 'Uniformity', 'Salsa', 'Synergy', 'Wrap', 'overall', 'Reviewer'] df_burritos = df[cols_keep] # Get average ratings for each restaurant avg_cols = ['Cost', 'Volume', 'Tortilla', 'Temp', 'Meat', 'Fillings', 'Meat:filling', 'Uniformity', 'Salsa', 'Synergy', 'Wrap', 'overall'] df_rest_avg = df_burritos.groupby('Location').mean()[avg_cols].reset_index() # Get address about each restaurant add_cols = ['Address', 'Neighborhood'] df_rest_add = df_burritos.groupby('Location').first()[add_cols].reset_index() # Get info about each restaurant info_cols = ['URL', 'Yelp', 'Google'] df_rest_info = df_burritos.groupby('Location').first()[info_cols].reset_index() # Get count of number of burritos rated df_rest_count = pd.DataFrame(df_burritos.groupby('Location')['Burrito'].count()).reset_index().rename({'Burrito': 'N'}, axis=1) # Optional future features: # Get most popular burrito # Get date most recently rated # Get most favorable reviewer # Get lat and long addresses = df_rest_add['Address'] + ', ' + \ df_rest_add['Neighborhood'] + ', San Diego, CA' lats = np.zeros(len(addresses)) longs = np.zeros(len(addresses)) for i, address in enumerate(addresses): g = geocoder.google(address) Ntries = 1 while g.latlng 
== []: if 'Marshall College' in address: address = '9500 Gilman Drive, La Jolla, CA' g = geocoder.google(address) print(str(i) + '/' + str(len(lats)) + ' Attempt: ' + str(Ntries) + ' Address:' + address) Ntries += 1 lats[i], longs[i] = g.latlng # # Check for nonsense lats and longs # if sum(np.logical_or(lats > 34, lats < 32)): # raise ValueError('Address not in san diego') # if sum(np.logical_or(longs < -118, longs > -117)): # raise ValueError('Address not in san diego') # Incorporate lats and longs into restaurants data df_rest_add['Latitude'] = lats df_rest_add['Longitude'] = longs # Merge restaurant df df_rest = df_rest_avg.merge(df_rest_info, on='Location').merge(df_rest_count, on='Location').merge(df_rest_add[['Location','Latitude','Longitude']], on='Location') df_rest.to_csv('burrito_data_shops.csv')
true
64ebcd487e7dc7672d9df792e17b9b6375d5aa8c
Python
pelthe/random
/Python/test.py
UTF-8
67
2.9375
3
[]
no_license
import math word1 = "key" print(word1) print("The word was ",word1)
true
210588a8948c2daafe96bb5b5d2a37959bee4817
Python
narru888/PythonWork-py37-
/Web網頁框架/框架(Django Rest Framework)/200505_DRF(版本、解析器)/mysite/api/views.py
UTF-8
1,953
3.015625
3
[]
no_license
from django.shortcuts import render, HttpResponse from rest_framework.views import APIView from rest_framework.versioning import BaseVersioning, QueryParameterVersioning, URLPathVersioning from rest_framework.parsers import JSONParser, FormParser, MultiPartParser class UsersView(APIView): """ QueryParameterVersioning:透過URL中的get參數傳參 URLPathVersioning:在URL的路徑中傳參 """ # versioning_class = URLPathVersioning # 局部使用版本。(不是列表,跟驗證等不一樣) def get(self, request, *args, **kwargs): print(request.version) # 獲取到的版本號 print(request.versioning_scheme) # 所使用的版本類 # 透過版本類中的reverse方法反向生成出當前url url = request.versioning_scheme.reverse(viewname='uuu', request=request) print(url) return HttpResponse('用戶列表') class ParserView(APIView): """ JSONParser:只能解析json數據 - 'Content-Type': 'application/json' - 數據格式: {"name":"jj", "age":18} FormParser:只能解析 HTML 表单内容 - 'Content-Type': 'application/x-www-form-urlencoded' - 數據格式: name=jamie&age=18&height=179 MultiPartParser:只能解析更多部分HTML表单内容,文件上傳也能使用 - 'Content-Type': 'multipart/form-data'' """ # parser_classes = [JSONParser, FormParser, MultiPartParser] # 局部使用解析器 def post(self, request, *args, **kwargs): """ 1. 獲取用戶請求 2. 獲取用戶請求體 3. 獲取用戶請求頭並和parser_classes進行比對,選擇要用的解析器 4. 解析器對請求體進行解析 5. 數據封裝到request.data """ print(request.data) # 封裝了經解析過後的數據 return HttpResponse('ParserView')
true
2a9e525384636ce02a69a1ec06fd52e849969ef9
Python
palisadoes/pattoo
/pattoo/db/schema/chart_datapoint.py
UTF-8
3,749
2.765625
3
[ "GPL-3.0-only" ]
permissive
"""pattoo ORM Schema for the DataPoint table.""" # PIP3 imports import graphene from graphene_sqlalchemy import SQLAlchemyObjectType # pattoo imports from pattoo.db import db from pattoo.db.models import ChartDataPoint as ChartDataPointModel from pattoo.db.schema import utils from pattoo_shared.constants import DATA_INT class ChartDataPointAttribute(): """Descriptive attributes of the ChartDataPoint table. A generic class to mutualize description of attributes for both queries and mutations. """ idx_chart_datapoint = graphene.String( description='ChartDataPoint index.') idx_datapoint = graphene.String( description='DataPoint table foreign key') idx_chart = graphene.String( description='Chart table foreign key.') enabled = graphene.String( description='True if enabled.') class ChartDataPoint(SQLAlchemyObjectType, ChartDataPointAttribute): """ChartDataPoint node.""" class Meta: """Define the metadata.""" model = ChartDataPointModel interfaces = (graphene.relay.Node,) class CreateChartDataPointInput( graphene.InputObjectType, ChartDataPointAttribute): """Arguments to create a ChartDataPoint entry.""" pass class CreateChartDataPoint(graphene.Mutation): """Create a ChartDataPoint Mutation.""" chart_datapoint = graphene.Field( lambda: ChartDataPoint, description='ChartDataPoint created by this mutation.') class Arguments: Input = CreateChartDataPointInput(required=True) def mutate(self, info_, Input): data = _input_to_dictionary(Input) chart_datapoint = ChartDataPointModel(**data) with db.db_modify(20147, close=False) as session: session.add(chart_datapoint) return CreateChartDataPoint(chart_datapoint=chart_datapoint) class UpdateChartDataPointInput( graphene.InputObjectType, ChartDataPointAttribute): """Arguments to update a ChartDataPoint entry. 
InputFields are used in mutations to allow nested input data for mutations To use an InputField you define an InputObjectType that specifies the structure of your input data """ # Provide a description of the ID idx_chart_datapoint = graphene.String( required=True, description='ChartDataPoint index value.') class UpdateChartDataPoint(graphene.Mutation): """Update a ChartDataPoint entry.""" chart_datapoint = graphene.Field( lambda: ChartDataPoint, description='ChartDataPoint updated by this mutation.') class Arguments: Input = UpdateChartDataPointInput(required=True) def mutate(self, info_, Input): data = _input_to_dictionary(Input) # Update database with db.db_modify(20145) as session: session.query(ChartDataPointModel).filter_by( idx_chart_datapoint=data['idx_chart_datapoint']).update(data) # Get code from database with db.db_query(20146, close=False) as session: chart_datapoint = session.query(ChartDataPointModel).filter_by( idx_chart_datapoint=data['idx_chart_datapoint']).first() return UpdateChartDataPoint(chart_datapoint=chart_datapoint) def _input_to_dictionary(input_): """Convert. Args: input_: GraphQL "data" dictionary structure from mutation Returns: result: Dict of inputs """ # 'column' is a dict of DB model 'non string' column names and their types column = { 'idx_chart_datapoint': DATA_INT, 'idx_datapoint': DATA_INT, 'idx_chart': DATA_INT, 'enabled': DATA_INT } result = utils.input_to_dictionary(input_, column=column) return result
true
1390571eed87cb94833dfb0c23e5ae0391a31e06
Python
zanixus/py-hw-mcc
/creditcard_km.py
UTF-8
2,301
4.46875
4
[]
no_license
#!/usr/bin/python3 """ Kevin M. Mallgrave Professor Janet Brown-Sederberg CTIM-285 W01 05 Apr 2019 This is a modular Python script that checks the validity of a credit card number. It checks length and rejects bad input and non-digit strings. It uses the Luhn algorithm to check credit card validity, and will tell you what type of credit card it is when given a proper number. If the user enters the wrong format, it will trap them in a loop to ensure they enter a value that can be checked against the Luhn algorithm to see if it is valid. """ def int_input(): good_input = False while good_input == False: good_value = input() try: val = int(good_value) good_input = True except ValueError: print("Error: characters entered. Please enter whole numbers.") return good_value def get_number(): cc = "" while len(cc) < 13 or len(cc) > 16: print("Please enter a valid credit card number, 13-16 digits.") print("The number must begin with 4, 5, 6, or 37.") cc = int_input() is_valid = valid_length(cc) if is_valid == False: cc = "" return cc def valid_length(cc): is_valid = False if len(cc) < 13 or len(cc) > 16: is_valid = False if cc[0] == "4": is_valid = True if cc[0] == "5": is_valid = True if cc[0] == "6": is_valid = True if cc[0] == "3" and cc[1] == "7": is_valid = True return is_valid def check_luhn(cc): second_num = len(cc) % 2 cc_sum = 0 counter = enumerate([int(i) for i in cc]) for i, num in counter: if i % 2 == second_num: num *= 2 if num > 9: num -= 9 cc_sum += num return cc_sum % 10 == 0 def main(): cc = get_number() card_type = "unknown" if cc[0] == "4": card_type = "Visa" if cc[0] == "5": card_type = "MasterCard" if cc[0] == "6": card_type = "Discover" if cc[0] == "3" and cc[1] == "7": card_type = "American Express" is_valid = check_luhn(cc) if is_valid == True: print("This is a valid " + card_type + " credit card number.") else: print("This is not a valid credit card number.") main()
true
b80681fddc14574c8a0f77be6d073ae6b0365d9d
Python
Justus-M/dsti-metaheuristics-justus-mulli
/griewank/griewank.py
UTF-8
780
2.9375
3
[]
no_license
import pandas as pd import numpy as np from scipy.optimize import minimize import time import matplotlib.pyplot as plt def griewank(x): z = x - shift[:len(x)] val = (sum(z**2)/4000)-np.cumprod(np.cos(z/np.sqrt(np.arange(len(z))+1))).values[-1]+1+bias converge.append(val) return val def minimize_griewank(dim, shifts, biases): start = time.time() global shift, bias, converge shift = shifts['griewank'] bias = biases['griewank'].values[0] converge = [] x = np.zeros(dim) result = minimize(griewank, x, options={'gtol':0.1}) print(f'done in {time.time() - start} seconds.') plt.plot(converge) plt.xlabel('# of evaluations') plt.ylabel('value') plt.title(f'Griewank convergence plot - dim {dim}') return result
true
7771e82cf40c916d4b0401fef487852fc94d525c
Python
knighton/sunyata_2017
/sunyata/backend/base/layer/dot/separable_conv.py
UTF-8
989
2.578125
3
[]
no_license
from ...base import APIMixin class BaseSeparableConvAPI(APIMixin): def __init__(self): APIMixin.__init__(self) def separable_conv(self, x, depthwise_kernel, pointwise_kernel, bias, stride, pad, dilation): raise NotImplementedError def separable_conv1d(self, x, depthwise_kernel, pointwise_kernel, bias, stride, pad, dilation): raise NotImplementedError def separable_conv2d(self, x, depthwise_kernel, pointwise_kernel, bias, stride, pad, dilation): raise NotImplementedError def separable_conv3d(self, x, depthwise_kernel, pointwise_kernel, bias, stride, pad, dilation): raise NotImplementedError def separable_conv_out_shape(self, in_shape, out_channels, face, stride, pad, dilation): return self.conv_out_shape( in_shape, out_channels, face, stride, pad, dilation)
true
d6bc2960fbe30fd0121bfab329f6c910da127206
Python
the-astronot/Project-Doge
/src/Node.py
UTF-8
789
3.0625
3
[]
no_license
class Node(): def __init__(self, bias, prev_weights = None, value = None): self.bias = float(bias) if prev_weights is None: self.weights = [] else: self.weights = [] for x in prev_weights: self.weights.append(float(x)) if value is None: self.value = 0.0 else: self.value = float(value) # GETTERS def get_bias(self): return self.bias def get_value(self): return self.value def get_weight(self, x): return self.weights[x] def get_weights(self): return self.weights # SETTERS def set_value(self, value): self.value = value def set_weights(self, weights): self.weights = weights def set_weight(self, index, value): self.weights[index] = value def set_bias(self, bias): self.bias = bias
true
a6439528bccfd44b249d2c8b31a012ce01f9ec17
Python
daretogo/find_common_pandas_flask
/compare_data.py
UTF-8
3,076
2.828125
3
[]
no_license
import pandas, pandas_usaddress, pdb, flask from flask import Flask, request, render_template from flask import Flask app = Flask(__name__) ############################################################################################################################# def Comparison(newv_filename, newv_sheet, newv_street_column, newv_city_column, newv_zip_column, homeb_filename, homeb_sheet, homeb_street_column, homeb_city_column, homeb_zip_column, result_filename): #read in the new visitor list df_newv = pandas.read_excel(newv_filename, sheet_name=newv_sheet) #parse the new visitor list into standardized address bits based on the existing address fields df_newv_parsed = pandas_usaddress.tag(df_newv, [newv_street_column, newv_city_column, newv_zip_column], granularity='medium', standardize=True) #read in the home buyers list df_homebuy = pandas.read_excel(homeb_filename, sheet_name=homeb_sheet) #parse homebuyer list into standardized address bits too. df_homebuy_parsed = pandas_usaddress.tag(df_homebuy, [homeb_street_column, homeb_city_column, homeb_zip_column], granularity='medium', standardize=True) #the magic line - perform the inner join matching on address number, city name (placename) and the street name. This #effectively eliminates missed matches due to Cir vs Cr or St vs Street comparisons in the address as we have extracted those values away. df_merged = pandas.merge(df_homebuy_parsed, df_newv_parsed, on=['AddressNumber', 'PlaceName', 'StreetName'], how='inner') writer = pandas.ExcelWriter(result_filename, engine='xlsxwriter') df_merged.to_excel(writer, sheet_name='Parsed and Merged Raw Data') # Close the Pandas Excel writer and output the Excel file. writer.save() return(df_merged) ##################################################################################################################################### @app.route('/pdb') def pdb(): """Enter python debugger in terminal""" import sys print("\n'/pdb' endpoint hit. Dropping you into python debugger. 
globals:") print("%s\n" % dir(sys.modules[__name__])) import pdb; pdb.set_trace() return 'After PDB debugging session, now execution continues...' @app.route("/", methods=['GET', 'POST']) def index(): if request.method == 'GET': return render_template('index.html') if request.method == 'POST': #this creates a dictionary of form_inputs with a kvp from the form in the template form_inputs = request.form #this executes the comparison function with the supplied data from the user Comparison(form_inputs['newvfilename'],form_inputs['newvsheetname'],form_inputs['newv_street'],form_inputs['newv_city'],form_inputs['newv_zip'],form_inputs['homebuyersfilename'],form_inputs['homebsheetname'],form_inputs['homeb_street'],form_inputs['homeb_city'],form_inputs['homeb_zip'],form_inputs['resultpath']) return render_template( 'result.html') if __name__ == "__main__": app.run(host='0.0.0.0', port=80)
true
e6aa9b6d7b9c15916cebabf6ee155b87b6db9cd3
Python
Jasonsey/Fern
/fern/data/data_tokenize.py
UTF-8
6,641
3.40625
3
[ "Apache-2.0" ]
permissive
# Fern # # Author: Jason Lin # Email: jason.m.lin@outlook.com # # ============================================================================= """data tokenize""" from typing import * import re from collections import Counter import jieba import pandas as pd from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer SPECIAL_TOKEN = [ '[pad]', # 占位符 '[cls]', # 分类符 '[sep]', # 分割符号 '[ukn]', # 未知token ] def str2word(string: str, zh_segmentation=True) -> List[str]: """ 分割字符串为词列表,支持的语言:中文、英文、数字、特殊符号("-_.'[]"),未支持的语言会被自动过滤 操作顺序: 1. 拆分中文和其他语言 2. 除了中文、英文、数字、特殊符号("-_.'[]")外,都作为分词分界线 3. 针对中文:使用jieba分词 4. 针对英文、数字:直接合成 Args: string: the cleaned data zh_segmentation: 中文是否分词,默认使用结巴分词;否则按字分词 Returns: cleaned word list """ string = replace_punctuation(string, ' ') string = re.sub(r'(\[)([A-Z]+?)(])', r' \1\2\3 ', string) # 确保特殊token的安全: xxx[yyy]zzz -> xxx [yyy] zzz if zh_segmentation: string = re.sub(r'([\u4e00-\u9fa5]+)', r' \1 ', string) else: string = re.sub(r'([\u4e00-\u9fa5])', r' \1 ', string) string_list = re.split(r"\s+", string) if zh_segmentation: words = [] for item in string_list: if not item: continue if re.match(r'[\u4e00-\u9fa5]', item): tmp = [word for word in jieba.cut(item) if word] else: tmp = [item] words.extend(tmp) else: words = re.split(r"\s+", string) return words def replace_punctuation(string: str, repl: str = ' '): """ 替换标点符号为指定的字符, 注意这部分不处理英文中括号和连词符("[]'’"),这个一般用于特殊token分界符和词缩写 Args: string: 要被处理的原始字符串 repl: 标点符号要替换成的字符 Returns: 替换好的字符串 """ p = re.compile(r'[,./;\-=\\`!@#$%^&*()_+|~,。、;【】!¥…()]') string = p.sub(repl, string) return string def generate_label_data( data: pd.DataFrame, label_col: str, ) -> Union[LabelBinarizer, MultiLabelBinarizer, Dict[str, Union[LabelBinarizer, MultiLabelBinarizer]]]: """ 利用sklearn工具加载标签字典,支持标签列的格式如下: 1. 多任务:{'task1': [label1, label2], 'task2': label1} 2. 单任务多标签: [label1, label2] 3. 单任务单标签: label1 Args: data: 带有标签列的data frame label_col: 标签列名字 Returns: 返回数据格式如下: 1. 
多任务:{'task1': MultiLabelBinarizer, 'task2': LabelBinarizer} 2. 单任务多标签: MultiLabelBinarizer 3. 单任务单标签: LabelBinarizer Raises: TypeError: 如果数据类型不正确,那么就会报错 """ label_example = data.loc[0, label_col] if isinstance(label_example, dict): # 多任务 labels = {} def add_label(item): for key_ in item: if key_ in labels: labels[key_].append(item[key_]) else: labels[key_] = [item[key_]] data[label_col].map(add_label) encoder = {} for key in labels: if isinstance(label_example[key], (list, tuple)): # 多标签 encoder_ = MultiLabelBinarizer() encoder_.fit(labels[key]) encoder[key] = encoder_ elif isinstance(label_example[key], str): # 单标签 encoder_ = LabelBinarizer() encoder_.fit(labels[key]) encoder[key] = encoder_ elif isinstance(label_example, (list, tuple)): # 单任务多标签 labels = data[label_col].to_list() encoder = MultiLabelBinarizer() encoder.fit(labels) elif isinstance(label_example, str): # 单任务单标签 labels = data[label_col].to_list() encoder = LabelBinarizer() encoder.fit(labels) else: raise TypeError('标签类型无法被支持') return encoder def generate_word_library(data: pd.DataFrame, data_col: str, top: Optional[int] = None) -> Tuple[dict, dict]: """ 从数据集中加载词库,要求data_col列中的数据格式都是字符串 Args: data: the data frame where the source data is stored. data_col: 数据加载源的列名,要求这个列里面都是字符串 top: 按照词频排序,只提取前n个词。默认提取全部 Returns: word2id, id2word字典 Raises: ValueError: if no data provide, ValueError will be raised """ if data is None: raise ValueError('No data is provided.') words = [] words_append = words.append def _map_func(word_list): for word in word_list: word = word.strip().lower() if word and word[0] != '#': words_append(word) data[data_col].map(_map_func) counter = Counter(words) res = counter.most_common(n=top) res = [(token, 0) for token in SPECIAL_TOKEN] + res word2id = {item[0]: idx for idx, item in enumerate(res)} id2word = {idx: item[0] for idx, item in enumerate(res)} return word2id, id2word def limit_token_length(tokens: list, n: int, strategy: str = 'tail') -> list: """ 限制token的长度. 
如果token列表长度超过n, 限制策略如下: 1. head: 截取前n个token 2. head+tail: 截取前n/3和后2n/3个token,并进行组合 3. tail: 截取后n个token; 默认值 注意: 不建议在训练或者测试的时候移除token数量过多的样本,而是推荐截取的方式. 这样可以尽量训练和测试的样本格式的一致 Args: tokens: 需要截取的token列表 n: 最大列表长度 strategy: 截取策略 Returns: 截取之后的token列表 """ if len(tokens) <= n: return tokens if strategy == 'head+tail': head = n//3 res = tokens[:head] + tokens[head-n:] elif strategy == 'head': res = tokens[:n] else: res = tokens[-n:] return res
true
d93397ef41845e2059a7b48c3bac30416d2a8cc5
Python
jf20541/CointegratedPairsTrading
/src/main.py
UTF-8
1,186
3.625
4
[ "MIT" ]
permissive
import pandas as pd import config from sklearn.linear_model import LinearRegression from statsmodels.tsa.stattools import adfuller import matplotlib.pyplot as plt df = pd.read_csv(config.TRAINING_FILE) ETH = df["ETH"].values.reshape(-1, 1) BTC = df["BTC"].values.reshape(-1, 1) def hedge_ratio(dv, iv): """Hedge Ratio is the proportion that lets us create a neutral position Args: dv [float-array]: BTC price iv [float-array]: ETH price Returns: [float-array]: calculated Hedge Ratio from LinearRegression (Slope) """ lr = LinearRegression() lr.fit(dv, iv) hedge_ratio = lr.coef_[0][0] intercept = lr.intercept_[0] return hedge_ratio, intercept def spread_adf(): """ Spread: linear combination of two series with a hedge ratio (H0): Have statioary timeseries and hence a cointegrated pair (H1): Not H0 """ spread = (ETH - BTC) * hedge_ratio(BTC, ETH)[0] result = adfuller(spread) if result[1] <= 0.05: print(f"Spread is Stationary and P-value: {result[1]}") else: print(f"Spread is Not Stationary: P-value: {result[1]}") if __name__ == "__main__": spread_adf()
true
5a56bce582568b445f98a8b6e99785314636115f
Python
andreagonz/cripto-tareas
/tareas/tarea2/src/ej4/cifrado.py
UTF-8
6,165
3.421875
3
[]
no_license
''' Andrea Itzel González Vargas Carlos Gerardo Acosta Hernández ''' import sys import os from math import floor ''' Clase que cifra y descifra mensajes con los esquemas de cifrado cesar, afin, mezclado y vigenere ''' class Cifrado: ''' Constructor de la clase ''' def __init__(self, clave, entrada): self.clave = clave self.entrada = entrada ''' Función que cifra el mensaje de acuerdo con el esquema indicado ''' def cifra(self, esquema): res = "" if esquema == "cesar": res = self.cifra_cesar() elif esquema == "afin": res = self.cifra_afin() elif esquema == "mezclado": res = self.cifra_mezclado() elif esquema == "vigenere": res = self.cifra_vigenere() else: print("Esquema de cifrado no reconocido") sys.exit(0) return res ''' Función que descifra el mensaje de acuerdo con el esquema indicado ''' def descifra(self, esquema): res = "" if esquema == "cesar": res = self.descifra_cesar() elif esquema == "afin": res = self.descifra_afin() elif esquema == "mezclado": res = self.descifra_mezclado() elif esquema == "vigenere": res = self.descifra_vigenere() else: print("Esquema de descifrado no reconocido") sys.exit(0) return res def cifra_cesar(self): out = "" for x in self.entrada: ni = ord(x) + int(self.clave) if ni > 255: out = out + chr(ni % 256) else: out = out + chr(ni) return out def descifra_cesar(self): out = "" for x in self.entrada: ni = ord(x) - int(self.clave) if ni < 0: out = out + chr(ni % 256) else: out = out + chr(ni) return out def mcd(self, a, b): if b == 0: return a else: return self.mcd(b, a % b) def inverso(self, a): for i in range(256): if (a * i) % 256 == 1: return i return 0 def clave_afin(self): lst = self.clave.replace(" ", "").replace("\n", "").split(',') if len(lst) < 2 or not lst[0].isdigit() or not lst[1].isdigit(): print("Clave inválida") sys.exit(0) r = int(lst[0]) k = int(lst[1]) if self.mcd(r, 256) != 1: print("El primer número de la clave debe ser primo relativo con 256") sys.exit(0) return (r, k) def cifra_afin(self): lst = 
self.clave_afin() r = lst[0] k = lst[1] res = "" for c in self.entrada: res += chr((r * ord(c) + k) % 256) return res def descifra_afin(self): lst = self.clave_afin() r = lst[0] k = lst[1] res = "" for c in self.entrada: res += chr(((ord(c) - k) * self.inverso(r)) % 256) return res def cifra_mezclado(self): dicc = {} res = "" lst = self.clave.split('\n') if len(lst) < 2 or len(lst[0]) != len(lst[1]): print("Clave inválida") sys.exit(0) for i in range(len(lst[0])): k = {lst[0][i] : lst[1][i]} if dicc.get(lst[0][i], None) != None: print("No repetir carácteres en la clave") sys.exit(0) dicc.update(k) for c in self.entrada: if dicc.get(c, None) != None: res += dicc.get(c) else: res += c return res def descifra_mezclado(self): dicc = {} res = "" lst = self.clave.split('\n') if len(lst) < 2 or len(lst[0]) != len(lst[1]): print("Clave inválida") sys.exit(0) for i in range(len(lst[0])): k = {lst[1][i] : lst[0][i]} if dicc.get(lst[1][i], None) != None: print("No repetir carácteres en la clave") sys.exit(0) dicc.update(k) for c in self.entrada: if dicc.get(c, None) != None: res += dicc.get(c) else: res += c return res def cifra_vigenere(self): out = "" m = len(self.clave) pos = 0 for x in self.entrada: if pos > m - 1: pos = 0 ni = ord(x) + ord(self.clave[pos]) if ni > 255: out = out + chr(ni % 256) else: out = out + chr(ni) pos += 1 return out def descifra_vigenere(self): out = "" m = len(self.clave) pos = 0 for x in self.entrada: if pos > m - 1: pos = 0 ni = ord(x) - ord(self.clave[pos]) if ni < 0: out = out + chr(ni % 256) else: out = out + chr(ni) pos += 1 return out def escribe_archivo(nom, arch): try: na = open(nom, "w") na.write(arch) na.close() except: print("Error al crear archivo " + nom) if len(sys.argv) < 5: print("Uso del programa:\npython3 cifrado.py [c|d] [cesar|afin|mezclado|vigenere] <archivoClave> <archivoEntrada>") else: cd = sys.argv[1] esquema = sys.argv[2] clave = sys.argv[3] entrada = sys.argv[4] try: archk = open(clave, "r") clave = archk.read() 
archk.close() arche = open(entrada, "r") entrada = arche.read() arche.close() except: print("Archivo(s) inválido(s)") cifrado = Cifrado(clave, entrada) if cd == "c": res = cifrado.cifra(esquema) escribe_archivo(os.path.splitext(sys.argv[4])[0] + ".cifrado", res) elif cd == "d": res = cifrado.descifra(esquema) escribe_archivo(os.path.splitext(sys.argv[4])[0] + ".descifrado", res) else: print("Argumento '" + cd + "' no reconocido")
true
a4daf1682bbc4d5860be5a8826fec498bee2083c
Python
akhawaja2014/Deep-learning-for-image-registration
/feature_extraction/FirsttutorialImageregistration.py
UTF-8
1,554
2.515625
3
[]
no_license
import numpy as np import cv2 import matplotlib.pyplot as plt img1_color = cv2.imread('/home/tgiencov/Registration Codes/Python image registration/im1.JPG') # Image to be aligned. img2_color = cv2.imread('/home/tgiencov/Registration Codes/Python image registration/im2.JPG') # Reference image. print(img1_color.shape) img1 = cv2.cvtColor(img1_color, cv2.COLOR_BGR2GRAY) img2 = cv2.cvtColor(img2_color, cv2.COLOR_BGR2GRAY) height, width = img2.shape #print(height) #print(width) orb_detector = cv2.ORB_create(5000) #plt.imshow(img1) #plt.show() #print(img1) kp1, d1 = orb_detector.detectAndCompute(img1, None) kp2, d2 = orb_detector.detectAndCompute(img2, None) print("Keyponts are") print(kp1.shape) # print('detectors are') # print(len(d1)) imgre = cv2.drawKeypoints(img1, kp1, None, color=(0,255,0), flags=0) plt.imshow(imgre), plt.show() matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck = True) # print(matcher) matches = matcher.match(d1, d2) print(matches[0]) matches.sort(key = lambda x: x.distance) matches = matches[:int(len(matches)*90)] no_of_matches = len(matches) # print(len(matches)) p1 = np.zeros((no_of_matches, 2)) p2 = np.zeros((no_of_matches, 2)) print(p2.shape) for i in range(len(matches)): p1[i, :] = kp1[matches[i].queryIdx].pt p2[i, :] = kp2[matches[i].trainIdx].pt print(p1[0,:]) print(kp1[0].pt) # homography, mask = cv2.findHomography(p1, p2, cv2.RANSAC) # transformed_img = cv2.warpPerspective(img1_color, homography, (width, height)) # cv2.imwrite('output.jpg', transformed_img)
true
f1c9dab32e30468a12f89cebe3fefa2e4144ed3a
Python
seancawley35/CS3A04-Coursework
/CS3A04-LAB6-SCAWLEY (3).py
UTF-8
10,783
3.9375
4
[]
no_license
Python 3.7.1 (v3.7.1:260ec2c36a, Oct 20 2018, 03:13:28) [Clang 6.0 (clang-600.0.57)] on darwin Type "help", "copyright", "credits" or "license()" for more information. >>> '''-----------------LAB 6--------------------------''' '''----------------PART 1-------------------------''' ''' -----------------PART 1: CODE------------------------ ''' # import the math function to use sqrt in the function import math #create a function to determine if two balls are colliding def isColliding(a,b): #create variables that determine the distance between two objects c = ((a[0] - b[0])**2) d = ((a[1] - b[1])**2) e = (c + d) #create a variable to check the sqrt of variable e distance = math.sqrt(e) #create a variable to determine the radii of both objects combined_radii = (a[2] + b[2]) #check if the distance between both objects is less than the radii if distance <= combined_radii: #return True if the distance is objects are colliding return True #return False if the if the objects are not colliding else: return False #circle = (x_a, y_a, r_a) a = (3, -4,0) #circle = (x_b, y_b, r_b) b = (-1,3,0) #use a variable to run the function state = isColliding(a,b) #print the result of the function print(state) ''' -----------------PART 1: OUTPUT------------------------------- False ''' '''------------------PART 2-------------------------------''' ''' ------------------PART 2: CODE----------------------------- ''' # set up a list to iterate on myCheeseList = ["Apple", "Asiago", "Brie", "Caerphilly", "Emmental", "Gloucester", "Gouda", ] for i in range(len(myCheeseList)): if i == 2: del (myCheeseList[4]) print(i, myCheeseList[i]) ''' ---------------------PART 2: OUTPUT-------------------------------- Traceback (most recent call last): 0 Apple 1 Asiago File "/Users/scawley/Library/Preferences/PyCharmCE2018.3/scratches/I/test i/scratch.py", line 9, in <module> 2 Brie print(i, myCheeseList[i]) 3 Caerphilly IndexError: list index out of range 4 Gloucester 5 Gouda #My Answer: Elements of a list can 
not change while iterating over the list with a for loop. To properly change the elements of a list,use a while loop because len(myCheeseList) recalculates each time around the loop The error occurs because the loop is trying to access myCheeseList[4] at the end of the list which used to be the value Gloucestor. After the value Emmental is deleted, myCheeseList[4] is deleted from the list. If you append an item to your list while running the for loop, the loop will not process them because the range iterator functions on the length of your list when the for loop begins. ''' '''--------------PART 3 ----------------------------------''' ''' --------------PART 3: FILES--------------------------------- Fact 1: Opening a file Opening the file communicates with your operating system, which knows where the data for each file is stored. When you open a file, you are asking the operating system to find the file by name and make sure the file exists. If the file does not exist, open will fail with a traceback and you will not get a handle to access the contents of the file Fact 2: Searching through a file When you are searching through data in a file, it is a very common pattern to read through a file, ignoring most of the lines and only processing lines which meet a particular condition. The basic idea of the search loop is that you are looking for “interesting” lines and effectively skipping “uninteresting” lines. ''' ''' --------------PART 3: DICTIONARIES----------------------------- Fact 1: Get Method Dictionaries have a method called get that takes a key and a default value. If the key appears in the dictionary, get returns the corresponding value; otherwise it returns the default value. Fact 2: Dictionaries and Files One of the common uses of a dictionary is to count the occurrence of words in a file with some written text. 
''' ''' --------------PART 3: TUPLES----------------------------------- Fact 1: Tuples are immutable A tuple1 is a sequence of values much like a list. The values stored in a tuple can be any type, and they are indexed by integers. The important difference is that tuples are immutable. Tuples are also comparable and hashable so we can sort lists of them and use tuples as key values in Python dictionaries. Fact 2: Comparing tuples The comparison operators work with tuples and other sequences. Python starts by comparing the first element from each sequence. If they are equal, it goes on to the next element, and so on, until it finds elements that differ. Subsequent elements are not considered (even if they are really big). ''' '''-----------PART 4-----------------------------------''' ''' --------------PART 4: CODE-------------------------------- ''' #create a dictionary that stores words in english, spanish, and greek animal_names = { "en" :{ "bee": { "sp": "abeja", "gr":"μέλισσα" }, "iguana":{ "sp":'iguana', "gr":'ιγκουάνα' }, "scorpion" :{ "sp":'alacrán', "gr": 'σκορπιός' }, "giraffe":{ "sp":'jirafa', "gr": 'καμηλοπάρδαλη' }, "spider": { "sp": 'araña', "gr": 'αράχνη' } }, "sp" :{ "abeja": { "en": "bee", "gr":"μέλισσα" }, "iguana":{ "en": 'iguana', "gr": 'ιγκουάνα' }, "alacrán":{ "en": 'scorpion', "gr": 'σκορπιός' }, "jirafa":{ "en": 'giraffe', "gr": 'καμηλοπάρδαλη' }, "araña":{ "en": 'spider', "gr": 'αράχνη' } }, "gr": { "μέλισσα" : { "en": "bee","sp": "abeja" }, "ιγκουάνα":{ "en": 'iguana', "sp": 'iguana' }, "σκορπιός":{ "en": 'scorpion', "sp":'alacrán' }, "καμηλοπάρδαλη":{ "en": 'giraffe', "sp": 'jirafa' }, "αράχνη":{ "en": 'spider', "sp": 'araña' } } } #create a dictionary that stores spanish translations of english words spanish_names = {'bee': 'abeja', 'iguana': 'iguana', 'scorpion': 'alacrán', 'giraffe': 'jirafa', 'spider':'araña'} #Part II #create the the function translate(word) for converting english words to spanish def translate(word): #check if the 
english word exists in the dictionary if word in spanish_names: #if the word is in the dictionary, return the spanish translation print (spanish_names[word]) #if the word does not exist in the dictionary, return an empty string else: print ('') #run four successful test cases of the function translate('bee') translate('iguana') translate('giraffe') translate('scorpion') #test one unsuccessful test case with the word 'flea' not in dictionary translate('flea') print ('\n') #create a function that translates words from dictionaries in english, spanish, and greek def translate(fm, to, word): #check if the word exists in the input dictionary if word in animal_names[fm]: #if the word exists in the fm dictionary, return the to translation of the word print(animal_names[fm][word][to]) #if the word does not exist in the fm dictionary, return an empty string else: print('') #run ten successful and one unsuccessful test cases of the function translate('en','sp','bee') translate('sp', 'gr','iguana') translate('gr', 'en', 'σκορπιός') translate('en', 'gr', 'giraffe') translate('sp','en','araña') translate('gr','sp','μέλισσα') translate('en','sp','spider') translate('gr','en','ιγκουάνα') translate('en','gr','scorpion') translate('sp','gr','abeja') #non-existent word translate('gr', 'sp', 'dog') ''' ------------------PART 4: OUTPUT----------------------- abeja iguana jirafa alacrán abeja ιγκουάνα scorpion καμηλοπάρδαλη spider abeja araña iguana σκορπιός μέλισσα ''' '''-----------------PART 5---------------------''' '''-----------------PART 5: CODE-----------------''' # create a function to read the english, spanish, and german word files def transfer_words(file_name): # use an empty list to store all three files word_list = [] try: # open each word file with open(file_name, "r", encoding='utf-8') as f: # add all of the words to the in each file to the list for next in f: word_list.append(next.strip()) # if the words do not exist, return except except: print('except') # return the 
list of words return word_list # create a function to hold each file of words in English, Spanish, and Greek def create_dictionary(): # create variables to hold each file of words in English, Spanish, and Greek en_words = transfer_words('1kwords.en.txt') sp_words = transfer_words('1kwords.sp.txt') gr_words = transfer_words('1kwords.gr.txt') giant_dictionary = {'en': {}, 'sp': {}, 'gr': {}} # check for words in the English dictionary and add there translations from the other two dictionaries for i, word in enumerate(en_words): giant_dictionary['en'][word] = {'sp': sp_words[i], 'gr': gr_words[i]} # check for words in the Spanish dictionary and add there translations from the other two dictionaries for j, word in enumerate(sp_words): giant_dictionary['sp'][word] = {'en': en_words[i], 'gr': gr_words[i] # check for words in the Greek dictionary and add there translations from the other two dictionaries for k, word in enumerate(gr_words): giant_dictionary['gr'][word] = {'en': en_words[i], 'sp': sp_words[i]} # return the new dictionary that contains all three word dictionaries return giant_dictionary all_words = create_dictionary() # create a function that translates words from dictionaries in english, spanish, and greek def translation(frm, too, word): # check if the word exists in the input dictionary if word in all_words[frm]: # if the word exists in the fm dictionary, return the to translation of the word print(all_words[frm][word][too]) # if the word does not exist in the fm dictionary, return an empty string else: print('') # test four cases of the program translation('en', 'sp', 'man') translation('en', 'sp', 'this') translation('en', 'sp', 'is') translation('en', 'sp', 'dog') ''' ----------PART 5: OUTPUT---------------------------- usted este es perro '''
true
6a87ea55ea6ea6f949a22fd558cd9255b6984893
Python
JuanRx19/TallerFinal
/Ejercicio 5.py
UTF-8
284
3.59375
4
[]
no_license
PC = eval(input("Por favor digite la cantidad de dinero en pesos Colombianos")) D = PC/3500 - (PC/3500 * 0.2) print("Peso Colombiano a Dolar: ", D) Ye = PC/34 - (PC/34 * 0.2) print("Peso Colombiano a Yenes: ", Ye) E = PC/4300 - (PC/34 * 0.2) print("Peso Colombiano a Euro: ", E)
true
25f03ca26b6c2dab225d26a3ed012eb8eb875d94
Python
keleshev/docopt-dispatch
/test_docopt_dispatch.py
UTF-8
1,929
2.828125
3
[ "MIT" ]
permissive
from pytest import raises, yield_fixture as fixture from docopt_dispatch import Dispatch, DispatchError class OptionMarker(Exception): pass class ArgumentMarker(Exception): pass doc = 'usage: prog [--option] [<argument>]' @fixture def dispatch(): dispatch = Dispatch() @dispatch.on('--option') def option(**kwargs): raise OptionMarker(kwargs) @dispatch.on('<argument>') def argument(**kwargs): raise ArgumentMarker(kwargs) yield dispatch def test_dispatch_can_dispatch_on_option(dispatch): with raises(OptionMarker) as error: dispatch(doc, '--option') assert error.value.message == {'option': True, 'argument': None} def test_dispatch_can_dispatch_on_argument(dispatch): with raises(ArgumentMarker) as error: dispatch(doc, 'hi') assert error.value.message == {'option': False, 'argument': 'hi'} def test_dispatch_will_raise_error_if_it_cannot_dispatch(dispatch): with raises(DispatchError) as error: dispatch(doc, '') message = ('None of dispatch conditions --option, <argument> ' 'is triggered') assert error.value.message == message class MultipleDispatchMarker(Exception): pass @fixture def multiple_dispatch(): dispatch = Dispatch() @dispatch.on('--option', '<argument>') def option_argument(**kwargs): raise MultipleDispatchMarker(kwargs) yield dispatch def test_multiple_dispatch(multiple_dispatch): with raises(MultipleDispatchMarker) as error: multiple_dispatch(doc, 'hi --option') assert error.value.message == {'option': True, 'argument': 'hi'} def test_multiple_dispatch_will_raise_error(multiple_dispatch): with raises(DispatchError) as error: multiple_dispatch(doc, '--option') message = ('None of dispatch conditions --option <argument> ' 'is triggered') assert error.value.message == message
true
5ab3749e3a922cf0568b6a43005932aba4858757
Python
stare-star/Keras-Snake-DQN
/snake-DQN.py
UTF-8
12,145
2.578125
3
[]
no_license
#!/usr/bin/env python from __future__ import print_function import argparse import time import skimage as skimage from skimage import transform, color, exposure from skimage.transform import rotate from skimage.viewer import ImageViewer import sys sys.path.append("game/") import snake as game import random import numpy as np from collections import deque import json from keras.initializers import normal, identity from keras.models import model_from_json from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.layers.convolutional import Convolution2D, MaxPooling2D from keras.optimizers import SGD, Adam import tensorflow as tf import matplotlib.pyplot as plt path = "model_DQN5.h5" lenth = 100 def procImg(img): proc_img = skimage.color.rgb2gray(img) proc_img = skimage.transform.resize(proc_img, (80, 80)) proc_img = skimage.exposure.rescale_intensity(proc_img, out_range=(0, 255)) proc_img = proc_img / 255.0 return proc_img class DQN(): def __init__(self): self.Dead = deque() self.D = deque() self.Dfood = deque() self.model = Sequential() self.startTime = time.time() self.GAME = 'snake' # the name of the game being played for log files self.CONFIG = 'nothreshold' self.ACTIONS = 3 # number of valid actions self.GAMMA = 0.99 # decay rate of past observations self.OBSERVATION = 3200. # timesteps to observe before training self.EXPLORE = 3000000. 
# frames over which to anneal epsilon self.FINAL_EPSILON = 0.0001 # final value of epsilon self.INITIAL_EPSILON = 0.1 # starting value of epsilon self.REPLAY_MEMORY = 30000 # number of previous transitions to remember self.BATCH = 128 # size of minibatch self.FRAME_PER_ACTION = 1 self.LEARNING_RATE = 1e-4 self.img_rows = 80 self.img_cols = 80 self.img_channels = 4 # We stack 4 frames self.input_shape = (self.img_rows, self.img_cols, self.img_channels) self.topScore = 0 self.scorelist = [] self.timelist = [] def buildDQN(self): print("Now we build the model") self.model.add(Convolution2D(32, 8, 8, subsample=(4, 4), border_mode='same', input_shape=self.input_shape)) # 80*80*4 self.model.add(Activation('relu')) self.model.add(Convolution2D(64, 4, 4, subsample=(2, 2), border_mode='same')) self.model.add(Activation('relu')) self.model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same')) self.model.add(Activation('relu')) self.model.add(Flatten()) self.model.add(Dense(512)) self.model.add(Activation('relu')) self.model.add(Dense(self.ACTIONS)) adam = Adam(lr=self.LEARNING_RATE) self.model.compile(loss='mse', optimizer=adam) print("We finish building the model") return self.model def trainNetwork(self, model, args): # open up a game state to communicate with emulator game_state = game.GameState() # store the previous observations in replay memory # get the first state by doing nothing and preprocess the image to 80x80x4 # 输入为a_t((1, 0,0,0)代表上下左右) do_nothing = np.zeros(self.ACTIONS) do_nothing[0] = 1 x_t, r_0, terminal, score = game_state.frame_step(do_nothing) x_t = procImg(x_t) s_t = np.stack((x_t, x_t, x_t, x_t), axis=2) # print (s_t.shape) # In Keras, need to reshape s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2]) # 1*80*80*4 if args['mode'] == 'Run': OBSERVE = 999999999 # We keep observe, never train self.epsilon = self.FINAL_EPSILON print("Now we load weight") self.model.load_weights(path) adam = Adam(lr=self.LEARNING_RATE) 
self.model.compile(loss='mse', optimizer=adam) print("Weight load successfully") elif args['mode'] == 'Train': # We go to training mode OBSERVE = self.OBSERVATION self.epsilon = self.INITIAL_EPSILON elif args['mode'] == 'reTrain': # contiune train with self.epsilon = 0.1 print("Now we load weight") self.model.load_weights(path) adam = Adam(lr=self.LEARNING_RATE) self.model.compile(loss='mse', optimizer=adam) print("Weight load successfully") OBSERVE = self.OBSERVATION self.epsilon = 0.1 else: # contiune train print("Now we load weight") self.model.load_weights(path) adam = Adam(lr=self.LEARNING_RATE) self.model.compile(loss='mse', optimizer=adam) print("Weight load successfully") OBSERVE = self.OBSERVATION self.epsilon = np.load("e.npy") self.epsilon = float(self.epsilon) t = 0 step = 0 score_pre = 0 while (True): step += 1 # print(step) loss = 0 Q_sa = 0 action_index = 0 r_t = 0 # 输入为a_t((1, 0)代表不跳,(0,1)代表跳)。 a_t = np.zeros([self.ACTIONS]) # choose an action epsilon greedy # 每一帧都处理,做一步动作,并储存记忆 if t % self.FRAME_PER_ACTION == 0: if random.random() <= self.epsilon: # 小于epsilon 随机动作 print("----------Random Action----------") action_index = random.randrange(self.ACTIONS) a_t[action_index] = 1 else: # 大于等于epsilon 通过预测的Q_table,选取最优动作 q = model.predict(s_t) # input a stack of 4 images, get the prediction max_Q = np.argmax(q) action_index = max_Q a_t[max_Q] = 1 # We reduced the epsilon gradually # 随着训练次数的增加,逐渐减小epsilon,即减少随机动作(探索),更倾向于预测值 if self.epsilon > self.FINAL_EPSILON and t > OBSERVE: self.epsilon -= (self.INITIAL_EPSILON - self.FINAL_EPSILON) / self.EXPLORE # run the selected action and observed next state and reward x_t1_colored, r_t, terminal, score = game_state.frame_step(a_t) if terminal: self.plot_M(score_pre, step, t), step = 0 if score > self.topScore: self.topScore = score score_pre = score x_t1 = procImg(x_t1_colored) x_t1 = x_t1.reshape(1, x_t1.shape[0], x_t1.shape[1], 1) # 1x80x80x1 s_t1 = np.append(x_t1, s_t[:, :, :, :3], axis=3) # store the transition 
in D self.D.append((s_t, action_index, r_t, s_t1, terminal)) if r_t == 1: self.Dfood.append((s_t, action_index, r_t, s_t1, terminal)) if r_t == -1: self.Dead.append((s_t, action_index, r_t, s_t1, terminal)) # 控制D(记忆的大小),忘记以前的事情 if len(self.D) > self.REPLAY_MEMORY: self.D.popleft() if len(self.Dfood) > self.REPLAY_MEMORY//10: self.Dfood.popleft() if len(self.Dead) > self.REPLAY_MEMORY//10: self.Dead.popleft() # only train if done observing # 从记忆库中选取片段训练,即经验回放 if t > OBSERVE: loss, Q_sa = self.learn() # 迭代,准备下一步 s_t = s_t1 t = t + 1 # save progress every 10000 iterations # 每1000次,保存模型 if t % 1000 == 0: self.saveModel() if t % 10 == 0: self.printIofo(t, OBSERVE, action_index, r_t, Q_sa, loss, score) print("Episode finished!") print("************************") def learn(self): # sample a minibatch to train on minibatch = random.sample(self.D, int(self.BATCH / 2)) try: minibatch += random.sample(self.Dead, int(self.BATCH / 4)) except: minibatch += random.sample(self.Dead * 100, int(self.BATCH / 4)) try: minibatch += random.sample(self.Dfood, int(self.BATCH / 4)) except: minibatch += random.sample(self.Dfood * 100, int(self.BATCH / 4)) # Now we do the experience replay state_t, action_t, reward_t, state_t1, terminal = zip(*minibatch) state_t = np.concatenate(state_t) state_t1 = np.concatenate(state_t1) targets = self.model.predict(state_t) Q_sa = self.model.predict(state_t1) targets[range(self.BATCH), action_t] = reward_t + self.GAMMA * np.max(Q_sa, axis=1) * np.invert( terminal) loss = self.model.train_on_batch(state_t, targets) return loss, Q_sa def printIofo(self, t, OBSERVE, action_index, r_t, Q_sa, loss, score): # print info # 打印状态 state = "" if t <= OBSERVE: state = "observe" # 探索期 elif t > OBSERVE and t <= OBSERVE + self.EXPLORE: state = "explore" else: state = "train" timeCost = int(time.time() - self.startTime) print("TIMESTEP", t, "/TIMECOST", timeCost, "/ STATE", state, \ "/ EPSILON", self.epsilon, "/ ACTION", action_index, "/ REWARD", r_t, \ "/ Q_MAX ", 
np.max(Q_sa), "/ Loss ", loss, "/ \nTop score", self.topScore, "\nScore", score, \ "\ncount", len(self.scorelist) ) def saveModel(self): print("Now we save model") self.model.save_weights(path, overwrite=True) # with open("model_DQN4.json", "w") as outfile: # json.dump(self.model.to_json(), outfile) np.save("e.npy", np.array(self.epsilon)) def saveHistory(self, timecost): np.save("history/score%d.npy" % timecost, np.array(self.scorelist)) np.save("history/step%d.npy" % timecost, np.array(self.timelist)) def playGame(self, args): # 创建DQN模型 model = self.buildDQN() # 传入模型,训练 self.trainNetwork(model, args) def plot_M(self, score, time, t): self.scorelist.append(score) self.timelist.append(time) if len(self.scorelist) == lenth: print(self.timelist) self.plot(self.scorelist, self.timelist, t) def plot(self, scorelist, timelist, timecost): # plt.plot([[x for x in range(10)],[x for x in range(10)],[x for x in range(10)]],[np.array(scorelist),np.array(timelist),np.array(scorelist)/np.array(timelist)]) # print(timelist) plt.figure(1) plt.subplot(211) plt.plot([x for x in range(lenth)][::1], timelist[::1]) plt.title(np.average(timelist)) plt.subplot(212) plt.plot([x for x in range(lenth)][::1], scorelist[::1]) plt.title(np.average(scorelist)) plt.savefig('pic/step_score%d.png' % timecost) plt.show() # 保存分数和步数 self.saveHistory(timecost) self.scorelist = [] self.timelist = [] def main(): # 解析参数 parser = argparse.ArgumentParser(description='Description of your program') parser.add_argument('-m', '--mode', help='Train / Run', required=True) args = vars(parser.parse_args()) # 实例化,训练 dqn = DQN() dqn.playGame(args) if __name__ == "__main__": config = tf.ConfigProto() config.gpu_options.allow_growth = True sess = tf.Session(config=config) from keras import backend as K K.set_session(sess) # K.set_session(tf.Session(config=tf.ConfigProto(device_count={'cpu': 0}))) main()
true
52a9b58db7dfc76b0ca0aea9a0228e59ac4e9c69
Python
nickliqian/team-learning
/数据挖掘实践(二手车价格预测)/quantile2.py
UTF-8
875
3.3125
3
[]
no_license
import pandas as pd import numpy as np def box_plot_outliers(data_ser, box_scale): """ 利用箱线图去除异常值 :param data_ser: 接收 pandas.Series 数据格式 :param box_scale: 箱线图尺度, :return: """ # 3/4分位 - 1/4分位的差,乘上缩放尺度 iqr = box_scale * (data_ser.quantile(0.75) - data_ser.quantile(0.25)) val_low = data_ser.quantile(0.25) - iqr # 离群值下限值 val_up = data_ser.quantile(0.75) + iqr # 离群值上限值 rule_low = (data_ser < val_low) # 筛选过大和过小的离群值为False rule_up = (data_ser > val_up) return (rule_low, rule_up), (val_low, val_up) s = pd.Series([-567, -367, -300, 1, 2, 3, 4, 23, 26, 40, 56, 78, 79, 84, 90, 95, 150, 345, 785, 346, 436]) a, b = box_plot_outliers(s, 3) print(b) print(a) print(np.arange(s.shape[0])[a[0] | a[1]])
true
424ff212880d56915fdafe1dab55068c864da731
Python
jkrumbiegel/jktools
/jktools/geometry/read_svg_paths.py
UTF-8
2,087
2.828125
3
[]
no_license
from svg.path import parse_path from svg.path.path import Path, CubicBezier, Arc, QuadraticBezier, Move from matplotlib.path import Path as mPath import xmltodict import numpy as np from jktools.geometry import remove_redundant_movetos from collections import OrderedDict def read_svg_paths(svg_file): with open(svg_file, 'r') as f: xml = f.read() xml_dict = xmltodict.parse(xml) path_dicts = xml_dict['svg']['path'] paths = OrderedDict((pd['@id'], pd['@d']) for pd in path_dicts) return paths def convert_svg_path_to_mpl(path_string): parsed = parse_path(path_string) vertices = [] codes = [] i_to_list = lambda i: [i.real, i.imag] for part in parsed: if isinstance(part, Move): vertices.append(i_to_list(part.start)) codes.append(mPath.MOVETO) elif isinstance(part, CubicBezier): vertices.extend([i_to_list(part.start), i_to_list(part.control1), i_to_list(part.control2), i_to_list(part.end)]) codes.extend([mPath.MOVETO, mPath.CURVE4, mPath.CURVE4, mPath.CURVE4]) elif isinstance(part, QuadraticBezier): raise Exception('QuadraticBezier not implemented.') elif isinstance(part, Arc): raise Exception('Arc not implemented.') else: raise Exception(f'{type(part)} not implemented.') if parsed.closed: vertices.append([np.nan, np.nan]) codes.append(mPath.CLOSEPOLY) path = mPath(np.array(vertices), np.array(codes, dtype=np.uint8)) return remove_redundant_movetos(path) # svg_paths = read_svg_paths('/Users/juliuskrumbiegel/Dropbox/Uni/Mind and Brain/Rolfslab Master/Visuals/head-top-down.svg') # paths = dict((key, convert_svg_path_to_mpl(value)) for key, value in svg_paths.items()) # # # import matplotlib.pyplot as plt # from matplotlib.patches import PathPatch # # fig, ax = plt.subplots(1) # # for name, path in paths.items(): # ax.add_patch(PathPatch(path, edgecolor='k', facecolor='none')) # # plt.xlim(0, 300) # plt.ylim(0, 300) # plt.axis('equal') # # plt.show()
true
65e19a8f7c958bda6764c9cdfb1c6141d5a288ad
Python
thejayhaykid/Python
/Geog5222/indexing/rtree2.py
UTF-8
4,745
2.90625
3
[ "MIT" ]
permissive
""" R-tree, part 2 Contact: Ningchuan Xiao The Ohio State University Columbus, OH """ __author__ = "Ningchuan Xiao <ncxiao@gmail.com>" from math import ceil from rtree1 import * # e is an extent def insert(node, e, child=None): for ent in node.entries: # already in tree if ent.MBR == e: return True entry = Entry(extent=e, child=child) # create a new entry if len(node.entries) < node.M: # there is room entry.node = node if entry.child is not None: entry.child.parent = entry node.entries.append(entry) node.update_up() return True M = node.M # overflowing node needs to be split m = ceil(float(M)/2) L1 = RTreeNode(M) L2 = RTreeNode(M) maxi, maxj = -1, -1 maxdist = 0.0 tmpentries = [ent for ent in node.entries] tmpentries.append(entry) M1 = len(tmpentries) # get the farthest apart MBRs as seeds for i in range(M1): for j in range(i+1, M1): d = tmpentries[i].MBR.distance(tmpentries[j].MBR) if d>maxdist: maxdist = d maxi = i maxj = j e1 = tmpentries[maxi] e2 = tmpentries[maxj] allexts = [] # holds the rest of the MBRs for ext in tmpentries: if ext is not e1 and ext is not e2: allexts.append(ext) L1.entries.append(e1) L2.entries.append(e2) L1.update() L2.update() while len(allexts): numremained = len(allexts) gotonode = None if len(L1.entries) == m-numremained: gotonode = L1 elif len(L2.entries) == m-numremained: gotonode = L2 if gotonode is not None: while len(allexts): ext = allexts.pop() gotonode.entries.append(ext) else: minarea = union_extent(L1.extent,L2.extent).area() minext = -1 gotonode = None for i in range(len(allexts)): tmpext1 = union_extent(L1.extent, allexts[i].MBR) tmparea1 = tmpext1.area() - L1.extent.area() tmpext2 = union_extent(L2.extent, allexts[i].MBR) tmparea2 = tmpext2.area() - L2.extent.area() if min(tmparea1, tmparea2) > minarea: continue minext = i if tmparea1 < tmparea2: if tmparea1 < minarea: tmpgotonode = L1 minarea = tmparea1 elif tmparea2 < tmparea1: if tmparea2 < minarea: tmpgotonode = L2 minarea = tmparea2 else: minarea = tmparea1 if 
L1.extent.area() < L2.extent.area(): tmpgotonode = L1 elif L2.extent.area() < L1.extent.area(): tmpgotonode = L2 else: if len(L1.entries) < len(L2.entries): tmpgotonode = L1 else: tmpgotonode = L2 if minext <> -1 and tmpgotonode is not None: ext = allexts.pop(minext) gotonode = tmpgotonode gotonode.entries.append(ext) gotonode.update() for ent in L1.entries: ent.node = L1 if ent.child is not None: ent.child.parent = ent for ent in L2.entries: ent.node = L2 if ent.child is not None: ent.child.parent = ent split(node, L1, L2) L1.update_up() L2.update_up() return True def split(node, L1, L2): entry1 = Entry(L1.extent) entry2 = Entry(L2.extent) if node.is_root(): node.entries = [] entry1.node = node entry2.node = node entry1.child = L1 entry2.child = L2 node.entries.append(entry1) node.entries.append(entry2) L1.parent = entry1 L2.parent = entry2 return else: entry1.node = L1 L1.parent = node.parent L1.parent.child = L1 del node insert(L1.parent.node, L2.extent, L2) return def search_rtree_extent(node, e): if node.is_leaf(): return node best_entry = None intersect_area = -1 for ent in node.entries: tmp_area = ent.MBR.intersect(e) if tmp_area > intersect_area: intersect_area = tmp_area best_entry = ent return search_rtree_extent(best_entry.child, e)
true
0cb6e6ae39b92a935931901d68c5e1e9f09079c9
Python
oklinux/LRWiki
/lib/tools/payload_generator.py
UTF-8
588
3.265625
3
[]
no_license
import string import random CHARS = ( string.ascii_uppercase + string.ascii_lowercase + string.digits ) def random_string(length=10, chars=CHARS): """ Generate a random alphanumeric string of the specified length. """ return str(''.join(random.choice(chars) for _ in range(length))) def generate_doc_data(**data): """ Generate a random doc data. """ doc_data = { 'title': data.get('title', random_string()), 'text': data.get('text', random_string()), 'author': data.get('author', random_string()) } return doc_data
true
bcd79ad70538b3f779b5ddbeda8f28353244a647
Python
adambarnes5000/python-tetris
/buttons.py
UTF-8
1,077
3.3125
3
[]
no_license
# KY040 Python Class # Martin O'Hanlon # stuffaboutcode.com import RPi.GPIO as GPIO from time import sleep GPIO.setmode(GPIO.BCM) class Buttons: CLOCKWISE = 1 ANTICLOCKWISE = -1 def __init__(self, callback_map): self.map = callback_map for pin, callback in callback_map.items(): GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP) GPIO.add_event_detect(pin, GPIO.FALLING, callback=callback, bouncetime=250) def stop(self): for pin in self.map.keys(): GPIO.remove_event_detect(pin) GPIO.cleanup() # test if __name__ == "__main__": def moveLeft(): print "Move Left" def moveRight(): print "Move Right" def drop(): print "Drop" def rotate(): print "Rotate" buttons = Buttons({21: rotate, 26:moveLeft, 19:moveRight, 16:drop}) try: while True: sleep(0.1) finally: buttons.stop() GPIO.cleanup()
true
88b50030eada37a3ae693d91368136efa97aa40f
Python
20143104/KMU
/2017-1/python/homework(2)/12.py
UTF-8
111
3.078125
3
[]
no_license
import numpy as np a = np.arange(5 , dtype = float) print(a) a = np.arange(1 , 6 , 2, dtype = int) print(a)
true
1a930d6aaf41f80e82d929cf1cc577ff3e4f7cb8
Python
JD-Canada/OFspbMaster
/turbulence.py
UTF-8
580
2.6875
3
[]
no_license
import pandas as pd import math import numpy as np surfaceArea=0.0505 flow=0.035 rho=1000 diameter=0.254 viscdy=0.001 radius=0.5*diameter V=flow/(3.14*(radius*radius)) TLEN=0.038*diameter reynolds=diameter*V*rho/viscdy tintensity=0.16*(reynolds)**(-1.0/8.0) nut=V*tintensity*(3.0/2.0)**(0.5) k=(3.0/2.0)*(0.69*tintensity)**2 epsilon=0.09*(k**(3.0/2.0))/TLEN print "TLEN is %1.8f" %TLEN print "Turbulent viscosity is %1.8f" %nut print "Turbulent intensity is %1.8f" %tintensity print "Reynolds number is %d" %reynolds print "k is %1.8f" %k print "epsilon is %1.8f" %epsilon
true
1604290a9cf87961a9240d285c50e81d7ad04917
Python
Theadre/miniProjetPyhton3-S3
/correction/index.py
UTF-8
611
3.0625
3
[]
no_license
#!/usr/bin/python3 # -*- coding: utf-8 -* html = """ <html> <head> <meta http-equiv="Content-Type" content="text/html"; charset="UTF-8"> <title>Front page</title> </head> <body> <br><a href="1-creationEtPeuplement.py">1. Creer et peupler la base</a> <br><a href="2-ajoutEtu.py">2. Ajouter un etudiant</a> <br><a href="3-ajoutNote.py">3. Ajouter une note</a> <br><a href="4-afficherNotesEtu.py">4. Afficher les notes d'un etudiant</a> <br><a href="5-afficherNotesCours.py">5. Afficher les notes triees d'un cours</a> <br><a href="6-supprimerCours.py">6. Supprimer un cours</a> </body> """ print(html)
true
037a7e7576ddac46142cfee126c5cdf9dcf77eed
Python
masa-su/pixyzoo
/NewtonianVAE/utils/env.py
UTF-8
1,168
2.859375
3
[]
no_license
import cv2 import numpy as np import torch # Preprocesses an observation inplace (from float32 Tensor [0, 255] to [-0.5, 0.5]) def preprocess_observation_(observation, bit_depth): # Quantise to given bit depth and centre observation.div_(2 ** (8 - bit_depth)).floor_().div_(2 ** bit_depth).sub_(0.5) # Dequantise (to approx. match likelihood of PDF of continuous images vs. PMF of discrete images) observation.add_(torch.rand_like(observation).div_(2 ** bit_depth)) # Postprocess an observation for storage (from float32 numpy array [-0.5, 0.5] to uint8 numpy array [0, 255]) def postprocess_observation(observation, bit_depth): return np.clip(np.floor((observation + 0.5) * 2 ** bit_depth) * 2 ** (8 - bit_depth), 0, 2 ** 8 - 1).astype(np.uint8) def _images_to_observation(images, bit_depth): # images = torch.tensor(cv2.resize(images, (64, 64)).transpose( # 2, 0, 1), dtype=torch.float32) # Resize and put channel first # Quantise, centre and dequantise inplace images = torch.from_numpy(images.copy()).to(torch.float32) preprocess_observation_(images, bit_depth) return images
true
a7e126b08d03cdffb193e428d7506c92cc68d2c8
Python
yash2662/project
/linearReg.py
UTF-8
1,343
3.5
4
[]
no_license
# Data Preprocessing Template # Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd # Importing the dataset dataset = pd.read_csv('Data.csv') X = dataset.iloc[:, :-1].values y = dataset.iloc[:, 3].values # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0) # Feature Scaling from sklearn.preprocessing import StandardScaler sc_X = StandardScaler() X_train = sc_X.fit_transform(X_train) #fit_transform(self, X[, y]) Fit to data, then transform it. X_test = sc_X.transform(X_test) #transform(self, X[, copy]) Perform standardization by centering and scaling sc_y = StandardScaler() y_train = sc_y.fit_transform(y_train) ''' Standardize features by removing the mean and scaling to unit variance The standard score of a sample x is calculated as: z = (x - u) / s where u is the mean of the training samples or zero if with_mean=False, and s is the standard deviation of the training samples or one if with_std=False. Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using transform. '''
true
16cc8e3bf3ac208c4b8a99c6db22a90f364cb2a6
Python
XiaoyangzZ/pycharm
/strategy/Python_knowledge/df.freq.py
UTF-8
649
2.65625
3
[]
no_license
""" 时间序列的基础频率 D: Day 每日历日 B: BusinessDay 每工作日 H: hour 每小时 T or min: Minute 每分钟 S: 每秒 L or ms: milli 每毫秒(即每千分之一秒) U: 每微妙(即百万分之一秒) M: MonthEnd 每月最后一个日历日 BM: BusinessMonthEnd 每月最后一个工作日 MS: MonthBegin 每月第一个日历日 BMS: BusinessMonthBegin 每月第一个工作日 W-MON\W-TUE: Week 从指定的星期几开始算起,每周 WOM-1MON,WOM-2MON: WeekOfMonth 产生每月第一、第二、第三、第四周的星期几,例如,WOM-3FRI表示每月第3个星期五 详细见利用python进行数据分析 """
true
7b94dd525f6b54273a728f90cd272261adf6516a
Python
4rlm/python_essential
/get_started.py
UTF-8
761
3.65625
4
[]
no_license
## W3 Schools TUTORIAL: https://www.w3schools.com/python/python_getstarted.asp #################################### ## 1. == Version == # $ python --version #################################### ## 2. == Execute Script == # $ python3 hello.py #=> best # $ python hello.py #=> ok #################################### ## 3. == Python CLI == # $ python # Launch # $ Ctrl-D # Exit #################################### ## 4. == Pause Program == # input() #################################### ## 5. == Print == # print("Hello, World!") #################################### ## 6. == input() == # Similar to 'gets.chomp' (Command-line String Input) # print("Enter your name:") # name = input() # print("Hello, " + name) ####################################
true
0f9014c2e0ecfdb398924842ec3f25a52e7854e0
Python
MrigankaIsHere/cowin-availability
/Notify.py
UTF-8
1,098
2.796875
3
[]
no_license
import smtplib import os class Notify: def __init__(self, to, index, slots, age, date): self.to = to self.index = index self.slots = slots self.age = age self.date = date gmail_user = os.getenv('gmail_user') gmail_password = os.getenv('gmail_password') sent_from = gmail_user subject = 'Cowin Slots Availability at your area' body = "Hey, we found some slots at your area, check it out before it goes away. \n\nHospital Detail: " + self.index + "\nSlots: " + str(self.slots) + "\nAge: " + str(self.age) + "\nDate: " + self.date email_text = """ Subject: {0} {1} """.format(subject, body) try: server = smtplib.SMTP_SSL('smtp.gmail.com', 465) server.ehlo() server.login(gmail_user, gmail_password) server.sendmail(sent_from, self.to, email_text) server.close() print(f"Email sent to {self.to}") except: print('Something went wrong...')
true
9f15bea0fdd69d854b24bc80db7264bfed0f5ed1
Python
zayslash/CSSI
/AppEngine/helloApp/hello.py
UTF-8
1,453
2.796875
3
[]
no_license
import webapp2 import jinja2 JINJA_ENV = jinja2.Environment( loader= jinja2.FileSystemLoader("Templates") ) html_page= """ <html> <head> <title> Hello </title> </head> <body> <p> Hello Brooklyn, CSSI! </p> </body> </html> """ html_page2= """ <html> <body> <form action= "/" method="post"> Name: <input type="text" name="field1"/> <input type="submit" value="Submit"/> </form> </body> </html> """ class MainHandler(webapp2.RequestHandler): def get(self): self.response.write(html_page2) def post(self): self.response.headers['Content-Type'] = 'text/plain' #self.response.write(self.request.POST) if "field1" not in self.request.POST: self.response.write('field1 not found') else: field1 = self.request.POST['field1'] self.response.write("hello " + field1 + "!") class AboutHandler (webapp2.RequestHandler): def get(self): self.response.write("all about about") class GreetingHandler (webapp2.RequestHandler): def get(self): #self.response.write("booooooo, im casper") template_values= {"name":"brooklyn"} template = JINJA_ENV.get_template('index.html') self.response.write(template.render(template_values)) app = webapp2.WSGIApplication([ ('/', MainHandler), ('/about', AboutHandler), ('/howdy',GreetingHandler) ], debug= True)
true
610404bf604c7f5ff259aef7cbe10c13568283ab
Python
jennymhkao/python-projects
/Exercise 08/Exercise 8.5.gyp
UTF-8
857
3.921875
4
[]
no_license
'''Write a program to read through the mail box data and when you find line that starts with "From", you will split the line into words using the split function. We are interested in who sent the message, which is the second word on the From line. From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008 You will parse the From line and print out the second word for each From line, then you will also count the number of From (not From:) lines and print out a count at the end. ''' fname = input('Enter file: ') try: fhand = open(fname) except: print('File cannot be opened:', fname) exit() count = 0 for line in fhand: line = line.rstrip() if not line.startswith('From'): continue words = line.split() print(words[1]) count = count + 1 #print(count) print("There were", count, "lines in the file with From as the first word")
true
6318b78392394a366df89d88c1b542b44716b408
Python
dr-dos-ok/Code_Jam_Webscraper
/solutions_python/Problem_97/1367.py
UTF-8
1,142
3.203125
3
[]
no_license
map = {'a': 'y', 'c': 'e', 'b': 'h', 'e': 'o', 'd': 's', 'g': 'v', 'f': 'c', 'i': 'd', 'h': 'x', 'k': 'i', 'j': 'u', 'm': 'l', 'l': 'g', 'o': 'k', 'n': 'b', 'p': 'r', 's': 'n', 'r': 't', 'u': 'j', 't': 'w', 'w': 'f', 'v': 'p', 'y': 'a', 'x': 'm', 'q':'z', 'z':'q'} def rotate(s, amount): return s[-amount:] + s[:-amount] def count_numbers(a, b): count = 0 for i in range(a, b + 1): table = set() s = str(i) for amount in range(1, len(str(i))): s = rotate(str(i), amount) if s not in table and int(s) <= b and len(s) == len(str(i)) and i < int(s): # print '(' + str(i) + ', ' + s + ')' table.add(s) count += 1 #else: # print 'bad: (' + str(i) + ', ' + s + ') - ', s not in table, int(s) <= b , len(s) == len(str(i)), i < int(s) return count with open('in.txt') as f: f.readline() lines = f.readlines() for line, i in zip(lines, range(len(lines))): a, b = [int(x) for x in line.split()] print "Case #" + str(i + 1) + ': ' + str(count_numbers(a, b))
true
cca868073dcaa9e3d0a4e19338d530bdb6aa8878
Python
kylin5207/MachineLearning
/数据预处理/特征选择/autoFeatureSelection/Select_base_model.py
UTF-8
1,769
3.140625
3
[]
no_license
# -*- coding: utf-8 -*- """ Created on Mon Sep 16 11:27:32 2019 基于模型的特征选择 @author: 尚梦琦 """ from sklearn.linear_model import LogisticRegression from sklearn.feature_selection import SelectFromModel from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.datasets import load_breast_cancer import matplotlib.pyplot as plt # 1. 加载数据集 cancer = load_breast_cancer() X, y = cancer.data, cancer.target print("initial data shape:", X.shape) featureName = cancer.feature_names X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0) # 2. 构建基于随机森林的特征模型选择器 select = SelectFromModel(RandomForestClassifier(n_estimators=100, random_state=42), threshold="median") select.fit(X_train, y_train) X_train_selectFeature = select.transform(X_train) print("X_train shape:", X_train.shape) print("特征选择后的shape:", X_train_selectFeature.shape) # 3. 查看选中的特征 mask = select.get_support() selectFeatures = featureName[mask == True] print("选中的属性名:\n", selectFeatures) plt.matshow(mask.reshape(1, -1), cmap="GnBu") plt.xlabel("Sample Index") plt.show() # 4. 性能对比 lr1 = LogisticRegression() lr1.fit(X_train, y_train) print("Initial train Score:", lr1.score(X_train, y_train)) print("Initial test Score:", lr1.score(X_test, y_test)) lr2 = LogisticRegression() lr2.fit(X_train_selectFeature, y_train) X_test_selectFeature = select.transform(X_test) print("Selected Features train Score:", lr2.score(X_train_selectFeature, y_train)) print("Selected Features test Score:", lr2.score(X_test_selectFeature, y_test))
true
8439a1ed68ae7edadd081b290ad15d71e541303e
Python
sahg4n/FacialRecog
/gausianBlurFR.py
UTF-8
973
2.59375
3
[]
no_license
import cv2 import face_recognition as fr webcamStream = cv2.VideoCapture(0) allFaceLocs = [] while True: ret, curFrame = webcamStream.read() currFrameSmall = cv2.resize(curFrame, (0,0), fx=0.25, fy=0.25) faceLoc = fr.face_locations(currFrameSmall, 2, 'hog') for index, curFaceLoc in enumerate(faceLoc): top, right, bottom, left = curFaceLoc top = top*4 right = right*4 bottom = bottom*4 left = left*4 print('Found face {} at top:{}, right:{}, bottom:{}, and left:{}'.format(index+1, top, right, bottom, left)) frameToBlur = curFrame[top:bottom, left:right] blurredFrame = cv2.GaussianBlur(frameToBlur, (99,99), 9) curFrame[top:bottom, left:right] = blurredFrame cv2.rectangle(curFrame, (left, top), (right, bottom), (0,0,255), 2) cv2.imshow('webcam', curFrame) if cv2.waitKey(1) & 0xFF == ord('q'): break webcamStream.release() cv2.destroyAllWindows()
true
41e498e63d547d357056bbfdcb9c756df5500809
Python
uncharted-aske/research
/gromet/data/ml4ai_repo/example_call_ex1.py
UTF-8
12,334
2.734375
3
[ "Apache-2.0" ]
permissive
from gromet import * # never do this :) """ def bar(y: float) -> float: return y + 2 # bar_exp def foo(x: float) -> float: return bar(x) # bar_call def main(a: float, b: float) -> float: a = foo(a) # foo_call_1 b = foo(b) # foo_call_2 return a + b """ # ----------------------------------------------------------------------------- # GroMEt instance # ----------------------------------------------------------------------------- def generate_gromet() -> Gromet: variables = [] wires = [ # main Wire(uid=UidWire("W:main_a.foo_call_1_x_1"), type=None, value_type=UidType("T:Float"), name=None, value=None, metadata=None, src=UidPort("P:main.in.a"), tgt=UidPort("P:foo_call_1.in.x_1")), Wire(uid=UidWire("W:main_b.foo_call_2_x_2"), type=None, value_type=UidType("T:Float"), name=None, value=None, metadata=None, src=UidPort("P:main.in.b"), tgt=UidPort("P:foo_call_2.in.x_2")), Wire(uid=UidWire("W:main_foo_call_1_fo_1.main_exp_a"), type=None, value_type=UidType("T:Float"), name=None, value=None, metadata=None, src=UidPort("P:foo_call_1.out.fo_1"), tgt=UidPort("P:main_exp.in.a")), Wire(uid=UidWire("W:main_foo_call_2_fo_2.main_exp_b"), type=None, value_type=UidType("T:Float"), name=None, value=None, metadata=None, src=UidPort("P:foo_call_2.out.fo_2"), tgt=UidPort("P:main_exp.in.b")), Wire(uid=UidWire("W:main_exp_result.main_result"), type=None, value_type=UidType("T:Float"), name=None, value=None, metadata=None, src=UidPort("P:main_exp.out.result"), tgt=UidPort("P:main.out.result")), # foo Wire(uid=UidWire("W:foo_x.bar_call_y_1"), type=None, value_type=UidType("T:Float"), name=None, value=None, metadata=None, src=UidPort("P:foo.in.x"), tgt=UidPort("P:bar_call.in.y_1")), Wire(uid=UidWire("W:foo_bar_call_bo_1.fo"), type=None, value_type=UidType("T:Float"), name=None, value=None, metadata=None, src=UidPort("P:bar_call.out.bo_1"), tgt=UidPort("P:foo.out.fo")), # bar Wire(uid=UidWire("W:bar_y.exp_y"), type=None, value_type=UidType("T:Float"), name=None, value=None, metadata=None, 
src=UidPort("P:bar.in.y"), tgt=UidPort("P:bar_exp.in.y")), Wire(uid=UidWire("W:bar_exp_res.bo"), type=None, value_type=UidType("T:Float"), name=None, value=None, metadata=None, src=UidPort("P:bar_exp.out.result"), tgt=UidPort("P:bar.out.bo")), ] ports = [ # main input Port(uid=UidPort("P:main.in.a"), box=UidBox("B:main"), type=UidType("PortInput"), value_type=UidType("T:Float"), name="a", value=None, metadata=None), Port(uid=UidPort("P:main.in.b"), box=UidBox("B:main"), type=UidType("PortInput"), value_type=UidType("T:Float"), name="b", value=None, metadata=None), # main output Port(uid=UidPort("P:main.out.result"), box=UidBox("B:main"), type=UidType("PortOutput"), value_type=UidType("T:Float"), name="result", value=None, metadata=None), # foo_call_1 input PortCall(uid=UidPort("P:foo_call_1.in.x_1"), box=UidBox("B:foo_call_1"), call=UidPort("P:foo.in.x"), type=UidType("PortInput"), value_type=UidType("T:Float"), name=None, value=None, metadata=None), # foo_call_1 output PortCall(uid=UidPort("P:foo_call_1.out.fo_1"), box=UidBox("B:foo_call_1"), call=UidPort("P:foo.out.fo"), type=UidType("PortOutput"), value_type=UidType("T:Float"), name=None, value=None, metadata=None), # foo_call_2 input PortCall(uid=UidPort("P:foo_call_2.in.x_2"), box=UidBox("B:foo_call_2"), call=UidPort("P:foo.in.x"), type=UidType("PortInput"), value_type=UidType("T:Float"), name=None, value=None, metadata=None), # foo_call_2 output PortCall(uid=UidPort("P:foo_call_2.out.fo_2"), box=UidBox("B:foo_call_2"), call=UidPort("P:foo.out.fo"), type=UidType("PortOutput"), value_type=UidType("T:Float"), name=None, value=None, metadata=None), # main_exp input Port(uid=UidPort("P:main_exp.in.a"), box=UidBox("B:main_exp"), type=UidType("PortInput"), value_type=UidType("T:Float"), name="a", value=None, metadata=None), Port(uid=UidPort("P:main_exp.in.b"), box=UidBox("B:main_exp"), type=UidType("PortInput"), value_type=UidType("T:Float"), name="b", value=None, metadata=None), # main_exp output 
Port(uid=UidPort("P:main_exp.out.result"), box=UidBox("B:main_exp"), type=UidType("PortOutput"), value_type=UidType("T:Float"), name="result", value=None, metadata=None), # foo input Port(uid=UidPort("P:foo.in.x"), box=UidBox("B:foo"), type=UidType("PortInput"), value_type=UidType("T:Float"), name="x", value=None, metadata=None), # foo output Port(uid=UidPort("P:foo.out.fo"), box=UidBox("B:foo"), type=UidType("PortOutput"), value_type=UidType("T:Float"), name="fo", value=None, metadata=None), # bar_call input PortCall(uid=UidPort("P:bar_call.in.y_1"), box=UidBox("B:bar_call"), call=UidPort("P:bar.in.y"), type=UidType("PortInput"), value_type=UidType("T:Float"), name=None, value=None, metadata=None), # bar_call output PortCall(uid=UidPort("P:bar_call.out.bo_1"), box=UidBox("B:bar_call"), call=UidPort("P:bar.out.bo"), type=UidType("PortOutput"), value_type=UidType("T:Float"), name=None, value=None, metadata=None), # bar input Port(uid=UidPort("P:bar.in.y"), box=UidBox("B:bar"), type=UidType("PortInput"), value_type=UidType("T:Float"), name="y", value=None, metadata=None), # bar output Port(uid=UidPort("P:bar.out.bo"), box=UidBox("B:bar"), type=UidType("PortOutput"), value_type=UidType("T:Float"), name="bo", value=None, metadata=None), # bar_exp input Port(uid=UidPort("P:bar_exp.in.y"), box=UidBox("B:bar_exp"), type=UidType("PortInput"), value_type=UidType("T:Float"), name="y", value=None, metadata=None), # bar_exp output Port(uid=UidPort("P:bar_exp.out.result"), box=UidBox("B:bar_exp"), type=UidType("PortOutput"), value_type=UidType("T:Float"), name="result", value=None, metadata=None), ] # -- main -- foo_call_1 = BoxCall(uid=UidBox("B:foo_call_1"), type=None, name=None, call=UidBox("B:foo"), ports=[UidPort("P:foo_call_1.in.x_1"), UidPort("P:foo_call_1.out.fo_1")], metadata=None) foo_call_2 = BoxCall(uid=UidBox("B:foo_call_2"), type=None, name=None, call=UidBox("B:foo"), ports=[UidPort("P:foo_call_2.in.x_2"), UidPort("P:foo_call_2.out.fo_2")], metadata=None) e1 = 
Expr(call=RefOp(UidOp("+")), args=[UidPort("P:main_exp.in.a"), UidPort("P:main_exp.in.b")]) main_exp = Expression(uid=UidBox("B:main_exp"), type=None, name=None, ports=[UidPort("P:main_exp.in.a"), UidPort("P:main_exp.in.b"), UidPort("P:main_exp.out.result")], tree=e1, metadata=None) main = Function(uid=UidBox("B:main"), type=None, name="main", ports=[UidPort("P:main.in.a"), UidPort("P:main.in.b"), UidPort("P:main.out.result")], # contents wires=[UidWire("W:main_a.foo_call_1_x_1"), UidWire("W:main_b.foo_call_2_x_2"), UidWire("W:main_foo_call_1_fo_1.main_exp_a"), UidWire("W:main_foo_call_2_fo_2.main_exp_b"), UidWire("W:main_exp_result.main_result")], boxes=[UidBox("B:foo_call_1"), UidBox("B:foo_call_2"), UidBox("B:main_exp")], junctions=None, metadata=None) # -- foo -- bar_call = BoxCall(uid=UidBox("B:bar_call"), type=None, name=None, call=UidBox("B:bar"), ports=[UidPort("P:bar_call.in.y_1"), UidPort("P:bar_call.out.bo_1")], metadata=None) foo = Function(uid=UidBox("B:foo"), type=None, name="foo", ports=[UidPort("P:foo.in.x"), UidPort("P:foo.out.fo")], # contents wires=[UidWire("W:foo_x.bar_call_y_1"), UidWire("W:foo_bar_call_bo_1.fo")], boxes=[UidBox("B:bar_call")], junctions=None, metadata=None) # -- bar -- e2 = Expr(call=RefOp(UidOp("+")), args=[UidPort("P:bar_exp.in.y"), Literal(uid=None, type=UidType("T:Float"), value=Val("2"), name=None, metadata=None)]) bar_exp = Expression(uid=UidBox("B:bar_exp"), type=None, name=None, ports=[UidPort("P:bar_exp.in.y"), UidPort("P:bar_exp.out.result")], tree=e2, metadata=None) bar = Function(uid=UidBox("B:bar"), type=None, name="bar", ports=[UidPort("P:bar.in.y"), UidPort("P:bar.out.bo")], # contents wires=[UidWire("W:bar_y.exp_y"), UidWire("W:bar_exp_res.bo")], boxes=[UidBox("B:bar_exp")], junctions=None, metadata=None) boxes = [main, foo_call_1, foo_call_2, main_exp, foo, bar_call, bar, bar_exp] _g = Gromet( uid=UidGromet("call_ex1"), name="call_ex1", type=UidType("FunctionNetwork"), root=main.uid, types=None, literals=None, 
junctions=None, ports=ports, wires=wires, boxes=boxes, variables=variables, metadata=None ) return _g # ----------------------------------------------------------------------------- # Script # ----------------------------------------------------------------------------- if __name__ == "__main__": gromet_to_json(generate_gromet())
true
bd8ed03e0eee2abd3b226d973102e38be2961b16
Python
mankabitm/chat
/Chat(TCP)/recv.py
UTF-8
274
2.703125
3
[]
no_license
#!/usr/bin/python2 import socket s=socket.socket(socket.AF_INET,socket.SOCK_STREAM) s.bind(("",9999)) s.listen(5) while 4>3: cliport,cliaddr=s.accept() print cliport.recv(100) #print "From client ip->",cliaddr r=raw_input("Enter your reply:") cliport.send(r) s.close()
true
89a128655a2cbc4e5295f442bae203efa2b18c69
Python
GHeeJeon/algorithm-collection
/pythonProject/review_3rd_week.py
UTF-8
1,241
3.640625
4
[]
no_license
# 외부 정렬 및 탐색은 다루지 않음. # 탐색 알고리즘이란? 컴퓨터에 저장된 자료를 신속하고 정확하게 찾아주는 알고리즘 # 내부 탐색 외부 탐색으로 나뉨. # 순차 탐색 알고리즘 class node: def __init__(self, key = None): self.key = key class Dict: def __init__(self): Dict.a = [] def search(self, search_key): left = 0 right = len(Dict.a) - 1 while right >= left: mid = int((left + right) / 2) if Dict.a[mid].key == search_key: return mid if Dict.a[mid].key > search_key: right = mid - 1 else: right = mid + 1 return 1 def insert(self, v): Dict.a.append(node(v)) import random import time N = 10000 key = list(range(1, N + 1)) s_key = list(range(1, N + 1)) random.shuffle(key) d = Dict() for i in range(N): d.insert(key[i]) start_time = time.time() for i in range(N): result = d.search(s_key[i]) if result == -1 or key[result] != s_key[i]: print('탐색오류') end_time = time.time() - start_time print('이진탐색의 실행시간 (N = %d) : %0.3f'%(N, end_time)) print('탐색완료')
true
53886949a119f7cc2d5bd5de4b6c0d1983ae37a0
Python
mouradfelipe/channel_decoding
/LAB1/generate_answers.py
UTF-8
1,871
2.875
3
[]
no_license
import numpy as np def mourad_check(msg): # msg deve ter tamanho 14 H = np.array([[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 0, 1, 1], [1, 1, 0, 1, 1, 1], [1, 0, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 0, 0], [1,0,0,0,0,0], [0,1,0,0,0,0], [0,0,1,0,0,0], [0,0,0,1,0,0], [0,0,0,0,1,0], [0,0,0,0,0,1]]) return msg.dot(H) % 2 def next(array): n = np.sum(array) if n == 0: return np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) i = array.size - 1 j = 0 while array[i] == 1: i = i - 1 j = j + 1 if j == n: return np.array([1]*(n+1) + [0]*(14 - n - 1)) if j == 0: while array[i] == 0: i = i - 1 new_array = [] for elem in array: new_array.append(elem) new_array[i] = 0 new_array[i+1] = 1 return np.array(new_array) while array[i] == 0: i = i - 1 new_array = [] for elem in array: new_array.append(elem) for num in range(j): new_array[array.size - 1 - num] = 0 for num in range(j): new_array[i+1+num+1] = 1 new_array[i] = 0 new_array[i+1] = 1 return np.array(new_array) def convert(vector): acum = 0 i = vector.size - 1 j = 0 while i >= 0: acum = acum + vector[i]*(2**j) i = i - 1 j = j + 1 return acum table = {} for i in range(64): table[i] = np.array([]) a = np.array([0]*14) filled = 0 while(filled < 64): error_pattern = mourad_check(a) num = convert(error_pattern) if table[num].size == 0: table[num] = a filled = filled + 1 a = next(a) print(table) with open("LUT_table.txt", "w") as text_file: for i in range(64): for j in range(len(table[i])): text_file.write(str(table[i][j])) text_file.write("\n")
true
c7f0003b57170572676bb9cabc1c5e57baeb5c4e
Python
PvonK/Sudoku
/Interfaz_Sudoku_Test.py
UTF-8
10,688
3.0625
3
[]
no_license
"""Unit tests for the console Sudoku interface (``Interfaz``).

The interactive collaborators (``builtins.input``, ``sys.stdout`` and
``Interfaz.inicio``) are replaced with mocks so the full console flow can
be driven and asserted without a terminal session.
"""
import unittest
import io
from parameterized import parameterized
from unittest.mock import patch, MagicMock
from Interfaz_Sudoku import Interfaz
from Sudoku import Sudoku


class TestInterfazSudoku(unittest.TestCase):
    """Exercises input validation and complete play-throughs on 4x4 and 9x9 boards."""

    def setUp(self):
        # Two interfaces under test: one bound to a 4x4 board and one to a
        # 9x9 board.  "x" marks an empty cell in the initial layout.
        self.user4 = Interfaz()
        self.user9 = Interfaz()
        lista4 = [["4", "x", "3", "1"],
                  ["x", "3", "x", "x"],
                  ["3", "1", "x", "2"],
                  ["x", "4", "x", "x"]]
        self.user4.tam = 4
        self.user4.level = "1"
        self.user4.game = Sudoku(lista4)
        lista9 = [
            ["5", "3", "x", "x", "7", "x", "x", "x", "x"],
            ["6", "x", "x", "x", "9", "5", "x", "x", "x"],
            ["x", "9", "8", "x", "x", "x", "x", "6", "x"],
            ["8", "x", "x", "x", "6", "x", "x", "x", "3"],
            ["4", "x", "x", "8", "x", "3", "x", "x", "1"],
            ["7", "x", "x", "x", "2", "x", "x", "x", "6"],
            ["x", "6", "x", "x", "x", "x", "2", "8", "x"],
            ["x", "x", "x", "4", "1", "9", "x", "x", "5"],
            ["x", "x", "x", "x", "8", "x", "x", "7", "9"]]
        self.user9.tam = 9
        self.user9.level = "1"
        self.user9.game = Sudoku(lista9)

    # --- ingresar(): non-numeric values/coordinates must be rejected ---
    @parameterized.expand([
        ("a", "4", "6"),
        ("2", "g", "6"),
        ("2", "4", "d"),
        ("!", "4", "6"),
        ("2", "!", "6"),
        ("2", "4", "!"),
        ("!", "4", "$"),
        ("+", "g", "6"),
        ("3", "as", "/"),
        (".", "%", "d")
    ])
    def test_ingresar_letras_y_simbolos(self, numero, fila, columna):
        result = self.user4.ingresar(numero, fila, columna)
        self.assertFalse(result)

    def test_ingresar_letra_x_en_numero_correcto(self):
        # "x" is the one legal non-digit value: it clears a cell.
        result = self.user4.ingresar("x", 1, 1)
        self.assertTrue(result)

    # --- pedir_tam(): board size prompt accepts only 4 or 9 ---
    def test_pedir_tam_4(self):
        with patch("builtins.input", return_value="4"):
            self.user4.pedir_tam()
            self.assertEqual(self.user4.tam, "4")

    @parameterized.expand([
        ("2"),
        ("a"),
        ("!"),
        ("&"),
        ("49"),
        ("94")
    ])
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_pedir_tam_mal(self, tamMal, mock_stdout):
        # First answer is invalid, second ("4") ends the prompt loop; the
        # error message must have been printed exactly once.
        mock = MagicMock()
        mock.side_effect = [tamMal, "4"]
        with patch("builtins.input", new=mock):
            self.user4.pedir_tam()
            self.assertEqual(mock_stdout.getvalue(), "Ingrese 4 o 9 \n\n\n")

    # --- pedir_lvl(): difficulty prompt accepts only 1, 2 or 3 ---
    def test_pedir_lvl_1(self):
        with patch("builtins.input", return_value="1"):
            self.user4.pedir_lvl()
            self.assertEqual(self.user4.level, "1")

    def test_pedir_lvl_2(self):
        with patch("builtins.input", return_value="2"):
            self.user4.pedir_lvl()
            self.assertEqual(self.user4.level, "2")

    def test_pedir_lvl_3(self):
        with patch("builtins.input", return_value="3"):
            self.user4.pedir_lvl()
            self.assertEqual(self.user4.level, "3")

    @parameterized.expand([
        ("33"),
        ("a"),
        ("!"),
        ("&"),
        ("11"),
        ("22"),
        ("12"),
        ("23"),
        ("21"),
        ("123")
    ])
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_pedir_lvl_mal(self, lvlMal, mock_stdout):
        mock = MagicMock()
        mock.side_effect = [lvlMal, "1"]
        with patch("builtins.input", new=mock):
            self.user4.pedir_lvl()
            self.assertEqual(mock_stdout.getvalue(), "Ingrese 1, 2 o 3 \n\n\n")

    # --- pedirvalores() on the 4x4 board: each mocked input() triple is
    # (numero, fila, columna); out-of-range coordinates/values must be
    # rejected with the "Ingrese numeros validos" message. ---
    @parameterized.expand([
        ("2", "0", "1"),
        ("2", "-1", "1"),
        ("2", "5", "1"),
        ("2", "8", "1"),
        ("2", "19", "1"),
    ])
    def test_pedir_valores_fila_mal4(self, numero, fila, columna):
        mock = MagicMock()
        mock.side_effect = [numero, fila, columna]
        with patch("builtins.input", new=mock):
            result = self.user4.pedirvalores()
            self.assertEqual(result, "\nIngrese numeros validos")

    @parameterized.expand([
        ("2", "1", "0"),
        ("2", "1", "-1"),
        ("2", "1", "5"),
        ("2", "1", "8"),
        ("2", "1", "19"),
    ])
    def test_pedir_valores_columna_mal4(self, numero, fila, columna):
        mock = MagicMock()
        mock.side_effect = [numero, fila, columna]
        with patch("builtins.input", new=mock):
            result = self.user4.pedirvalores()
            self.assertEqual(result, "\nIngrese numeros validos")

    @parameterized.expand([
        ("0", "2", "1"),
        ("-1", "2", "1"),
        ("5", "2", "1"),
        ("8", "2", "1"),
        ("19", "2", "1"),
    ])
    def test_pedir_valores_numero_mal4(self, numero, fila, columna):
        mock = MagicMock()
        mock.side_effect = [numero, fila, columna]
        with patch("builtins.input", new=mock):
            result = self.user4.pedirvalores()
            self.assertEqual(result, "\nIngrese numeros validos")

    # --- pedirvalores(): every legal move of the 4x4 puzzle's solution ---
    @parameterized.expand([
        ("2", "1", "2"),
        ("4", "3", "3"),
        ("1", "2", "1"),
        ("2", "4", "1"),
        ("2", "2", "3"),
        ("4", "2", "4"),
        ("1", "4", "3"),
        ("3", "4", "4"),
    ])
    def test_pedir_valores_fila_bien4(self, numero, fila, columna):
        mock = MagicMock()
        mock.side_effect = [numero, fila, columna]
        with patch("builtins.input", new=mock):
            result = self.user4.pedirvalores()
            self.assertNotEqual(result, "\nIngrese numeros validos")

    def test_pedir_tam_9(self):
        with patch("builtins.input", return_value="9"):
            self.user9.pedir_tam()
            self.assertEqual(self.user9.tam, "9")

    # --- pedirvalores(): every legal move of the 9x9 puzzle's solution ---
    @parameterized.expand([
        ("4", "1", "3"), ("6", "1", "4"), ("8", "1", "6"), ("9", "1", "7"),
        ("1", "1", "8"), ("2", "1", "9"), ("7", "2", "2"), ("2", "2", "3"),
        ("1", "2", "4"), ("3", "2", "7"), ("4", "2", "8"), ("8", "2", "9"),
        ("1", "3", "1"), ("3", "3", "4"), ("4", "3", "5"), ("2", "3", "6"),
        ("5", "3", "7"), ("7", "3", "9"), ("5", "4", "2"), ("9", "4", "3"),
        ("7", "4", "4"), ("1", "4", "6"), ("4", "4", "7"), ("2", "4", "8"),
        ("2", "5", "2"), ("6", "5", "3"), ("5", "5", "5"), ("7", "5", "7"),
        ("9", "5", "8"), ("1", "6", "2"), ("3", "6", "3"), ("9", "6", "4"),
        ("4", "6", "6"), ("8", "6", "7"), ("5", "6", "8"), ("9", "7", "1"),
        ("1", "7", "3"), ("5", "7", "4"), ("3", "7", "5"), ("7", "7", "6"),
        ("4", "7", "9"), ("2", "8", "1"), ("8", "8", "2"), ("7", "8", "3"),
        ("6", "8", "7"), ("3", "8", "8"), ("3", "9", "1"), ("4", "9", "2"),
        ("5", "9", "3"), ("2", "9", "4"), ("6", "9", "6"), ("1", "9", "7"),
    ])
    def test_pedir_valores_fila_bien9(self, numero, fila, columna):
        mock = MagicMock()
        mock.side_effect = [numero, fila, columna]
        with patch("builtins.input", new=mock):
            result = self.user9.pedirvalores()
            self.assertNotEqual(result, "\nIngrese numeros validos")

    # --- play(): drive a complete winning game; the session must end with
    # the final "FIN" banner on stdout.  ``inicio`` is stubbed out so play()
    # uses the board prepared in setUp(). ---
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_play_ganador4(self, mock_stdout):
        mock = MagicMock()
        mock.side_effect = ["2", "1", "2", "4", "3", "3", "1", "2", "1",
                            "2", "4", "1", "2", "2", "3", "4", "2", "4",
                            "1", "4", "3", "3", "4", "4"]
        with patch("Interfaz_Sudoku.Interfaz.inicio", return_value=None), patch("builtins.input", new=mock):
            self.user4.play()
            self.assertEqual(mock_stdout.getvalue()[-6:], "\n\nFIN\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_play_ganador9(self, mock_stdout):
        # Same (numero, fila, columna) triples as test_pedir_valores_fila_bien9,
        # flattened into one input() stream.
        mock = MagicMock()
        mock.side_effect = ["4", "1", "3", "6", "1", "4", "8", "1", "6",
                            "9", "1", "7", "1", "1", "8", "2", "1", "9",
                            "7", "2", "2", "2", "2", "3", "1", "2", "4",
                            "3", "2", "7", "4", "2", "8", "8", "2", "9",
                            "1", "3", "1", "3", "3", "4", "4", "3", "5",
                            "2", "3", "6", "5", "3", "7", "7", "3", "9",
                            "5", "4", "2", "9", "4", "3", "7", "4", "4",
                            "1", "4", "6", "4", "4", "7", "2", "4", "8",
                            "2", "5", "2", "6", "5", "3", "5", "5", "5",
                            "7", "5", "7", "9", "5", "8", "1", "6", "2",
                            "3", "6", "3", "9", "6", "4", "4", "6", "6",
                            "8", "6", "7", "5", "6", "8", "9", "7", "1",
                            "1", "7", "3", "5", "7", "4", "3", "7", "5",
                            "7", "7", "6", "4", "7", "9", "2", "8", "1",
                            "8", "8", "2", "7", "8", "3", "6", "8", "7",
                            "3", "8", "8", "3", "9", "1", "4", "9", "2",
                            "5", "9", "3", "2", "9", "4", "6", "9", "6",
                            "1", "9", "7"]
        with patch("Interfaz_Sudoku.Interfaz.inicio", return_value=None), patch("builtins.input", new=mock):
            self.user9.play()
            self.assertEqual(mock_stdout.getvalue()[-6:], "\n\nFIN\n")


if __name__ == '__main__':
    unittest.main()
true
ad3f903a4eb43941eb3846b6790c20208f263a55
Python
BabaVegato/StratObsGame
/server.py
UTF-8
1,225
2.96875
3
[ "CC-BY-4.0", "BSD-3-Clause" ]
permissive
import socket
import threading
import pickle
import select


class Server:
    """Minimal single-client TCP server that exchanges pickled objects.

    WARNING: ``pickle.loads`` on bytes received from the network can execute
    arbitrary code.  Only use this server with fully trusted peers.
    """

    def __init__(self):
        self.socket = None        # listening socket, created by create_server()
        self.running = False      # True once the server is listening
        self.conn = None          # accepted client connection, if any
        # BUGFIX: conn_addr was only created inside create_server(), so
        # wait_for_a_connection() raised AttributeError if called first.
        self.conn_addr = None     # address of the accepted client
        self.info_rcvd = None     # last object received from the client

    def create_server(self, host, port):
        """Bind to (host, port) and start listening."""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.bind((host, port))
        self.socket.listen()
        self.running = True
        self.conn_addr = None
        self.conn = None
        # BUGFIX: the message hard-coded port 5555 regardless of *port*.
        print(f"Listening on port {port} ...")

    def wait_for_a_connection(self):
        """Block until a client connects; remember the connection and address."""
        while self.conn_addr is None:
            self.conn, self.conn_addr = self.socket.accept()
            print(f"Connection from {self.conn_addr} has been established !")

    def send_obj(self, conn, obj):
        """Pickle *obj* and send it to the connected client.

        NOTE(review): the *conn* parameter is kept for interface
        compatibility, but — as in the original code — the stored
        ``self.conn`` is the socket actually written to.
        """
        msg = pickle.dumps(obj)
        self.conn.send(msg)
        print("Sent an object : ", obj)
        print("----------------------")

    def wait_for_object(self, conn):
        """Loop forever, storing every object received on ``self.conn``.

        NOTE(review): as above, the *conn* parameter is unused; messages are
        read from ``self.conn``.  Each recv is capped at 4096 bytes, so a
        pickle larger than that would arrive truncated — confirm callers
        only exchange small objects.
        """
        while True:
            if self.conn is not None:
                msg = self.conn.recv(4096)
                d = pickle.loads(msg)  # unsafe on untrusted input (see class doc)
                self.info_rcvd = d
                print("Object received : ", d)
                print("----------------------")
true
75bb270f3c54a865a5029a0061f7c0128779e048
Python
vk-en/python
/One week/1.5.9.py
UTF-8
1,086
3.953125
4
[]
no_license
class Buffer:
    """Collect integers and report them in completed batches of five.

    Each time five values have accumulated, the oldest five are removed
    and their sum is printed, so at most four values are ever pending.
    """

    def __init__(self):
        self._pending = []

    def add(self, *values):
        """Append *values*; print the sum of every group of five completed."""
        self._pending += list(values)
        while len(self._pending) >= 5:
            batch, self._pending = self._pending[:5], self._pending[5:]
            print(sum(batch))

    def get_current_part(self):
        """Return the values that have not yet formed a full group of five."""
        return self._pending


def proba():
    """Self-check mirroring the grader's example session."""
    buf = Buffer()
    buf.add(1, 2, 3)
    assert buf.get_current_part() == [1, 2, 3]
    buf.add(4, 5, 6)            # prints 15, the sum of the first five
    assert buf.get_current_part() == [6]
    buf.add(7, 8, 9, 10)        # prints 40, the sum of the second five
    assert buf.get_current_part() == []
    buf.add(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   # prints 5 twice
    assert buf.get_current_part() == [1]
    # NOTE(review): the original author reported the online grader rejecting
    # this solution ("ПОЧЕМУ ТО НЕ проходит"); all assertions above pass locally.


if __name__ == "__main__":
    proba()
true
f3053b9b1b8f83eecc3d08bcfc00a62cab1e2cc1
Python
simone-campagna/mu-language
/lib/python/progressbar/span.py
UTF-8
13,768
3.265625
3
[]
no_license
#!/usr/bin/env python
# NOTE(review): Python 2 source (string-form ``raise`` and ``print``
# statements); it will not run under Python 3 without conversion.
#
# A layout engine for one-dimensional "spans": a tree of regions whose
# sizes are fixed, fractional (of total or of free space), or filling.
# Calling update() on the root distributes the root's fixed size down
# the tree in priority order: fixed -> total-fraction -> free-fraction
# -> flexible -> filler children.


class SpanError(Exception):
    """Raised when a span tree is mis-configured or space cannot be allocated."""
    pass


class SizedObj(object):
    """Base for anything with a current integer size.

    Arithmetic operators build lazy expression nodes (SizedObjAdd etc.)
    whose get_size() re-evaluates from the operands each time, so a
    derived size tracks later changes to its inputs.
    """

    def __init__(self, size=None):
        self._set_size(size)

    def get_size(self):
        return self._current_size

    def _set_size(self, size):
        self._current_size = size

    def __add__(self, other):
        other = sized_obj(other)
        return SizedObjAdd(self, other)

    def __radd__(self, other):
        other = sized_obj(other)
        return SizedObjAdd(self, other)

    def __sub__(self, other):
        other = sized_obj(other)
        return SizedObjSub(self, other)

    def __rsub__(self, other):
        # Reflected subtraction: other - self.
        other = sized_obj(other)
        return SizedObjSub(other, self)

    def __mul__(self, other):
        other = sized_obj(other)
        return SizedObjMul(self, other)

    def __rmul__(self, other):
        other = sized_obj(other)
        return SizedObjMul(self, other)

    def __div__(self, other):
        other = sized_obj(other)
        return SizedObjDiv(self, other)

    def __rdiv__(self, other):
        # Reflected division: other / self.
        other = sized_obj(other)
        return SizedObjDiv(other, self)

    def __neg__(self):
        return SizedObjNeg(self)

    def __pos__(self):
        return SizedObjPos(self)

    def __str__(self):
        return "%s[%s]" % (self.__class__.__name__, self.get_size())


class SizedObjUnOp(SizedObj):
    """Unary expression node over one SizedObj operand."""
    OPERATOR = '?'

    def __init__(self, operand):
        assert isinstance(operand, SizedObj), "operand type %s is not %s" % (type(operand).__name__, SizedObj.__name__)
        self.operand = operand
        super(SizedObjUnOp, self).__init__()

    def __str__(self):
        return "%s(%s(%s))[%s]" % (self.__class__.OPERATOR, self.__class__.__name__, self.operand, self.get_size())


class SizedObjPos(SizedObjUnOp):
    """Unary plus: passes the operand's size through unchanged."""
    OPERATOR = '+'

    def get_size(self):
        return self.operand.get_size()


class SizedObjNeg(SizedObjUnOp):
    """Unary minus: negates the operand's size."""
    OPERATOR = '-'

    def get_size(self):
        return -self.operand.get_size()


class SizedObjBinOp(SizedObj):
    """Binary expression node over two SizedObj operands."""
    OPERATOR = '?'

    def __init__(self, left, right):
        assert isinstance(left, SizedObj), "left operand type %s is not %s" % (type(left).__name__, SizedObj.__name__)
        assert isinstance(right, SizedObj), "right operand type %s is not %s" % (type(right).__name__, SizedObj.__name__)
        self.left = left
        self.right = right
        super(SizedObjBinOp, self).__init__()

    def __str__(self):
        # NOTE(review): the format string's parentheses are unbalanced and the
        # joiner is a literal '+' regardless of OPERATOR — cosmetic only.
        return "(%s(%s(%s+%s))[%s]" % (self.__class__.OPERATOR, self.__class__.__name__, self.left, self.right, self.get_size())


class SizedObjAdd(SizedObjBinOp):
    OPERATOR = '+'

    def get_size(self):
        return self.left.get_size() + self.right.get_size()


class SizedObjSub(SizedObjBinOp):
    OPERATOR = '-'

    def get_size(self):
        return self.left.get_size() - self.right.get_size()


class SizedObjMul(SizedObjBinOp):
    OPERATOR = '*'

    def get_size(self):
        return self.left.get_size() * self.right.get_size()


class SizedObjDiv(SizedObjBinOp):
    OPERATOR = '/'

    def get_size(self):
        # Integer (floor) division keeps sizes integral.
        return self.left.get_size() // self.right.get_size()


def sized_obj(obj):
    """Coerce *obj* (int, Span, or SizedObj) into a SizedObj expression node."""
    if isinstance(obj, int):
        return SizedObj(obj)
    elif isinstance(obj, Span):
        # Wrap spans so the expression re-reads the span's size on evaluation.
        return SizedObjPos(obj)
    elif isinstance(obj, SizedObj):
        return obj
    else:
        raise SpanError, "invalid type %s" % type(obj).__name__


class Span(SizedObj):
    """A node in the layout tree.

    Children are kept both in ``children`` (all, in insertion order) and in
    per-category lists that determine allocation priority during update().
    """

    def __init__(self, children=None, parent=None):
        super(Span, self).__init__()
        self.parent = None
        self.root = None
        self.level = None
        self.children = []
        self.fixed_children = []
        self.total_fraction_children = []
        self.free_fraction_children = []
        self.filler_children = []
        self.flexible_children = []
        self._set_parent(parent)
        if children:
            for child in children:
                self.add_child(child)
        self._setup()

    def set_root(self):
        """Mark this span as the tree root and assign levels below it."""
        self._set_root(self, 0)

    def _set_root(self, root, level):
        self.root = root
        self.level = level
        for child in self.children:
            child._set_root(root, level+1)

    def reset(self):
        """Clear computed sizes for this subtree."""
        self._set_size(None)
        for child in self.children:
            child.reset()

    def dump(self):
        """Return an indented textual description of the subtree, one line per span."""
        ind = ' '*self.level
        if self.children:
            l = ["%s%s -> %s (%s)" % (ind, self, self._current_size, self._current_unassigned_size)]
        else:
            l = ["%s%s -> %s" % (ind, self, self._current_size)]
        for child in self.children:
            l.extend(child.dump())
        return l

    def __str__(self):
        return "%s[%s]" % (self.__class__.__name__, self.get_size())

    def _setup(self):
        # Per-update bookkeeping: unassigned space, accumulated float
        # rounding error, and the per-filler slice of leftover space.
        self._set_size(0)
        self._current_unassigned_size = 0
        self._current_float_rounding = 0.0
        self._current_filling_size = 0

    def _set_parent(self, parent):
        if parent is None:
            return
        assert isinstance(parent, Span), "parent is not a Span: %s" % parent
        assert self.parent is None, "cannot change parent"
        parent.add_child(self)

    def add_child(self, child):
        """Adopt *child*, filing it into the category list that matches its type."""
        assert isinstance(child, Span), "child is not a Span"
        assert child.parent is None, "cannot get non-orphan child (%s->%s/%s)" % (self, child, child.parent)
        child.parent = self
        self.children.append(child)
        # Order matters: FixedSpan before the FlexibleSpan subclasses.
        if isinstance(child, FixedSpan):
            self.fixed_children.append(child)
        elif isinstance(child, FillerSpan):
            self.filler_children.append(child)
        elif isinstance(child, TotalFractionalSpan):
            self.total_fraction_children.append(child)
        elif isinstance(child, FreeFractionalSpan):
            self.free_fraction_children.append(child)
        elif isinstance(child, FlexibleSpan):
            self.flexible_children.append(child)

    def update(self):
        """Recompute this span's size, then distribute it to the children.

        NOTE(review): assumes set_root() was called first — ``self.root``
        is None otherwise, which changes the root checks below; confirm
        callers always build the tree via set_root().
        """
        if self is self.root:
            if not isinstance(self, FixedSpan):
                raise SpanError, "root span must be FixedSpan"
        self._update()
        self._current_unassigned_size = self._current_size
        # Allocation priority: fixed, then total-fraction, then
        # free-fraction, then other flexible spans, fillers last.
        for child in self.fixed_children:
            child.update()
        self._current_float_rounding = 0.0
        for child in self.total_fraction_children:
            child.update()
        self._current_float_rounding = 0.0
        self._current_free_size = self._current_unassigned_size
        for child in self.free_fraction_children:
            child.update()
        for child in self.flexible_children:
            child.update()
        if self.filler_children:
            # Fillers share whatever is left, equally.
            self._current_filling_size = self._current_unassigned_size/float(len(self.filler_children))
        for child in self.filler_children:
            child.update()

    def allocate_exact_size(self, child, size):
        """Take exactly *size* units from the parent or raise SpanError."""
        obtained_size = self.parent.reserve_exact_size(child, size)
        if obtained_size is None:
            raise SpanError, ("%s: cannot allocate size: %d" % (child, size))
        self._current_size = size

    def free_size(self):
        # Space left after fixed and total-fraction children were served.
        return self._current_free_size

    def total_size(self):
        return self._current_size

    def allocate_float_size(self, child, float_size):
        """Take (approximately) *float_size* units; rounding is carried by the parent."""
        size = self.parent.reserve_float_size(child, float_size)
        self._current_size = size

    def allocate_filling_size(self, child):
        # Take this filler's equal share of the parent's leftover space.
        self._current_size = self.parent.reserve_float_size(self, self.parent._current_filling_size)

    def allocate_size(self, child, size):
        """Take up to *size* units (best effort, possibly less)."""
        self._current_size = self.parent.reserve_size(child, size)

    def reserve_exact_size(self, child, size):
        """Grant exactly *size* units or None if not enough space remains."""
        if size <= self._current_unassigned_size:
            self._current_unassigned_size -= size
            return size
        else:
            return None

    def reserve_float_size(self, child, float_size):
        """Grant round(*float_size*) units, capped at the remaining space.

        The difference between what was asked and what was granted is
        accumulated in ``_current_float_rounding`` so errors do not build
        up across a run of fractional siblings.
        """
        size = min(self._current_unassigned_size, int(round(float_size+self._current_float_rounding, 0)))
        self._current_float_rounding += float_size - size
        self._current_unassigned_size -= size
        return size

    def reserve_size(self, child, wanted_size):
        """Grant up to *wanted_size* units, capped at the remaining space."""
        size = min(self._current_unassigned_size, wanted_size)
        self._current_unassigned_size -= size
        return size


class FixedSpan(Span):
    """A span with a constant, explicitly given integer size."""

    def __init__(self, size, children=None, parent=None):
        super(FixedSpan, self).__init__(children=children, parent=parent)
        self._size = size
        self._set_size(size)
        self.set_size(size)

    def set_size(self, size):
        assert isinstance(size, int), "invalid size of type '%s' for %s" % (type(size).__name__, self.__class__.__name__)
        self._size = size

    def _update(self):
        size = self.get_size()
        # The root keeps the size given at construction; every other fixed
        # span claims its size from the parent's unassigned space.
        if not self is self.root:
            # self._current_size = size
            #else:
            self.allocate_exact_size(self, size)

    def get_size(self):
        return self._size

    def __str__(self):
        return "%s(%s)[%s]" % (self.__class__.__name__, self._size, self.get_size())


class FixedDependentSpan(FixedSpan):
    """A fixed span whose size is read from a SizedObj expression each update.

    e.g. ``FixedDependentSpan(a + b + c)`` always matches the combined
    current size of spans a, b and c.
    """

    def __init__(self, sized_obj, children=None, parent=None):
        super(FixedDependentSpan, self).__init__(size=0, children=children, parent=parent)
        self.set_sized_obj(sized_obj)

    def set_sized_obj(self, sized_obj):
        assert isinstance(sized_obj, SizedObj), "invalid sized_obj of type '%s' for %s" % (type(sized_obj).__name__, self.__class__.__name__)
        self._sized_obj = sized_obj

    def set_size(self, size):
        # Overrides FixedSpan.set_size: wrap plain values via the module-level
        # sized_obj() coercion (the parameter name does not shadow it here).
        self.set_sized_obj(sized_obj(size))

    def get_size(self):
        return self._sized_obj.get_size()

    def __str__(self):
        return "%s(%s)[%s]" % (self.__class__.__name__, self._sized_obj, self._sized_obj)


class AdaptiveSpan(FixedSpan):
    """A FixedSpan whose size simply defaults to 0."""

    def __init__(self, size=0, children=None, parent=None):
        super(AdaptiveSpan, self).__init__(size, children=children, parent=parent)


class FlexibleSpan(Span):
    """Marker base class for spans sized during distribution, not fixed up front."""
    pass


class FractionalSpan(FlexibleSpan):
    """A flexible span sized as a fraction (0.0-1.0) of some reference amount."""

    def __init__(self, fraction, children=None, parent=None):
        super(FractionalSpan, self).__init__(children=children, parent=parent)
        self.set_fraction(fraction)

    def get_fraction(self):
        return self._fraction

    def set_fraction(self, fraction):
        assert isinstance(fraction, float), "invalid fraction of type '%s' for %s" % (type(fraction).__name__, self.__class__.__name__)
        assert 0.0 <= fraction <= 1.0, "invalid fraction '%s' for '%s'" % (fraction, self.__class__.__name__)
        self._fraction = fraction

    def __str__(self):
        return "%s(%s)[%s]" % (self.__class__.__name__, self._fraction, self.get_size())


class TotalFractionalSpan(FractionalSpan):
    """Sized as a fraction of the parent's *total* size."""

    def __init__(self, fraction, children=None, parent=None):
        super(TotalFractionalSpan, self).__init__(fraction, children=children, parent=parent)

    def get_percentage(self):
        # NOTE(review): _percentage is only defined after set_percentage()
        # is called — constructing via the fraction alone leaves it unset.
        return self._percentage

    def set_percentage(self, percentage):
        assert isinstance(percentage, float), "invalid percentage of type '%s' for %s" % (type(percentage).__name__, self.__class__.__name__)
        assert 0.0 <= percentage <= 100.0, "invalid percentage '%s' for '%s'" % (percentage, self.__class__.__name__)
        self._percentage = percentage
        self.set_fraction(self._percentage/100.0)

    def _update(self):
        self.allocate_float_size(self, self.parent.total_size()*self._fraction)


class TotalPercentualSpan(TotalFractionalSpan):
    """TotalFractionalSpan constructed from a percentage (0.0-100.0)."""

    def __init__(self, percentage, children=None, parent=None):
        assert isinstance(percentage, float), "invalid percentage of type '%s' for %s" % (type(percentage).__name__, self.__class__.__name__)
        assert 0.0 <= percentage <= 100.0, "invalid percentage '%s' for '%s'" % (percentage, self.__class__.__name__)
        self._percentage = percentage
        super(TotalPercentualSpan, self).__init__(self._percentage/100.0, children=children, parent=parent)

    def __str__(self):
        return "%s(%s)[%s]" % (self.__class__.__name__, self._percentage, self.get_size())


class FreeFractionalSpan(FractionalSpan):
    """Sized as a fraction of the parent's *free* (still unassigned) size."""

    def __init__(self, fraction, children=None, parent=None):
        super(FreeFractionalSpan, self).__init__(fraction, children=children, parent=parent)

    def _update(self):
        self.allocate_float_size(self, self.parent.free_size()*self._fraction)


class FreePercentualSpan(FreeFractionalSpan):
    """FreeFractionalSpan constructed from a percentage (0.0-100.0)."""

    def __init__(self, percentage, children=None, parent=None):
        assert isinstance(percentage, float), "invalid percentage of type '%s' for %s" % (type(percentage).__name__, self.__class__.__name__)
        assert 0.0 <= percentage <= 100.0, "invalid percentage '%s' for '%s'" % (percentage, self.__class__.__name__)
        self._percentage = percentage
        super(FreePercentualSpan, self).__init__(self._percentage/100.0, children=children, parent=parent)

    def get_percentage(self):
        return self._percentage

    def set_percentage(self, percentage):
        assert isinstance(percentage, float), "invalid percentage of type '%s' for %s" % (type(percentage).__name__, self.__class__.__name__)
        assert 0.0 <= percentage <= 100.0, "invalid percentage '%s' for '%s'" % (percentage, self.__class__.__name__)
        self._percentage = percentage
        self.set_fraction(self._percentage/100.0)

    def __str__(self):
        return "%s(%s)[%s]" % (self.__class__.__name__, self._percentage, self.get_size())


class FillerSpan(FlexibleSpan):
    """Absorbs an equal share of whatever space is left after all other children."""

    def _update(self):
        self.allocate_filling_size(self)


def span(obj, children=None, parent=None):
    """Factory: build the span type matching *obj* (None/int/float/SizedObj).

    NOTE(review): FreeFloatPercentage and TotalFloatPercentage are not
    defined anywhere in this module — the float branches would raise
    NameError; likely meant FreePercentualSpan / TotalPercentualSpan.
    Likewise FixedSpan asserts an int size, so the SizedObj branch would
    trip that assert — confirm intended behavior before relying on it.
    """
    if obj is None:
        return FillerSpan(children=children, parent=parent)
    elif isinstance(obj, int):
        return FixedSpan(obj, children=children, parent=parent)
    elif isinstance(obj, float):
        if obj < 0.0:
            return FreeFloatPercentage(obj, children=children, parent=parent)
        else:
            return TotalFloatPercentage(obj, children=children, parent=parent)
    elif isinstance(obj, SizedObj):
        return FixedSpan(obj, children=children, parent=parent)
    else:
        return None


if __name__ == "__main__":
    # Demo: two fixed 100-unit roots. s1 contains a FixedDependentSpan that
    # tracks the combined size of a, b and c from s0.
    # NOTE(review): set_root() is never called here, so self.root stays None
    # inside update() — verify the demo actually runs as intended.
    a = FillerSpan()
    b = FixedSpan(
        40,
    )
    c = FillerSpan()
    s0 = FixedSpan(
        100,
        (
            TotalPercentualSpan(
                2.5,
            ),
            a,
            b,
            c,
            TotalPercentualSpan(
                1.1,
            ),
        ),
    )
    s1 = FixedSpan(
        100,
        (
            FreePercentualSpan(
                49.0,
            ),
            FillerSpan(),
            FixedDependentSpan(
                (a+b+c),
            ),
            FixedSpan(
                4,
            ),
            FillerSpan(),
            FreePercentualSpan(
                51.0,
            ),
        ),
    )
    #print '\n'.join(s0.dump())
    s0.update()
    print '\n'.join(s0.dump())
    print
    #print '\n'.join(s1.dump())
    s1.update()
    print '\n'.join(s1.dump())
true
43150a1c2494481111af193036cdf216f429e989
Python
Utkarsh2802/gspyproj
/random/4.py
UTF-8
539
3.28125
3
[]
no_license
def maxSubArraySum(a, size):
    """Print (best subarray sum) - (largest element of that subarray).

    Runs Kadane's algorithm over the first *size* elements of *a*,
    tracking the start/end indices of the best window so that window's
    maximum element can be subtracted from its sum before printing.
    """
    max_so_far = -9999999999  # sentinel below any expected input sum
    max_ending_here = 0
    start = 0
    end = 0
    s = 0  # candidate start of the window currently being extended
    for i in range(size):
        max_ending_here += a[i]
        if max_so_far <= max_ending_here:
            max_so_far = max_ending_here
            start = s
            end = i
        if max_ending_here < 0:
            # A negative prefix can never help: restart after position i.
            max_ending_here = 0
            s = i + 1
    print(max_so_far - max(a[start:end + 1]))


def main():
    """Read n and then n space-separated integers from stdin, and report."""
    nn = int(input())
    arr = list(map(int, input().split()))
    maxSubArraySum(arr, nn)


# BUGFIX: the stdin reads ran at module import time; guard them so the
# module can be imported without blocking on input().
if __name__ == "__main__":
    main()
true
1ff4b63a433079a022f3907050368e8fdaa4ca97
Python
ck89119/Algorithm
/LeetCode/minimum_depth_of_binary_tree.py
UTF-8
587
3.375
3
[]
no_license
#!/usr/bin/python
# Definition for a binary tree node
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None


class Solution:
    # @param root, a tree node
    # @return an integer
    def minDepth(self, root):
        """Return the number of nodes on the shortest root-to-leaf path.

        An empty tree has depth 0.  A missing child is skipped (a path
        must end at a real leaf), so a one-sided node recurses only into
        the child that exists — fixing nothing vs. the original's
        99999999999 sentinels, just expressing it directly and using
        ``is None`` instead of ``== None``.
        """
        if root is None:
            return 0
        if root.left is None and root.right is None:
            return 1
        if root.left is None:
            return self.minDepth(root.right) + 1
        if root.right is None:
            return self.minDepth(root.left) + 1
        return min(self.minDepth(root.left), self.minDepth(root.right)) + 1
true
2610fc380cdf39684dbd2ca8984e3691f22169c0
Python
NieGuozhang/Python-Spider
/07.动态加载数据处理/01.演示程序.py
UTF-8
1,083
2.984375
3
[]
no_license
"""Selenium demo: drive Chrome through the yuanrenxue.com home page —
scrolling, clicking a slider link, and submitting a search query."""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time

# Launch Chrome using the driver binary next to this script.
# The r'' raw string prevents backslash escapes in the path.
driver = webdriver.Chrome(r'./chromedriver')
# Maximize the browser window.
driver.maximize_window()
# Open the yuanrenxue.com home page.
driver.get('https://www.yuanrenxue.com')
time.sleep(3)
# Scroll to the middle of the page.
driver.execute_script("window.scrollTo(0,document.body.scrollHeight/2)")
time.sleep(3)
# Scroll to the bottom of the page.
driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
time.sleep(3)
# Scroll back to the top of the page.
driver.execute_script("window.scrollTo(0,0)")
time.sleep(3)
# Locate the link by its HTML class attribute and click it.
driver.find_element_by_class_name('slide-left').click()
time.sleep(3)
# Locate the search icon in the top-right corner and click it.
driver.find_element_by_class_name('search-show').click()
# Find the search input box.
search = driver.find_element_by_class_name("search-input")
# Type the query ("python教程" = "python tutorial").
search.send_keys(u'python教程')
time.sleep(7)
# Press Enter to submit the search.
search.send_keys(Keys.RETURN)
time.sleep(5)
driver.quit()
true
51d8ca578f6048bd52db01f636077a6439b22a9f
Python
devyash/ctci
/arrays and strings/URLify.py
UTF-8
611
3.921875
4
[]
no_license
""" replace " " with %20 in a string """ def urlify(string1, length1): """ Args: string, length of string Return: replace string """ string2 = "" for i in range(length1): if string1[i] == " ": string2 = string2 + "%20" else: string2 = string2 + string1[i] return string2 def main(): """ args: None return: Print URLified string """ string_test = "Mr John Smith " length_test = 13 print("Replaced string: " + urlify(string_test, length_test)) if __name__ == '__main__': main()
true
974c57c800d76938c4c563d5ddc7b0359a8e746b
Python
omryzw/FakeNewsDetectionNLP
/mixdop.py
UTF-8
10,365
2.90625
3
[]
no_license
# this module checks if the article already exists
import math
import string
import pymysql
import re
from datetime import date
import random

# Shared connection/cursor used by every query in this module.
# NOTE(review): credentials are hard-coded here in the original; they should
# live in configuration, not source.
mydb = pymysql.connect(
    host="35.224.191.214",
    user="omrizw",
    password="omoomo97",
    database="edith"
)
mycursor = mydb.cursor(pymysql.cursors.DictCursor)

# translation table is a global variable
# mapping upper case to lower case and
# punctuation to spaces
translation_table = str.maketrans(
    string.punctuation + string.ascii_uppercase,
    " " * len(string.punctuation) + string.ascii_lowercase)


def get_words_from_line_list(text):
    """Lower-case *text*, strip punctuation, and return its word list."""
    text = text.translate(translation_table)
    word_list = text.split()
    return word_list


def count_frequency(word_list):
    """Return a dict mapping each word in *word_list* to its frequency."""
    D = {}
    for new_word in word_list:
        if new_word in D:
            D[new_word] = D[new_word] + 1
        else:
            D[new_word] = 1
    return D


def word_frequencies_for_file(filename):
    """Return the (word -> frequency) mapping for a document.

    Despite the parameter name, *filename* is the document TEXT itself
    (the file-reading step was removed upstream).
    """
    line_list = filename
    word_list = get_words_from_line_list(line_list)
    freq_mapping = count_frequency(word_list)
    return freq_mapping


def dotProduct(D1, D2):
    """Return the dot product of two frequency dictionaries."""
    Sum = 0.0
    for key in D1:
        if key in D2:
            Sum += (D1[key] * D2[key])
    return Sum


def vector_angle(D1, D2):
    """Return the angle (radians) between two frequency vectors."""
    numerator = dotProduct(D1, D2)
    denominator = math.sqrt(dotProduct(D1, D1) * dotProduct(D2, D2))
    return math.acos(numerator / denominator)


def documentSimilarity(filename_1, filename_2):
    """Return the vector-angle distance between two document texts
    (0 means identical word distributions)."""
    sorted_word_list_1 = word_frequencies_for_file(filename_1)
    sorted_word_list_2 = word_frequencies_for_file(filename_2)
    distance = vector_angle(sorted_word_list_1, sorted_word_list_2)
    return distance


def checkDocSimilarity(qdoc):
    """Find the stored article most similar to *qdoc* and hand off to
    finalizeSimilarity.

    BUG FIX: the original file defined this function TWICE; only the
    second definition (which breaks early on a partial substring match
    and does not clobber minID with 0) was live, so it is the one kept.
    A sentinel minscore of 1000 signals "qdoc is contained verbatim in a
    stored article".
    """
    mycursor.execute("SELECT * FROM articles")
    myresult = mycursor.fetchall()
    minscore = documentSimilarity(qdoc, myresult[0]['article'])
    minID = myresult[0]['sourceid']
    articleid = myresult[0]['articleid']
    i = 0
    while i < len(myresult):
        # check if the text exists partially (substring match)
        if myresult[i]['article'].find(qdoc) != -1:
            minscore = 1000
            minID = myresult[i]['sourceid']
            articleid = myresult[i]['articleid']
            break
        else:
            distance = documentSimilarity(qdoc, myresult[i]['article'])
            if distance < minscore:
                minscore = distance
                minID = myresult[i]['sourceid']
                articleid = myresult[i]['articleid']
        i += 1
    return finalizeSimilarity(qdoc, minscore, minID, articleid)


def finalizeSimilarity(qdoc, minscore, minID, articleid):
    """Either return the stored classification of a near-duplicate
    article, or continue the pipeline by scanning *qdoc* for links.

    minscore == 1000 means qdoc partially exists in article *articleid*;
    minscore < 0.8 means the closest article is similar enough to reuse.
    """
    if((minscore < 0.8) or (minscore == 1000)):
        # article already exist so send back full article and label
        sql = "SELECT article,atype FROM articles WHERE articleid = %s"
        query = (articleid,)
        mycursor.execute(sql, query)
        myresult = mycursor.fetchall()
        mydb.commit()
        final = []
        for x in myresult:
            final.append(x['article'])
            final.append(x['atype'])
        return formatforMessage('similar', final)
    else:
        return scanforLinks(qdoc)


def normalizeRate(score):
    """Map a 0-100 score onto the 0-5 star scale (floored)."""
    OldMax = 100
    OldMin = 0
    NewMax = 5
    NewMin = 0
    OldValue = score
    OldRange = (OldMax - OldMin)
    NewRange = (NewMax - NewMin)
    NewValue = (((OldValue - OldMin) * NewRange) / OldRange) + NewMin
    return math.floor(NewValue)


def scanforLinks(article):
    """Extract URLs from *article*; if the first URL is a known source,
    score against it, otherwise fall back to a media-mention scan."""
    regex = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
    url = re.findall(regex, article)
    links = [x[0] for x in url]
    if len(links) != 0:
        # get score
        sql = "SELECT score,sourceid FROM sources WHERE source = %s"
        query = (links[0],)
        mycursor.execute(sql, query)
        myresult = mycursor.fetchall()
        if mycursor.rowcount != 0:
            # meaning that the link exists in the database
            final = []
            for x in myresult:
                final.append(x['score'])
                final.append(x['sourceid'])
            # send score & article to NLP
            mydb.commit()
            return nlpCheck(final[0], final[1], article)
        else:
            # link exists but not in database so it is an unknown source
            return scanforMediaMention(article)
    elif len(links) == 0:
        # if no links scan for media mention else return score
        return scanforMediaMention(article)


def scanforMediaMention(article):
    """Look for a known author/source name mentioned in *article*; score
    against the first match, or as an unknown (0.4) source otherwise."""
    # compare authors from database to aricle
    sql = "SELECT score,source FROM sources WHERE stype = 0"
    mycursor.execute(sql)
    myresult = mycursor.fetchall()
    mydb.commit()
    final = []
    for row in myresult:
        if row['source'] in article:
            final.append(row['score'])
            final.append(row['source'])
    if (len(final) != 0):
        # author exists
        return nlpCheck(final[0], final[1], article)
    else:
        # unknown are given a rating of 40%
        return nlpCheck(0.4, 'aaa222', article)


def finalscore(article, sscore, nscore, source):
    """Combine the source score and NLP score, record the rating, store
    the article, and return the user-facing message."""
    # get weights from file
    # (local renamed from `finalscore`, which shadowed this function)
    combined = math.floor(((sscore * 75) + nscore))
    nomScore = normalizeRate(combined)
    if (addRate(source, nomScore) == 1):
        adjustSourceScore(source, combined)
    final = []
    final.append(combined)
    # >= 79 percent counts as real news (atype 1), otherwise fake (0)
    if combined >= 79:
        addToExistingDataSet(source, article, 1)
    else:
        addToExistingDataSet(source, article, 0)
    return formatforMessage('nlp', final)


def addRate(source, nomScore):
    """Persist a single 0-5 rating for *source*; always returns 1."""
    sql = "INSERT INTO rates(source,score) VALUES (%s,%s)"
    query = (source, nomScore)
    mycursor.execute(sql, query)
    mydb.commit()
    return 1


def adjustSourceScore(source, finalscore):
    """Once a source has more than 100 ratings, recompute its 0-1 score
    from the rating histogram. Returns 1 on update, 0 otherwise.

    NOTE(review): the *finalscore* argument is unused here in the
    original; kept for interface compatibility.
    """
    f1 = 0
    f2 = 0
    f3 = 0
    f4 = 0
    f5 = 0
    sql = "SELECT score FROM rates WHERE source = %s"
    query = (source,)
    mycursor.execute(sql, query)
    if mycursor.rowcount > 100:
        myresult = mycursor.fetchall()
        mydb.commit()
        # Tally the 1..5 star frequencies.
        for x in myresult:
            if x['score'] == 1:
                f1 = f1 + 1
            elif x['score'] == 2:
                f2 = f2 + 1
            elif x['score'] == 3:
                f3 = f3 + 1
            elif x['score'] == 4:
                f4 = f4 + 1
            elif x['score'] == 5:
                f5 = f5 + 1
        ftotal = f1 + f2 + f3 + f4 + f5
        sum_score = (f1 * 1) + (f2 * 2) + (f3 * 3) + (f4 * 4) + (f5 * 5)
        new_score = sum_score / ftotal
        normalizedScore = normalizeUpdatedScore(new_score)
        sql2 = "UPDATE sources SET score = %s WHERE source = %s"
        query2 = (normalizedScore, source,)
        mycursor.execute(sql2, query2)
        mydb.commit()
        return 1
    else:
        return 0


def normalizeUpdatedScore(score):
    """Map an average 0-5 rating onto the 0-1 source-score scale."""
    OldMax = 5
    OldMin = 0
    NewMax = 1
    NewMin = 0
    OldValue = score
    OldRange = (OldMax - OldMin)
    NewRange = (NewMax - NewMin)
    NewValue = (((OldValue - OldMin) * NewRange) / OldRange) + NewMin
    return NewValue


def formatforMessage(mtype, result):
    """Build the user-facing message.

    mtype tells us how the *result* list is arranged and where it came
    from: 'similar' -> [article_text, atype]; 'nlp' -> [percent_score].
    """
    status = ''
    if mtype == 'similar':
        if result[1] == 0:
            status = 'Fake'
        elif result[1] == 1:
            status = 'Real'
        message = 'This article matches an article has been already been classified as ' + status + ' News Please view the full article here : ' + str(result[0])
    elif mtype == 'nlp':
        if(result[0] >= 80):
            message = 'This article has been classified as ' + str(result[0]) + ' percent Real news. This means the article came from a trustworthy source.'
        elif(result[0] <= 79 and result[0] >= 70):
            message = 'This article has been classified at ' + str(result[0]) + ' percent. This means the article might contain bits of false information, please take caution when sharing such articles.'
        elif(result[0] <= 69):
            message = 'This article has been classified as Fake News! at ' + str(result[0]) + ' percent.Please do not further share this article'
    else:
        message = 'We have encounterd an error'
    return message


def nlpCheck(sscore, source, article):
    """Stub NLP stage: currently returns a fixed NLP score of 24.4.

    NOTE(review): placeholder — presumably to be replaced by a real
    model call; confirm before relying on the combined score.
    """
    nscore = 24.4
    return finalscore(article, sscore, nscore, source)


def addToExistingDataSet(source, article, atype):
    """Store *article* with label *atype* under a random 6-char article
    id, today's date, and the default 'customer' user; returns 1."""
    # generate random article id
    N = 6
    res = ''.join(random.choices(string.ascii_uppercase + string.digits, k=N))
    articleid = str(res)
    # get date
    today = str(date.today())
    # default username is customer
    uname = 'customer'
    sql = "INSERT INTO articles (articleid,article,atype,adate,sourceid,uname) VALUES(%s,%s,%s,%s,%s,%s)"
    query = (articleid, article, atype, today, source, uname,)
    mycursor.execute(sql, query)
    mydb.commit()
    return 1
true
2f6da0839a9a52bbcfa9e190f79af10f79a3f5a0
Python
AaronNolan/College-Lab-Sheets
/word-counts.py
UTF-8
144
2.96875
3
[]
no_license
#!/usr/bin/env python
"""Print the word count of every line read from standard input."""
import sys

for line in sys.stdin.readlines():
    # Whitespace-split the line; the number of pieces is the word count.
    print(len(line.split()))
true
07778d0438d30d23a7b5966914aadcc3c398cf64
Python
yuansuixin/Python_Learning
/fishc/009/test1.py
UTF-8
432
3.34375
3
[]
no_license
# Verify a user password: up to 3 attempts against the stored password.
times = 3
password = 'hello'
temp = input("请输入密码:")
while times > 0:
    # '*' is not allowed in a password; re-prompt without using an attempt.
    if '*' in temp:
        temp = input("密码中不能有*,您还有3次机会,请重新输入:")
        continue
    times -= 1
    if temp == password:
        print("密码成功,进入程序。。")
        break
    else:
        # BUG FIX: the retry used to be stored in `password`, so the
        # re-entered value was never compared; store it in `temp` instead.
        temp = input("密码错误,您还有" + str(times) + "次机会,请重新输入:")
true
97ac12936e425c18f869fa3cf57996439d433d77
Python
ALICE5/Python
/py_demo/哥德巴赫猜想.py
UTF-8
679
3.59375
4
[]
no_license
# usr/bin/env python3
"""Empirically check Goldbach's conjecture for all even numbers up to n."""
import time
from math import sqrt


def isprime(p):
    """Trial-division primality test.

    Note: returns True for p < 2, matching the original lambda's
    behaviour; callers in this script only pass values >= 2.
    """
    # Generator (not a list) lets all() short-circuit on the first divisor.
    return all(p % d for d in range(2, int(sqrt(p)) + 1))


def main(n=100000):
    """Verify every even number in [6, n] is the sum of two primes.

    *n* generalizes the original hard-coded 100000 upper bound.
    """
    start = time.time()
    for i in range(6, n + 1, 2):
        for j in range(2, i // 2 + 1):
            if isprime(j) and isprime(i - j):
                break  # found a decomposition i = j + (i - j)
        else:
            # inner loop exhausted without a break -> counterexample found
            print('fail at {}'.format(i))
            break
        print('\r{:.2f}%'.format(i / n * 100), end='')
    else:
        print('\nsuccess: all evens in {} is the sum of two primes.'.format(n))
    end = time.time()
    print('consum time: {:.3f} seconds'.format(end - start))


if __name__ == '__main__':
    main()
true
e65bd5b4c05dc3fbf2b3aea46cadedbe7aba9c20
Python
melvic-ybanez/nqueens
/NQueens/NQueens.py
UTF-8
1,544
3.546875
4
[]
no_license
'''
Created on Dec 14, 2014

@author: melvic
'''
import sys


def has_row_threats(board, col):
    """True if the queen in column *col* shares a row with an earlier queen."""
    predicate = lambda i: i != col and board[col] == board[i]
    return has_threats(col, predicate)


def has_diagonal_threats(board, col):
    """True if the queen in column *col* shares a diagonal with an earlier queen."""
    predicate = lambda i: abs(i - col) == abs(board[i] - board[col]) != 0
    return has_threats(col, predicate)


def has_threats(col, predicate):
    """True if *predicate* holds for any column index before *col*."""
    for i in range(col):
        if predicate(i):
            return True
    return False


def generate_solutions(board_size):
    """Yield every distinct N-queens placement via iterative backtracking.

    Each solution is a list mapping column index -> row of its queen.
    NOTE: the same list object is yielded (and then mutated) each time;
    copy it if you need to keep a solution after advancing the generator.
    """
    board = [0]
    size = lambda: len(board)
    while True:
        if board[-1] > board_size - 1:
            # Row index ran off the board: backtrack to the previous column.
            if size() == 1:
                return
            del board[-1]
            board[-1] += 1
        elif has_row_threats(board, size() - 1) or has_diagonal_threats(board, size() - 1):
            # Current placement is attacked: try the next row.
            board[-1] += 1
        elif size() == board_size:
            # Full, conflict-free board: emit it, then keep searching.
            yield board
            board[-1] += 1
        else:
            # Safe so far: place a queen in the next column, starting at row 0.
            board.append(0)


if __name__ == '__main__':
    # Ported from Python 2: print statements -> print(), raw_input -> input.
    def get_board_size():
        board_size = input('Enter the size of the board:')
        try:
            return int(board_size)
        except ValueError:
            pass

    board_size = get_board_size()
    if not board_size:
        print('Invalid board size')
        sys.exit()

    solutions = generate_solutions(board_size)
    count = 0
    for solution in solutions:
        print(solution)
        count += 1
    print('Total number of distinct solutions:', count)
true
47aa3b0d65091b8db945399e0ad5c656cab515d9
Python
vapelavsky/intersog-testtask
/main.py
UTF-8
4,130
4.375
4
[]
no_license
# Test Task for Intersog
class Human:
    """This is the human from TikTok and she can entertain you.
    Available methods:
    1. drink
    2. travel
    3. sum calculate
    4. sleep
    5. show hobbies"""
    # Class-level annotations documenting the instance attributes
    # (actual storage is in name-mangled private fields set by __init__).
    name: str
    age: int
    sex: str
    hobbies: list
    country: str
    job: str
    hair: str
    height: int

    def get_name(self):
        return self.__name

    def set_name(self, name: str):
        self.__name = name

    def get_age(self):
        return self.__age

    def set_age(self, age: int):
        # BUG FIX: this used to assign to the public `self.age`, leaving the
        # private `__age` read by get_age() unchanged (setter was a no-op on
        # the read path). Write the private field instead.
        self.__age = age

    def get_sex(self):
        return self.__sex

    def set_sex(self, sex: str):
        self.__sex = sex

    def get_height(self):
        return self.__height

    def set_height(self, height: float):
        self.__height = height

    def get_hair(self):
        return self.__hair

    def set_hair(self, hair: str):
        self.__hair = hair

    def get_job(self):
        return self.__job

    def set_job(self, job: str):
        self.__job = job

    def get_hobbies(self, hobbies: list):
        # NOTE(review): the `hobbies` parameter is unused; kept for
        # interface compatibility with existing callers.
        return self.__hobbies

    def set_hobbies(self, hobbies: list):
        self.__hobbies = hobbies

    def __init__(self, name, age, sex, hobbies, country, job, hair, height):
        """Store all attributes and print a self-introduction."""
        self.__name = name
        self.__age = age
        self.__sex = sex
        self.__hobbies = hobbies
        self.__country = country
        self.__job = job
        self.__hair = hair
        self.__height = height
        print(f"Hello! My name is {self.__name} with {self.__hair} hair. \n"
              f"I'm {self.__age} years old and my height is {self.__height}. \n"
              f"My sex is {self.__sex}. \n"
              f"I'm from {self.__country}. \n"
              f"I'm working as {self.__job}")
        print("My hobbies is")
        for hobby in self.__hobbies:
            print(hobby)

    def __call__(self, username):
        return f'Hello, my name is Zuzie! Nice to meet you, {username}'

    def __str__(self, username, friendname):
        # NOTE(review): this signature is not a valid __str__ dunder
        # (str() passes only self and would raise TypeError); kept as-is
        # because callers may invoke it explicitly with two arguments.
        return f'My name is Zuzie, your name is {username}, name of your friend is {friendname}'

    @staticmethod
    def drink(name):
        return f'So, I drunk {name}'

    def travel(self, place):
        return f'I travelled from {self.__country} to the {place}'

    @staticmethod
    def calculate(a, b):
        return f"I'm very smart, so I can calculate sum of given numbers.\n" \
               f"Given numbers is {a} and {b}.\n" \
               f"So, your answer is {a + b} \n"

    @staticmethod
    def sleep(hours):
        return f"Ok, dear, I will sleep for {hours} hours"

    def showhobbies(self):
        """Print each hobby, then return a closing message."""
        print("My hobbies is")
        for hobby in self.__hobbies:
            print(hobby)
        return "Great, that's all"


class Child(Human):
    """A Human who is forced to be 'Unemployed' while under 16."""

    def __init__(self, name, age, sex, hobbies, country, job, hair, height):
        if age < 16:
            job = "Unemployed"
        super(Child, self).__init__(name, age, sex, hobbies, country, job, hair, height)

    def set_job(self, job: str):
        # BUG FIX: the original read `self.__age` / wrote `self.__job`, which
        # name-mangle to _Child__* and crash — the data lives on Human as
        # _Human__*. Go through the parent accessors instead. Ages >= 16
        # keep the original (no-op) behaviour of this override.
        if self.get_age() < 16:
            super().set_job("Unemployed")

    def walk(self):
        print(f"{self.get_name()} walked successfully")

    def work(self):
        if self.get_job() == "Unemployed":
            print(f"I'm child, come on.")


if __name__ == "__main__":
    human = Human(name="Zuzie", age=26, sex="Female",
                  hobbies=["Music", "Active leisure", "TikTok"],
                  country="Ukraine", job="Database crasher on production",
                  hair="Blue", height=180)

    print(human.drink("Cola"))
    print(human.travel("Switzerland"))
    print(human.calculate(12, 14))
    print(human.sleep(12))
    print(human.showhobbies())

    child = Child(name="Zizzie", age=13, sex="Male",
                  hobbies=["TikTok", "Active leisure", "Computers"],
                  country="Ukraine", job="Database crasher on production",
                  hair="Yellow", height=156)
    child.walk()
    child.work()
true
5fba652d432a1dc5e5c4af102319b4f28a9e00de
Python
AndrewLrrr/otus-big-data
/hw1-data-gathering/storages/tests/test_storages.py
UTF-8
2,336
2.765625
3
[]
no_license
import os
import shutil
import unittest

from storages import file_storage


class TestFileStorage(unittest.TestCase):
    """Exercises FileStorage CRUD behaviour against a throwaway directory."""

    cache_prefix = 'test'

    def setUp(self):
        # Fresh storage per test; remember where it writes so we can clean up.
        self.c = file_storage.FileStorage(self.cache_prefix)
        self.test_dir = self.c._directory_path

    def tearDown(self):
        # Remove whatever the test left behind on disk.
        if os.path.isdir(self.test_dir):
            shutil.rmtree(self.test_dir)

    def test_put(self):
        self.c._directory_path = self.test_dir
        self.assertTrue(self.c.put('f', 'test'))
        self.assertEqual(1, len(os.listdir(self.test_dir)))

    def test_get(self):
        self.c._directory_path = self.test_dir
        self.c.put('f', 'test')
        # A second instance must see what the first one wrote.
        reader = file_storage.FileStorage(self.cache_prefix)
        reader._directory_path = self.test_dir
        self.assertEqual('test', reader.get('f'))

    def test_has(self):
        self.c._directory_path = self.test_dir
        self.c.put('f', 'test')
        reader = file_storage.FileStorage(self.cache_prefix)
        reader._directory_path = self.test_dir
        self.assertTrue(reader.has('f'))
        self.assertFalse(reader.has('f2'))

    def test_delete(self):
        self.c._directory_path = self.test_dir
        self.c.put('f', 'test')
        self.assertEqual(1, len(os.listdir(self.test_dir)))
        # Deleting through a separate instance must remove the file on disk.
        remover = file_storage.FileStorage(self.cache_prefix)
        remover._directory_path = self.test_dir
        remover.delete('f')
        self.assertEqual(0, len(os.listdir(self.test_dir)))

    def test_flush(self):
        self.c._directory_path = self.test_dir
        self.c.put('f', 'test')
        self.c.put('f2', 'test2')
        self.assertEqual(2, len(os.listdir(self.test_dir)))
        self.assertTrue(os.path.isdir(self.test_dir))
        # flush() wipes the whole backing directory.
        self.c.flush()
        self.assertFalse(os.path.isdir(self.test_dir))

    def test_keys(self):
        self.c._directory_path = self.test_dir
        for key in ('f', 'f2', 'f3'):
            self.c.put(key, 'test' + key[1:])
        self.assertEqual(['f', 'f2', 'f3'], self.c.keys())
        self.c.put('f4', 'test3')
        self.assertEqual(['f', 'f2', 'f3', 'f4'], self.c.keys())
        # A deletion through one instance is visible through another.
        other = file_storage.FileStorage(self.cache_prefix)
        self.c.delete('f2')
        self.assertEqual(['f', 'f3', 'f4'], other.keys())


if __name__ == '__main__':
    unittest.main()
true
b005e314bbd1215db5b8b6ae53bca82ecfb93365
Python
jerbarnes/subjectivity_quantified
/Scripts/create_vec_reps.py
UTF-8
5,674
3
3
[]
no_license
import logging
import sys
import os
import re

from gensim.models import Word2Vec
from nltk import word_tokenize


class MySentences(object):
    """For a corpus that has a number of subfolders, each containing
    a set of text files. Supposes that in each text file, there is
    one sentence per line. Yields one tokenized sentence at a time.

    NOTE(review): despite the docstring, __iter__ opens every entry of
    `dirname` directly and does not descend into subfolders — confirm
    the corpora used here are flat directories of text files.
    """

    def __init__(self, dirname, num_sents=10e+10, encoding='utf8'):
        self.dirname = dirname
        self.num_sents = num_sents      # cap on total sentences yielded
        self.encoding = encoding

    def __iter__(self):
        lines = 0
        for fname in os.listdir(self.dirname):
            text_file = os.path.join(self.dirname, fname)
            for line in open(text_file, encoding=self.encoding):
                if lines < self.num_sents:
                    # Strip any markup tags before tokenizing.
                    line = re.sub('<.*?>', '', line)
                    lines += 1
                    yield word_tokenize(line.lower(), language='english')
                else:
                    break


class MySentences2(object):
    """Takes a list of directory names of corpora as input. Supposes
    that the corpus is arranged either directly as .txt files or as
    subdirectories containing text files. The corpora should contain
    one sentence per line. Yields one tokenized sentence at a time.

    The sentence/word caps apply GLOBALLY across all corpora.
    """

    def __init__(self, dirnames, num_sents=10e+1000, num_words=10e+1000,
                 encoding='utf8'):
        self.dirnames = dirnames
        self.num_sents = num_sents      # global cap on sentences yielded
        self.num_words = num_words      # global cap on words yielded
        self.encoding = encoding

    def __iter__(self):
        lines = 0
        words = 0
        for dirname in self.dirnames:
            for fname in os.listdir(dirname):
                # BUG FIX: was `os.path.isdir(dirname + fname)`, which
                # builds a wrong path when dirname lacks a trailing
                # separator; use os.path.join consistently.
                if os.path.isdir(os.path.join(dirname, fname)):
                    sub_dir = os.path.join(dirname, fname)
                    for fname in os.listdir(sub_dir):
                        text_file = os.path.join(sub_dir, fname)
                        for line in open(text_file, encoding=self.encoding):
                            if lines < self.num_sents and words < self.num_words:
                                line = re.sub('<.*?>', '', line)
                                lines += 1
                                tokens = word_tokenize(line.lower(), language='english')
                                words += len(tokens)
                                yield tokens
                            else:
                                break
                else:
                    text_file = os.path.join(dirname, fname)
                    for line in open(text_file, encoding=self.encoding):
                        if lines < self.num_sents and words < self.num_words:
                            line = re.sub('<.*?>', '', line)
                            lines += 1
                            tokens = word_tokenize(line.lower(), language='english')
                            words += len(tokens)
                            yield tokens
                        else:
                            break


class MySentences3(object):
    """Takes a list of directory names of corpora as input. Supposes
    that the corpus is arranged either directly as .txt files or as
    subdirectories containing text files. The corpora should contain
    one sentence per line. Yields one tokenized sentence at a time.

    Unlike MySentences2, the sentence/word caps are applied PER
    CORPUS: you can take a certain amount of sentences or words from
    each corpus used in training the gensim model.
    """

    def __init__(self, dirnames, num_sents=10e+1000, num_words=10e+1000,
                 encoding='utf8'):
        self.dirnames = dirnames
        self.num_sents = num_sents      # per-corpus cap on sentences
        self.num_words = num_words      # per-corpus cap on words
        self.encoding = encoding

    def __iter__(self):
        for dirname in self.dirnames:
            # Counters reset for every corpus directory.
            lines = 0
            words = 0
            for fname in os.listdir(dirname):
                # BUG FIX: same separator issue as in MySentences2.
                if os.path.isdir(os.path.join(dirname, fname)):
                    sub_dir = os.path.join(dirname, fname)
                    for fname in os.listdir(sub_dir):
                        text_file = os.path.join(sub_dir, fname)
                        for line in open(text_file, encoding=self.encoding):
                            if lines < self.num_sents and words < self.num_words:
                                line = re.sub('<.*?>', '', line)
                                lines += 1
                                tokens = word_tokenize(line.lower(), language='english')
                                words += len(tokens)
                                yield tokens
                            else:
                                break
                else:
                    text_file = os.path.join(dirname, fname)
                    for line in open(text_file, encoding=self.encoding):
                        if lines < self.num_sents and words < self.num_words:
                            line = re.sub('<.*?>', '', line)
                            lines += 1
                            tokens = word_tokenize(line.lower(), language='english')
                            words += len(tokens)
                            yield tokens
                        else:
                            break


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    corpus_names = ['multiun', 'opener', 'europarl']
    for corpus_name in corpus_names:
        corpus_dir_list = 'opinionfinderv2.0/database/docs/' + corpus_name
        sents = MySentences(corpus_dir_list)
        out_dir = 'LanguageModels/' + corpus_name + '/model1'
        # Train a skip-gram Word2Vec model on one corpus and save it.
        model = Word2Vec(sents, size=300, window=10, workers=5, negative=5, sg=1)
        model.save(out_dir)
true
4bae6178e55beb12e3d8ab8c5b28ffe0961ed0dc
Python
pinnakakalyani/python-programming
/assignment1.py
UTF-8
200
2.734375
3
[]
no_license
# Build a 2-D list of the characters of each word in `message`,
# e.g. [['H', 'e', 'l', 'p'], ...]
message = ['Help', 'run', 'fight', 'request']

# Explode every word into its list of characters.
print([list(word) for word in message])

# Identity mapping over a copy of the list: prints the words unchanged.
print([word for word in list(message)])
true
9214db20e5866507a629840bc2e7ed540b21cead
Python
csinva/imodelsX
/imodelsx/sasc/api.py
UTF-8
5,670
2.71875
3
[ "MIT" ]
permissive
from typing import List, Callable, Tuple, Dict
import imodelsx.sasc.m1_ngrams
import imodelsx.sasc.m2_summarize
import imodelsx.sasc.m3_generate
import numpy as np
import pprint
from collections import defaultdict


def explain_module_sasc(
    # get ngram module responses
    text_str_list: List[str],
    mod: Callable[[List[str]], List[float]],
    ngrams: int = 3,
    all_ngrams: bool = True,
    num_top_ngrams: int = 75,
    use_cache: bool = True,
    cache_filename: str = None,
    # generate explanation candidates
    llm_checkpoint: str = "text-davinci-003",
    llm_cache_dir: str = ".llm_cache",
    num_summaries: int = 3,
    num_top_ngrams_to_use: int = 30,
    num_top_ngrams_to_consider: int = 50,
    # generate synthetic strs
    num_synthetic_strs: int = 20,
    seed: int = 0,
    verbose: bool = True,
) -> Dict[str, List]:
    """
    Parameters
    ----------
    text_str_list: List[str]
        The list of text strings to use to extract ngrams
    mod: Callable[[List[str]], List[float]]
        The module to interpret
    ngrams: int
        The order of ngrams to use (3 is trigrams)
    all_ngrams: bool
        If True, use all ngrams up to ngrams. If False, use only ngrams
    num_top_ngrams: int
        The number of top ngrams to return
    use_cache: bool
        If True, use the cache
    cache_filename: str
        The filename to use for the module ngram cache
    llm_checkpoint: str
        The checkpoint to use for the llm
    llm_cache_dir: str
        The cache directory to use for the llm
    num_summaries: int
        The number of candidate explanations to generate
    num_top_ngrams_to_use: int
        The number of top ngrams to select
    num_top_ngrams_to_consider: int
        The number of top ngrams to consider selecting from
    num_synthetic_strs: int
        The number of synthetic strs to generate
    verbose: bool
        If True, print out progress
    seed: int
        The seed to use for the random number generator

    Returns
    -------
    explanation_dict: Dict[str, List]
        top_explanation_str: str
            The top explanation str
        top_explanation_score: float
            The top explanation score
        explanation_strs: List[str]
            The list of candidate explanation strs
            (this may have less entries than num_summaries if duplicate
            explanations are generated)
        explanation_scores: List[float]
            The list of corresponding candidate explanation scores
        ngrams_list: List[str]
            The list of top ngrams
        ngrams_scores: List[float]
            The list of top ngram scores
        strs_relevant: List[List[str]]
            The list of synthetically generated relevant strs
        strs_irrelevant: List[List[str]]
            The list of synthetically generated irrelevant strs
    """
    explanation_dict = defaultdict(list)

    # compute scores for each ngram
    (
        ngrams_list,
        ngrams_scores,
    ) = imodelsx.sasc.m1_ngrams.explain_ngrams(
        text_str_list=text_str_list,
        mod=mod,
        ngrams=ngrams,
        all_ngrams=all_ngrams,
        num_top_ngrams=num_top_ngrams,
        use_cache=use_cache,
        cache_filename=cache_filename,
    )
    explanation_dict["ngrams_list"] = ngrams_list
    explanation_dict["ngrams_scores"] = ngrams_scores

    # compute explanation candidates
    # NOTE(review): imodelsx.llm is not explicitly imported here — it is
    # presumably made available by the imodelsx.sasc imports above; confirm.
    llm = imodelsx.llm.get_llm(llm_checkpoint, llm_cache_dir)
    (
        explanation_strs,
        _,
    ) = imodelsx.sasc.m2_summarize.summarize_ngrams(
        llm,
        ngrams_list,
        num_summaries=num_summaries,
        num_top_ngrams_to_use=num_top_ngrams_to_use,
        num_top_ngrams_to_consider=num_top_ngrams_to_consider,
        seed=seed,
    )
    explanation_dict["explanation_strs"] = explanation_strs

    # score explanation candidates on synthetic data
    for explanation_str in explanation_strs:
        strs_rel, strs_irrel = imodelsx.sasc.m3_generate.generate_synthetic_strs(
            llm,
            explanation_str=explanation_str,
            num_synthetic_strs=num_synthetic_strs,
            verbose=verbose,
        )
        explanation_dict["strs_relevant"].append(strs_rel)
        explanation_dict["strs_irrelevant"].append(strs_irrel)

        # evaluate synthetic data (higher score is better): how much more
        # strongly the module responds to relevant vs irrelevant strings
        explanation_dict["explanation_scores"].append(
            np.mean(mod(strs_rel)) - np.mean(mod(strs_irrel))
        )

    # sort the per-candidate lists by descending explanation score
    # (the dropped `ks = list(explanation_dict.keys())` local was unused)
    sort_inds = np.argsort(explanation_dict["explanation_scores"])[::-1]
    for k in [
        "explanation_strs",
        "explanation_scores",
        "strs_relevant",
        "strs_irrelevant",
    ]:
        explanation_dict[k] = [explanation_dict[k][i] for i in sort_inds]

    # expose the best candidate under singular "top_" keys
    for k in ["explanation_strs", "explanation_scores"]:
        explanation_dict["top_" + k[:-1]] = explanation_dict[k][0]

    return explanation_dict


if __name__ == "__main__":
    # an overly simple example of a module that responds to the length of a string
    mod = lambda str_list: np.array([len(s) for s in str_list])

    # in this dataset the longest strings happen to be animals, so we are
    # searching for the explanation "animals"
    text_str_list = [
        "red",
        "blue",
        "x",
        "1",
        "2",
        "hippopotamus",
        "elephant",
        "rhinoceros",
    ]
    explanation_dict = explain_module_sasc(
        text_str_list,
        mod,
        ngrams=1,
        num_summaries=2,
        num_top_ngrams=3,
        num_top_ngrams_to_consider=3,
        num_synthetic_strs=2,
    )
    pprint.pprint(explanation_dict)
true