blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a5fb9742eca221d315e6baed5d50c0309eb85094 | Python | noalevitzky/Intro2CS | /02/test_largest_and_smallest.py | UTF-8 | 1,547 | 3.171875 | 3 | [] | no_license | # this function tests function 4 for 5 different cases
from largest_and_smallest import largest_and_smallest
def test_func_4():
    """Run largest_and_smallest against five representative cases.

    Prints a success/failure message and returns True when all cases
    pass, False otherwise. Mirrors the original nested-if behaviour:
    testing stops at the first failing case and reports a single
    "function 4 test fail" message.
    """
    # (arguments, expected largest, expected smallest) for each case.
    cases = [
        ((-1, 1, 100), 100, -1),   # case 1: ascending order
        ((100, 1, -1), 100, -1),   # case 2: descending order
        ((1, 100, -1), 100, -1),   # case 3: largest in the middle
        ((0, 0, 0), 0, 0),         # case 4: all values equal (edge case)
        ((0, 100, 0), 100, 0),     # case 5: duplicated smallest value
    ]
    for args, exp_large, exp_small in cases:
        large, small = largest_and_smallest(*args)
        if large != exp_large or small != exp_small:
            print("function 4 test fail")
            return False
    print("Function 4 test success")
    return True
# Run the self-test when this file is executed directly.
if __name__ == "__main__" :
    test_func_4()
| true |
adf4a1278746dad0ab2cc4cf375a5b4c11861199 | Python | XomakNet/teambuilder | /utils/metrics.py | UTF-8 | 11,671 | 2.78125 | 3 | [] | no_license | from utils.math import normalized_vector_distance
from typing import List
from typing import Set
from math import ceil
from numpy import mean
from models.user import User
__author__ = 'Xomak'
# Forward declaration so ClusteringMetric below can reference TeamMetric in
# its type hints; the real implementation later in this module replaces it.
class TeamMetric:
    pass
class MetricTypes:
    """Names of the metric flavours a ClusteringMetric can report."""
    LISTS = "lists"      # similarity of the users' answer lists only
    DESIRES = "desires"  # satisfied teammate wishes only
    COMMON = "common"    # weighted combination of both components
class ClusteringMetric:
    """Quality metric for a whole clustering (a list of team sets).

    Combines the average per-team metric with a "balance" term that
    penalises teams deviating from the ideal team size.
    """

    # Relative weights of the two components in get_final_metric().
    balance_weight = 0.5
    average_weight = 0.5

    def __init__(self, users_sets: List[Set[User]], metric_type=MetricTypes.COMMON):
        # users_sets: one set of User objects per team.
        self.users_sets = users_sets
        self.metric_type = metric_type
        self._calculate_metric()

    def _get_team_metric_value(self, team_metric: TeamMetric):
        # Select the team-metric component named by self.metric_type.
        if self.metric_type == MetricTypes.COMMON:
            return team_metric.get_final_metric_value()
        elif self.metric_type == MetricTypes.DESIRES:
            return team_metric.get_desires_metric_value()
        elif self.metric_type == MetricTypes.LISTS:
            return team_metric.get_lists_metric_value()
        else:
            raise ValueError("Wrong metric type '%s'" % str(self.metric_type))

    def _calculate_metric(self):
        # Computes one TeamMetric per team plus aggregate statistics and
        # the balance metric (1 - share of members in over/under-sized teams).
        self.sets_metrics = []
        self.total_users_count = 0
        self.balance_metric = 0
        for user_set in self.users_sets:
            self.total_users_count += len(user_set)
        # Ideal team size: total users spread over the teams, rounded up.
        self.team_size = int(ceil(self.total_users_count / len(self.users_sets)))
        overload_members_count = 0
        lack_members_count = 0
        total_metrics = []
        for user_set in self.users_sets:
            set_metric = TeamMetric(user_set)
            self.sets_metrics.append(set_metric)
            total_metrics.append(self._get_team_metric_value(set_metric))
            overload = len(user_set) - self.team_size
            # NOTE(review): the "- 1" gives a one-member tolerance before a
            # team counts as under-sized — presumably intentional; confirm.
            lack = self.team_size - 1 - len(user_set)
            if overload > 0:
                overload_members_count += overload
            if lack > 0:
                lack_members_count += lack
        self.balance_metric = 1 - (lack_members_count + overload_members_count)/self.total_users_count
        self.average_metric = mean(total_metrics)
        self.min_metric = min(total_metrics)
        self.max_metric = max(total_metrics)

    def get_final_metric(self):
        # Weighted blend of the balance and average components.
        return self.balance_metric*self.balance_weight + self.average_metric*self.average_weight

    def get_average_metric(self):
        return self.average_metric

    def get_balance_metric(self):
        return self.balance_metric

    def __str__(self):
        return "Average: {}, min: {}, max: {}, balance: {}".format(self.average_metric, self.min_metric,
                                                                   self.max_metric, self.balance_metric)

    def get_average_desires_metric(self):
        # Mean of the desires component over all teams.
        metrics = []
        for metric in self.sets_metrics:
            metrics.append(metric.get_desires_metric().get_final_metric_value())
        return mean(metrics)
class TeamMetric:
    """Combined quality metric for a single team (a set of User objects)."""

    # Weighting between the desires component and the lists component.
    _desires_metric_weight = 0.3
    _list_metric_weight = 1 - _desires_metric_weight

    def __init__(self, users_set):
        if len(users_set) != 0:
            # HACK (translated: "kostyl'", suggested by Kostya): peek at an
            # arbitrary member; every user is assumed to carry the same
            # number of answer lists.
            self._lists_count = len(next(iter(users_set)).get_lists())
            self._lists_metrics = []
            for list_id in range(0, self._lists_count):
                self._lists_metrics.append(TeamListMetric(users_set, list_id))
            self._desires_metric = TeamDesiresMetric(users_set)
            self._calc_final_metric_value()
        else:
            raise ValueError("Error calculating final metric: users_set is empty")

    def _calc_final_metric_value(self):
        """
        Calculate final metric of the team, and set it to private field self._final_metric_value.
        Final metric = sum(correct_final_list_metric)/correct_final_list_metric_count
        :return: nothing :)
        """
        self._final_metric_value = 0
        # Average the per-list metrics, skipping invalid ones (teams of < 2).
        correct_list_metrics_count = 0
        final_list_metrics_value = 0
        for list_id in range(0, self._lists_count):
            if self._lists_metrics[list_id].is_valid():
                final_list_metrics_value += self._lists_metrics[list_id].get_final_metric_value()
                correct_list_metrics_count += 1
        if correct_list_metrics_count > 0:
            final_list_metrics_value /= correct_list_metrics_count
        # Calculate final desires metric
        final_desires_metric_value = self._desires_metric.get_final_metric_value()
        # Weighted blend of the two components.
        self._final_metric_value = final_list_metrics_value * self._list_metric_weight + \
                                   final_desires_metric_value * self._desires_metric_weight

    def __str__(self):
        result_str = "\nMetrics of the team:"
        for list_metric in self._lists_metrics:
            result_str += "\n%s" % str(list_metric)
        result_str += "\n%s" % str(self._desires_metric)
        result_str += "\nFINAL METRIC: %s" % str(self._final_metric_value)
        return result_str

    def __cmp__(self, other):
        # NOTE(review): __cmp__ is the Python 2 comparison protocol and is
        # ignored by Python 3 — confirm whether ordering is still needed.
        if self._final_metric_value > other.get_final_metric_value():
            return 1
        elif self._final_metric_value < other.get_final_metric_value():
            return -1
        else:
            return 0

    def get_final_metric_value(self):
        return self._final_metric_value

    def get_all_lists_metrics(self):
        return self._lists_metrics

    def get_list_metric_by_list_number(self, list_number):
        """
        Returns TeamListMetric object for the list with number list_number
        :param list_number: number of the list
        :return: None if list_number < 0 or list_number >= len(lists_metrics), otherwise - TeamListMetric object
        """
        # NOTE(review): the guard uses "< -1", so list_number == -1 slips
        # through and returns the last metric via negative indexing — the
        # docstring says None; confirm which behaviour is intended.
        if list_number < -1 or list_number >= len(self._lists_metrics):
            return None
        return self._lists_metrics[list_number]

    def get_desires_metric(self):
        """
        Returns desires metric for the team
        :return: TeamDesiresMetric object
        """
        return self._desires_metric

    def get_desires_metric_value(self):
        return self._desires_metric.get_final_metric_value()

    def get_lists_metric_value(self):
        # Plain mean over all per-list metric values (valid or not).
        return mean(list(map(lambda list_metric: list_metric.get_final_metric_value(), self._lists_metrics)))
class TeamDesiresMetric:
    """Measures how well a team satisfies its members' teammate wishes.

    The final value is the share of all "I want to work with X" desires
    (summed over every team member) that are satisfied inside the team.
    """

    def __init__(self, users_set):
        self._is_valid = False
        self._desires_coeff = 0
        # Per-user breakdown dicts: id, desires_count,
        # satisfied_desires_count, satisfied_desires_percentage.
        self._desires_of_users_list = []
        self._all_desires_count = 0
        self._all_satisfied_desires_count = 0
        self._calculate_metric(users_set)

    def is_valid(self):
        return self._is_valid

    def __str__(self):
        if self._is_valid:
            return "Desires metric = %s" % self._desires_coeff
        else:
            return "Desires metric isn't valid"

    def get_final_metric_value(self):
        return self._desires_coeff

    def _calculate_metric(self, users_set):
        """Count desires and satisfied desires, then derive the coefficient."""
        # NOTE: the original also built a users_ids_set here, but it was
        # never read (dead code) — removed.
        for user in users_set:
            # Users this member wants to be teamed with.
            users_selected_by_user = user.get_selected_people()
            desires_of_user_count = len(users_selected_by_user)
            # A desire is satisfied when the wished-for user is on the team.
            satisfied_desires_count = len(users_selected_by_user.intersection(users_set))
            # Increment global counters
            self._all_desires_count += desires_of_user_count
            self._all_satisfied_desires_count += satisfied_desires_count
            # Append desires data for the user
            satisfied_desires_percentage = 0 if desires_of_user_count == 0 else \
                round(satisfied_desires_count / desires_of_user_count, 2)
            self._desires_of_users_list.append({"id": user.get_id(),
                                                "desires_count": desires_of_user_count,
                                                "satisfied_desires_count": satisfied_desires_count,
                                                "satisfied_desires_percentage": satisfied_desires_percentage})
        # Share of satisfied desires over the whole team (0 when no desires).
        if self._all_desires_count == 0:
            self._desires_coeff = 0
        else:
            self._desires_coeff = round(self._all_satisfied_desires_count / self._all_desires_count, 2)
        self._is_valid = True
class TeamListMetric:
    """Similarity metric for one team over a single answer list.

    Combines the normalized average pairwise distance with a threshold
    coefficient describing the share of well-connected member pairs.
    """

    # Pair distances below this are counted as weakly connected.
    _significant_threshold = 0.5
    # Any single pair below this zeroes the threshold coefficient.
    _negative_threshold = -0.3
    # Weights of the two components in get_final_metric_value().
    _average_coeff_weight = 0.4
    _threshold_coeff_weight = 0.6

    def __init__(self, users_set, list_id):
        self._is_valid = False
        self._list_id = list_id
        self._average_coeff = 0
        self._threshold_coeff = 0
        self._pair_distances = []
        self._pairs_count = 0
        self._users_count = 0
        self._behind_significant_threshold_count = 0
        # Typo fixed: was "_behing_negative_threshold_count" (private,
        # only referenced inside this class).
        self._behind_negative_threshold_count = 0
        self._calculate_metric(users_set)

    def get_list_id(self):
        return self._list_id

    def is_valid(self):
        # False when the team has fewer than two members (no pairs).
        return self._is_valid

    def __str__(self):
        if self._is_valid:
            return "List %d, avg = %s, threshold = %s" % (self._list_id, self._average_coeff, self._threshold_coeff)
        else:
            return "Metric of list %d isn't valid" % self._list_id

    def get_final_metric_value(self):
        return self._average_coeff_weight * self._average_coeff + self._threshold_coeff_weight * self._threshold_coeff

    def get_average_coeff(self):
        """
        Returns normalized (0..1) average pair distance
        :return: average coefficient
        """
        return self._average_coeff

    def get_threshold_coeff(self):
        """
        Returns normalized(0..1) value, that shows quality of connections between users.
        C = 1 - (behind_significant_threshold_pairs_count / pairs_count), or
        C = 0, if there are at least 1 pair with distance < behind_negative_threshold
        :return: threshold coefficient
        """
        return self._threshold_coeff

    def _calculate_metric(self, user_set):
        # A metric needs at least one pair; stay invalid otherwise.
        if len(user_set) <= 1:
            return
        self._users_count = len(user_set)
        user_list = list(user_set)
        # Distance of every unordered pair of distinct users on this list.
        for first_user_index in range(0, self._users_count):
            for second_user_index in range(first_user_index + 1, self._users_count):
                self._pairs_count += 1
                distance = normalized_vector_distance(user_list[first_user_index].get_lists()[self._list_id],
                                                      user_list[second_user_index].get_lists()[self._list_id])
                self._pair_distances.append(distance)
                if distance < self._significant_threshold:
                    self._behind_significant_threshold_count += 1
                if distance < self._negative_threshold:
                    self._behind_negative_threshold_count += 1
        # Average coefficient: distances lie in -1..1, map them to 0..1.
        average_pair_distance = sum(self._pair_distances) / len(self._pair_distances)
        average_pair_distance_normalized = (average_pair_distance + 1) / 2
        self._average_coeff = (round(float(average_pair_distance_normalized), 2))
        # Threshold coefficient: share of sufficiently-connected pairs;
        # any strongly negative pair forces it to zero.
        self._threshold_coeff = 1 - self._behind_significant_threshold_count / self._pairs_count
        if self._behind_negative_threshold_count > 0:
            self._threshold_coeff = 0
        self._threshold_coeff = (round(self._threshold_coeff, 2))
        self._is_valid = True
| true |
632071363ce6c84af97622aba324c74ecf31529e | Python | burnbrigther/py_practice | /ex11_ex2.py | UTF-8 | 272 | 3.71875 | 4 | [] | no_license | print "What is your name?",
# NOTE: Python 2 script (print statement / raw_input). The trailing comma
# on each print keeps the prompt and the typed answer on the same line.
name = raw_input()
print "What is your favorite sport?",
fav_sport = raw_input()
print "What is your favorite team?",
fav_team = raw_input()
# %r renders each answer with quotes around it.
print "Hello %r. Your favorite sport is %r and your favorite team is %r?" % (name, fav_sport, fav_team)
53ee519096cfd85c4776d37dcdde3e820e5262da | Python | pengguanjun/imagepy | /sciwx/widgets/menubar.py | UTF-8 | 2,205 | 2.890625 | 3 | [
"BSD-2-Clause"
] | permissive | import wx
def hot_key(txt):
    """Parse an accelerator string such as 'Ctrl-Shift-S' or 'Alt-F4'.

    :param txt: hyphen-separated modifier names plus a final key token.
    :return: (wx modifier flags, key code); the code is -1 when the key
             token is neither F1..F12 nor a single character.
    """
    sep = txt.split('-')
    acc, code = wx.ACCEL_NORMAL, -1
    if 'Ctrl' in sep: acc|= wx.ACCEL_CTRL
    if 'Alt' in sep: acc|= wx.ACCEL_ALT
    if 'Shift' in sep: acc|= wx.ACCEL_SHIFT
    fs = ['F%d'%i for i in range(1,13)]
    if sep[-1] in fs:
        # NOTE(review): assumes function-key codes are consecutive from
        # 340 (wx.WXK_F1) — confirm against the wx version in use.
        code = 340+fs.index(sep[-1])
    elif len(sep[-1])==1: code = ord(sep[-1])
    return acc, code
class MenuBar(wx.MenuBar):
    """Menu bar built from a nested (label, value) description tree."""

    def __init__(self, app):
        wx.MenuBar.__init__(self)
        self.app = app
        app.SetMenuBar(self)

    def parse(self, ks, vs, pt, short, rst):
        # ks: label; vs: plugin factory or list of child entries;
        # pt: parent menu/menubar; short: {label: hotkey string};
        # rst: collects (hotkey, item id) pairs for the accelerator table.
        if isinstance(vs, list):
            menu = wx.Menu()
            for kv in vs:
                if kv == '-': menu.AppendSeparator()
                else: self.parse(*kv, menu, short, rst)
            pt.Append(1, ks, menu)
        else:
            item = wx.MenuItem(pt, -1, ks)
            if ks in short:
                rst.append((short[ks], item.GetId()))
            # Default argument p=vs binds the plugin at definition time
            # (avoids the late-binding closure pitfall).
            f = lambda e, p=vs: p().start(self.app)
            self.Bind(wx.EVT_MENU, f, item)
            pt.Append(item)

    def Append(self, id, item, menu):
        # Adapts the (id, label, menu) call used in parse() above to the
        # wx.MenuBar.Append(menu, label) signature.
        wx.MenuBar.Append(self, menu, item)

    def load(self, data, shortcut={}):
        # NOTE(review): mutable default argument; harmless here only
        # because shortcut is never mutated.
        rst = []
        for k,v in data[1]:
            self.parse(k, v, self, shortcut, rst)
        # Expand each hotkey string into (modifiers, keycode, item id).
        rst = [(*hot_key(i[0]), i[1]) for i in rst]
        return wx.AcceleratorTable(rst)

    def on_menu(self, event): print('here')  # debug handler, unused

    def clear(self):
        # Drop every top-level menu.
        while self.GetMenuCount()>0: self.Remove(0)
if __name__ == '__main__':
    # Manual demo: P is a stand-in plugin whose start() just prints.
    # NOTE(review): MenuBar() below omits the required `app` argument, and
    # P.start() takes no app argument while parse() calls start(self.app) —
    # this demo appears out of date with the class; confirm before relying
    # on it.
    class P:
        def __init__(self, name):
            self.name = name
        def start(self):
            print(self.name)
        def __call__(self):
            return self
    # Nested (label, value) tree: '-' is a separator, lists become submenus.
    data = ('menu', [
        ('File', [('Open', P('O')),
                  '-',
                  ('Close', P('C'))]),
        ('Edit', [('Copy', P('C')),
                  ('A', [('B', P('B')),
                         ('C', P('C'))]),
                  ('Paste', P('P'))])])
    app = wx.App()
    frame = wx.Frame(None)
    menubar = MenuBar()
    menubar.load(data)
    frame.SetMenuBar(menubar)
    frame.Show()
    app.MainLoop()
| true |
926aa9fe4b5a33cc0d3c40102f23496473fb446b | Python | Milan-Chicago/ds-guide | /house_robber.py | UTF-8 | 1,882 | 3.90625 | 4 | [] | no_license | """
House Robber I
You are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed,
the only constraint stopping you from robbing each of them is that adjacent houses have security system connected and
it will automatically contact the police if two adjacent houses were broken into on the same night.
Given a list of non-negative integers representing the amount of money of each house, determine the maximum amount of
money you can rob tonight without alerting the police.
Input: nums = [1,2,3,1]
Output: 4
Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).
Total amount you can rob = 1 + 3 = 4
Approach: Top down DP --> Start at the first house, end at last house
Time complexity: O(n), O(n)
Space complexity: O(n), O(1)
"""
from typing import List
from unittest import TestCase
class Solution:
    def rob(self, nums: List[int]) -> int:
        """Maximum loot from non-adjacent houses (House Robber, LC 198).

        Dynamic programming with two rolling states: the best total when
        the current house is robbed versus skipped.
        """
        if not nums:
            return 0
        if len(nums) == 1:
            return nums[0]
        take = 0  # best total ending with the current house robbed
        skip = 0  # best total ending with the current house skipped
        for money in nums:
            take, skip = skip + money, max(take, skip)
        return max(take, skip)
class TestRob(TestCase):
    """Unit tests for Solution.rob: the two examples plus edge cases."""

    def setUp(self) -> None:
        # Fixtures: LeetCode examples, a single zero, a "skip two in the
        # middle" case, and one long input with a known answer.
        self.nums_1 = [1, 2, 3, 1]
        self.nums_2 = [2, 7, 9, 3, 1]
        self.nums_3 = [0]
        self.nums_4 = [2, 1, 1, 2]
        self.nums_5 = [183, 219, 57, 193, 94, 233, 202, 154, 65, 240, 97, 234, 100, 249, 186, 66, 90, 238, 168, 128,
                       177, 235, 50, 81, 185, 165, 217, 207, 88, 80, 112, 78, 135, 62, 228, 247, 211]

    def test_rob(self) -> None:
        obj = Solution()
        self.assertEqual(4, obj.rob(self.nums_1))
        self.assertEqual(12, obj.rob(self.nums_2))
        self.assertEqual(0, obj.rob(self.nums_3))
        self.assertEqual(4, obj.rob(self.nums_4))
        self.assertEqual(3365, obj.rob(self.nums_5))
| true |
7cfb2cc28b45807ff97a02c28f73032b28487fb4 | Python | Samarkina/PythonTasks | /FirstTasks/3.py | UTF-8 | 179 | 3.078125 | 3 | [] | no_license | x = int(input())
# Hour and minute at which the person goes to sleep (x, read above, is the
# number of minutes to sleep).
h = int(input())
m = int(input())
# Bedtime expressed as minutes since midnight.
go_to_sleep = h*60 + m
timer = x + go_to_sleep
# NOTE(review): the hour count is not wrapped modulo 24 — confirm intended.
print("{} часов".format(timer//60))
print("{} минут".format(timer%60))
| true |
42a56f1a1e7ea642b6151f5628191c8910ec1719 | Python | cduck/qutrits | /cirq/ops/reversible_composite_gate.py | UTF-8 | 3,031 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the inverse method of a CompositeOperation & ReversibleEffect."""
from typing import TypeVar, Generic, cast
from cirq import abc
from cirq.extension import Extensions
from cirq.ops import gate_features, op_tree, raw_types
def _reverse_operation(operation: raw_types.Operation,
                       ext: Extensions) -> raw_types.Operation:
    """Returns the inverse of an operation, if possible.

    Args:
        operation: The operation to reverse.
        ext: Used when casting the operation into a reversible effect.

    Returns:
        An operation on the same qubits but with the inverse gate.

    Raises:
        TypeError: The operation isn't reversible.
    """
    # ext.cast raises TypeError when the operation has no reversible form.
    reversible_op = ext.cast(gate_features.ReversibleEffect, operation)
    return cast(raw_types.Operation, reversible_op.inverse())
def inverse(root: op_tree.OP_TREE,
            extensions: Extensions = None
            ) -> op_tree.OP_TREE:
    """Generates OP_TREE inverses.

    Args:
        root: An operation tree containing only invertible operations.
        extensions: For caller-provided implementations of gate inverses.

    Returns:
        An OP_TREE that performs the inverse operation of the given OP_TREE.
    """
    ext = extensions or Extensions()
    # Invert each leaf operation and reverse the order of every sub-tree,
    # so the whole tree runs backwards.
    return op_tree.transform_op_tree(
        root=root,
        op_transformation=lambda e: _reverse_operation(e, ext),
        iter_transformation=lambda e: reversed(list(e)))
TOriginal = TypeVar('TOriginal', bound='ReversibleCompositeGate')
class ReversibleCompositeGate(gate_features.CompositeGate,
                              gate_features.ReversibleEffect,
                              metaclass=abc.ABCMeta):
    """A composite gate that gets decomposed into reversible gates."""

    def inverse(self: TOriginal
                ) -> '_ReversedReversibleCompositeGate[TOriginal]':
        # The inverse is a lazy wrapper; no decomposition happens here.
        return _ReversedReversibleCompositeGate(self)
class _ReversedReversibleCompositeGate(Generic[TOriginal],
                                       gate_features.CompositeGate,
                                       gate_features.ReversibleEffect):
    """A reversed reversible composite gate."""

    def __init__(self, forward_form: TOriginal) -> None:
        self.forward_form = forward_form

    def inverse(self) -> TOriginal:
        # Inverting twice returns the original gate object.
        return self.forward_form

    def default_decompose(self, qubits):
        # Decompose the forward gate, then invert that operation tree.
        return inverse(self.forward_form.default_decompose(qubits))
| true |
b3ef5ec67f31bf480a905c8dea8df089796add83 | Python | codeslord/onlycode | /onlycode.py | UTF-8 | 2,341 | 3.40625 | 3 | [
"MIT"
] | permissive | import os
import tqdm
import sys, token, tokenize
def get_filepaths(directory):
    """Collect the full paths of every file under *directory*.

    Walks the directory tree rooted at *directory* top-down and joins
    each file name with the directory it was found in.

    :param directory: root directory to search.
    :return: list of full file paths, in os.walk order.
    """
    return [
        os.path.join(dirpath, filename)
        for dirpath, _dirnames, filenames in os.walk(directory)
        for filename in filenames
    ]
""" Strip comments and docstrings from a file.
"""
def do_file(fname):
    """Strip comments and docstrings from one Python source file.

    Writes the result next to the input as ``fname + ",strip"``:
    docstrings are replaced by ``#--`` and comments by ``##`` (plus a
    newline), so the line layout of the remaining code is preserved.

    :param fname: path of the Python file to process.
    """
    # Context managers close both handles even if tokenize raises
    # (the original leaked both file objects).
    with open(fname, encoding='utf-8') as source, \
            open(fname + ",strip", "w", encoding='utf-8') as mod:
        prev_toktype = token.INDENT
        last_lineno = -1
        last_col = 0
        tokgen = tokenize.generate_tokens(source.readline)
        for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen:
            if slineno > last_lineno:
                last_col = 0
            if scol > last_col:
                # Re-create the horizontal gap before this token.
                mod.write(" " * (scol - last_col))
            if toktype == token.STRING and prev_toktype == token.INDENT:
                # A string token right after an INDENT is a docstring.
                mod.write("#--")
            elif toktype == tokenize.COMMENT:
                mod.write("##\n")
            else:
                mod.write(ttext)
            prev_toktype = toktype
            last_col = ecol
            last_lineno = elineno
if __name__ == '__main__':
    # Strip every .py file found under the (placeholder) directory; each
    # source gets a ",strip" copy written next to it.
    full_file_paths = get_filepaths(r"_path_to_dir_")
    for f in tqdm.tqdm(full_file_paths):
        try:
            if f.endswith(".py"):
                do_file(f)
        except FileNotFoundError:
            # Best-effort: skip files that vanished since the walk.
            continue
| true |
4a62d99ee8d4b62355b455576e50ef279f95741f | Python | jorgeaninoc/CodingProblems | /A. Chat room.py | UTF-8 | 308 | 3.34375 | 3 | [] | no_license | s = raw_input()
# Codeforces 58A "Chat room": decide whether "hello" can be obtained from
# the typed word s (read above) by deleting letters, i.e. whether "hello"
# is a subsequence of s.
w = "hello"
aux = ""
length = range(len(s))  # NOTE(review): unused variable
index = 0
# Keep, in order, every character of s that belongs to "hello". The loop
# iterates the original string object even though s is rebound inside it.
for i in s:
    if i in "hello":
        s = s.replace(i,"")
        aux += i
# Greedy subsequence match of "hello" against the filtered characters.
aux2 = ""
for i in aux:
    if index < len(w) and i==w[index]:
        aux2 += i
        index+=1
if aux2 == "hello":
    print("YES")
else:
    print("NO")
5c192d8366bf35614a4f4a5a7fd21d9966d63799 | Python | fillipe-felix/ExerciciosPython | /Lista02/Ex008.py | UTF-8 | 984 | 4.4375 | 4 | [] | no_license | """
Faça um programa que pergunte o preço de três produtos e informe qual
produto você deve comprar, sabendo que a decisão é sempre pelo mais barato.
"""
# Read the three product prices.
produto1 = float(input("Digite o valor do primeiro produto: "))
produto2 = float(input("Digite o valor do segundo produto: "))
produto3 = float(input("Digite o valor do terceiro produto: "))
# Strictly-cheapest cases first, then two-way ties.
# NOTE(review): when all three prices are equal, the first tie branch
# fires ("first and second are tied") — confirm that message is acceptable.
if(produto1 < produto2 and produto1 < produto3):
    print("O primeiro produto é o mais barato")
elif(produto2 < produto1 and produto2 < produto3):
    print("O segundo produto é o mais barato")
elif(produto3 < produto2 and produto3 < produto1):
    print("O terceiro produto é o mais barato")
elif(produto1 == produto2):
    print("O primeiro produto e o segundo tem o mesmo preço e são mais baratos")
elif(produto1 == produto3):
    print("O primeiro produto e o terceiro tem o mesmo preço e são mais baratos")
elif(produto2 == produto3):
    print("O segundo produto e o terceiro tem o mesmo preço e são mais baratos")
d943c8958a3eb8b1f7ebe6de45a9993fb211c95f | Python | linziyi96/pytorch | /torch/_vmap_internals.py | UTF-8 | 3,880 | 2.703125 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | import torch
import functools
from torch import Tensor
import warnings
# Error-message templates used by the validation helpers below.
REQUIRE_SAME_MAP_SIZE = (
    'vmap: Expected all tensors to have the same size in the mapped dimension, '
    'got sizes {sizes} for the mapped dimension'
)
ELEMENT_MUST_BE_TENSOR = (
    'vmap({fn}, ...): `{fn}` must only return Tensors, got '
    'type {out} for return {idx}.'
)
MUST_RETURN_TENSORS = (
    'vmap({fn}, ...): `{fn}` must only return Tensors, got '
    'type {out} as the return.'
)
NO_INPUTS = (
    'vmap({fn})(<inputs>): got no inputs. Maybe you forgot '
    'to add inputs, or you are trying to vmap over a '
    'function with no inputs. The latter is unsupported.'
)
# Checks that all args have the same batch dim size.
def _validate_and_get_batch_size(args):
batch_sizes = [arg.size(0) for arg in args]
if batch_sizes and any([size != batch_sizes[0] for size in batch_sizes]):
raise ValueError(REQUIRE_SAME_MAP_SIZE.format(sizes=batch_sizes))
return batch_sizes[0]
def _validate_inputs_and_get_batch_size(args, fn_name):
    """Reject an empty input list, then delegate to _validate_and_get_batch_size."""
    if len(args) == 0:
        raise ValueError(NO_INPUTS.format(fn=fn_name))
    return _validate_and_get_batch_size(args)
# Undos the batching (and any batch dimensions) associated with the `vmap_level`.
def _unwrap_batched(batched_outputs, vmap_level, batch_size):
    # NOTE [Ignored _remove_batch_dim, _add_batch_dim]
    # There is something wrong with our type bindings for functions that begin
    # with '_', see #40397.
    if isinstance(batched_outputs, Tensor):
        return torch._remove_batch_dim(batched_outputs, vmap_level, batch_size, 0)  # type: ignore
    # Multiple outputs arrive as a tuple; unwrap each one at dim 0.
    return tuple(torch._remove_batch_dim(out, vmap_level, batch_size, 0)  # type: ignore
                 for out in batched_outputs)
# Checks that `fn` returned one or more Tensors and nothing else.
# NB: A python function that return multiple arguments returns a single tuple,
# so we are effectively checking that `outputs` is a single Tensor or a tuple of
# Tensors.
def _validate_outputs(outputs, fn_name):
if isinstance(outputs, Tensor):
return
if not isinstance(outputs, tuple):
raise ValueError(MUST_RETURN_TENSORS.format(fn=fn_name, out=type(outputs)))
for idx, output in enumerate(outputs):
if isinstance(output, Tensor):
continue
raise ValueError(ELEMENT_MUST_BE_TENSOR.format(fn=fn_name, out=type(output), idx=idx))
# This is the global tracker for how many nested vmaps we are currently inside.
VMAP_LEVEL = 0

# vmap(func)(inputs) wraps all Tensor inputs to be batched in BatchedTensors,
# sends those into func, and then unwraps the output BatchedTensors. Operations
# on BatchedTensors perform the batched operations that the user is asking for.
def vmap(func, in_dims=0, out_dims=0):
    """Vectorizing map: returns a function that maps *func* over dim 0 of
    every Tensor input.

    Only in_dims=0 / out_dims=0 are implemented in this prototype.
    """
    warnings.warn(
        'torch.vmap is an experimental prototype that is subject to '
        'change and/or deletion. Please use at your own risk.')
    if in_dims != 0:
        raise NotImplementedError('NYI: vmap with `in_dims` other than 0')
    if out_dims != 0:
        raise NotImplementedError('NYI: vmap with `out_dims` other than 0')

    @functools.wraps(func)
    def wrapped(*args):
        if any(not isinstance(arg, Tensor) for arg in args):
            raise NotImplementedError('NYI: vmap with non-tensor inputs')
        batch_size = _validate_inputs_and_get_batch_size(args, func.__name__)
        global VMAP_LEVEL
        VMAP_LEVEL += 1
        try:
            # See NOTE [Ignored _remove_batch_dim, _add_batch_dim]
            batched_inputs = [torch._add_batch_dim(arg, 0, VMAP_LEVEL) for arg in args]  # type: ignore
            batched_outputs = func(*batched_inputs)
            _validate_outputs(batched_outputs, func.__name__)
            return _unwrap_batched(batched_outputs, VMAP_LEVEL, batch_size)
        finally:
            # Always unwind the nesting level, even when func raises.
            VMAP_LEVEL -= 1
    return wrapped
| true |
94f2bd2af03345e98d7dc02640bac5b60e249724 | Python | panhaichun/panzixuan | /pzx/account.py | UTF-8 | 1,107 | 2.828125 | 3 | [] | no_license | import pzx.user
from security.authentication import AccountNotFoundException
class Account:
    """Security-layer view of a user record.

    Wraps a pzx User and exposes the name/password/role information the
    authentication machinery expects, delegating everything to the
    wrapped user.
    """

    def __init__(self, user):
        if not user:
            raise ValueError('参数[user]不能为空')
        self.__user = user

    def get_name(self):
        return self.__user.username

    def get_password(self):
        return self.__user.password

    def get_roles(self):
        # Role names of every role the wrapped user contains.
        return {role.name for role in self.__user.get_contain_roles()}

    def get_prototype(self):
        return self.__user

    def __eq__(self, obj):
        if self is obj:
            return True
        if not isinstance(obj, Account):
            return False
        return self.__user == obj.get_prototype()

    def __hash__(self):
        # Hash tracks the wrapped user, consistent with __eq__.
        return hash(self.__user)

    def __str__(self):
        return str(self.__user)
def find_account(name):
    """Look up a user by username and wrap it in an Account.

    :raises AccountNotFoundException: when no such user exists.
    """
    try:
        return Account(pzx.user.get_by_username(name))
    except pzx.user.UserNotFoundException:
        # Translate the persistence-layer error into the security-layer one.
        raise AccountNotFoundException('账号[%s]不存在' % name)
| true |
c7f1f99a72f2f5f3ce0d205109d2fbf730e0d5e7 | Python | adamlporter/Fitbit_Analysis | /HRZ_ToCSV.py | UTF-8 | 1,593 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 19 11:40:19 2019
@author: adam
"""
import json
import pandas as pd
import glob
import time
def processFile(fName):
    """Load one Fitbit heart-rate-zone JSON export into a DataFrame.

    Flattens the nested JSON, converts the 'dateTime' column to
    datetimes, and renames the zone-value columns to short names
    (below1/zone1/zone2/zone3).

    :param fName: path of the JSON file to load.
    :return: the normalized pandas DataFrame.
    """
    # BUG FIX: the original opened the global `fname` (leaked from the
    # caller's loop) instead of the `fName` parameter. Also switched to
    # pd.json_normalize, since pandas.io.json.json_normalize was removed
    # in pandas 2.0.
    with open(fName, "r") as f:
        rawData = pd.json_normalize(json.load(f))
    rawData['dateTime'] = pd.to_datetime(rawData['dateTime'])
    cols_rename = {'value.valuesInZones.BELOW_DEFAULT_ZONE_1' : 'below1',
                   'value.valuesInZones.IN_DEFAULT_ZONE_1' : 'zone1',
                   'value.valuesInZones.IN_DEFAULT_ZONE_2' : 'zone2',
                   'value.valuesInZones.IN_DEFAULT_ZONE_3' : 'zone3'
                   }
    rawData.rename(columns = cols_rename, inplace = True)
    return rawData
# Working directory containing the exported Fitbit JSON files.
workd = '/home/adam/Google Drive/Python/DataSets/Data_fitbit/'
dataTypes = ['time_in_heart_rate_zones']
# the following data types are recorded in UTC
print('Files will be saved to directory with Fitbit datafiles with the following name:')
print('Processing:')
for dt in dataTypes:
    # Concatenate every matching JSON export into a single DataFrame.
    dataFiles = glob.glob(workd+dt+'*.json')
    toSaveData = pd.DataFrame()
    for fname in dataFiles:
        print(fname)
        toSaveData = pd.concat([toSaveData,processFile(fname)])
    HRZdf = toSaveData
    # Timestamped output filename, e.g. ...Data_191019_1140.csv.
    date = time.strftime("%y%m%d"+"_"+"%H%M")
    print('Saving -> '+dt+'Data_' + date + '.csv')
    toSaveData.to_csv(path_or_buf = workd + dt +'Data_' + date + '.csv',index=False)
| true |
b8ba4f9a3c6028fdfacfc99a2e35adb631772ca1 | Python | gaw1ik/Visualizers-In-Python | /stars.py | UTF-8 | 6,011 | 3.15625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 00:08:46 2020
This script generates a GIF file which contains an animation
resembling things ranging from stars to planets to biological
cells.
The variables in the Inputs section can
be adjusted to produce a wide range of visual results. The Inputs
section functions as a basic user interface, and only
the Inputs should be needed to operate the script.
The GIF
will automatically be saved to the working directory, so
please be mindful of where that is.
@author: Brian
"""
#%% Setup Environment
from IPython import get_ipython
get_ipython().magic('reset -sf')
import numpy as np
import math
import cairo
from PIL import Image
from random import seed, choice, random
#%% Inputs
filename = 'stars_1.gif'
# GIF Options
s = 1 # width and height of frame
nframes = 200 # number of frames
frame_duration = 1000/24*3
pixel_scale = 400
# Size/Arrangement Options
seed_number = 8
n_stars = 50
r_seq = np.arange(0.0010,0.0040,0.0001)*2.5# choice sequence for initial radii
# r_seq = [0.010]*50 + [0.030]*20 + [0.140]*5
# Twinlke Options
# bright_seq = np.arange(0.6, 0.8, 0.05)
bright_seq = [0.8]*5 + [1]
# bright_seq = [1]
# Jitter Options
jitter_mode = 'jitter'
jit_size = 0.0005
jit_seq = [-jit_size] + [0] + [jit_size]
# Color Options
color_mode = 'constant'
fill_mode = 'fill'
line_thickness = 0.01
color_const = [1,1,0.8]
c_seq = np.arange(0.5,1.05,0.05)
#%% Define Functions
def choose_color(color_mode, c_seq):
    """Pick an initial RGB colour as a list of three floats.

    'constant' mode scales the module-level color_const by a random
    brightness; 'random' mode draws each channel from c_seq.

    :param color_mode: 'constant' or 'random'.
    :param c_seq: sequence of channel values used in 'random' mode.
    :raises ValueError: for an unknown color_mode (the original crashed
        with UnboundLocalError instead).
    """
    if color_mode == 'constant':
        b = choice([0.3, 0.6, 1.0])  # random brightness step
        color = [color_const[0]*b, color_const[1]*b, color_const[2]*b]
    elif color_mode == 'random':
        color = [choice(c_seq), choice(c_seq), choice(c_seq)]
    else:
        raise ValueError("unknown color_mode: %r" % (color_mode,))
    return color
#%% Make star Class
class star:
    """One animated dot: a position, a radius and a colour.

    Keeps a fixed "base" state plus an "inst" (instantaneous) state
    that twinkle() and jitter() perturb every frame.
    """

    def __init__(self, base_center, radius, base_color):
        self.base_center = base_center
        self.inst_center = base_center
        self.radius = radius
        self.base_color = base_color
        self.inst_color = base_color

    def draw(self, ctx, fill_mode):
        """Render the star on a cairo context, filled or outlined."""
        red, green, blue = self.inst_color
        ctx.set_source_rgb(red, green, blue)
        cx, cy = self.inst_center
        ctx.arc(cx, cy, self.radius, 0, 2*math.pi)
        if fill_mode == 'fill':
            ctx.fill()
        elif fill_mode == 'outline':
            ctx.set_line_width(line_thickness)
            ctx.stroke()

    def twinkle(self, bright_seq):
        """Rescale the base colour by a brightness drawn from bright_seq."""
        factor = choice(bright_seq)
        self.inst_color = [channel*factor for channel in self.base_color]

    def jitter(self, jit_size, jit_seq, jitter_mode):
        """Nudge the position: around the base point ('jitter') or
        cumulatively from the current point ('walk')."""
        if jitter_mode == 'jitter':
            ox, oy = self.base_center[0], self.base_center[1]
        elif jitter_mode == 'walk':
            ox, oy = self.inst_center[0], self.inst_center[1]
        self.inst_center = [ox + choice(jit_seq), oy + choice(jit_seq)]
#%% Function for converting cairo surface to PIL image
""" This function converts a cairo surface to a
PIL image. The drawings below are done using cairo,
and thus are contained within so-called cairo surfaces.
They must be converted back to PIL Images so that the PIL
save function can be used to save the animation
as a GIF."""
def pilImageFromCairoSurface( surface ):
    """Convert a cairo ARGB32 surface into a PIL 'RGB' Image.

    The drawings are done with cairo, so each frame lives in a cairo
    surface; PIL's GIF writer needs PIL Images, hence this conversion.

    Raises ValueError for any surface format other than ARGB32.
    """
    cairoFormat = surface.get_format()
    if cairoFormat == cairo.FORMAT_ARGB32:
        pilMode = 'RGB'
        # Cairo stores pixels as ARGB; reorder each 4-byte pixel to RGB,
        # which is what PIL supports.
        argbArray = np.frombuffer( bytes(surface.get_data()), 'c' ).reshape( -1, 4 )
        rgbArray = argbArray[ :, 2::-1 ]
        # BUG FIX: ndarray.tostring() was deprecated and removed in NumPy 2.0;
        # tobytes() is the drop-in replacement with identical output.
        pilData = rgbArray.reshape( -1 ).tobytes()
    else:
        raise ValueError( 'Unsupported cairo format: %d' % cairoFormat )
    pilImage = Image.frombuffer( pilMode,
            ( surface.get_width(), surface.get_height() ), pilData, "raw",
            pilMode, 0, 1 )
    pilImage = pilImage.convert( 'RGB' )
    return pilImage
#%% Determine random starting positions for stars
r_seq = list(r_seq)
bright_seq = list(bright_seq)
stars = []
seed(seed_number)  # fixed seed => reproducible star field
for _ in range(n_stars):
    # Uniform position on the s-by-s canvas, random radius and color.
    cx = random()*s
    cy = random()*s
    r0 = choice(r_seq)
    # NOTE(review): b is never used, but removing this draw would shift the
    # RNG stream and change the generated image -- left in place on purpose.
    b = choice(bright_seq)
    c0 = choose_color(color_mode,c_seq)
    # c0 = (np.uint8(choice(c_seq)*255),np.uint8(choice(c_seq)*255),np.uint8(choice(c_seq)*255))
    new_starro = star( [cx, cy], r0, c0 )
    stars.append(new_starro)
#%% Make the images for the GIF
images = []
seed(1)  # reseed so per-frame twinkle/jitter is reproducible
# Setup "frames" list (frame numbers)
frames = []
for f in range(nframes):
    frames.append(f)
# Make each frame
for f in frames:
    # initialize the blank canvas each frame
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, s*pixel_scale, s*pixel_scale)
    ctx = cairo.Context(surface)
    ctx.scale(pixel_scale, pixel_scale) # Normalizing the canvas
    # Draw each star and update their attributes
    for i, starro in enumerate(stars, 1):
        # Draw the star at its current (instantaneous) position/color
        starro.draw(ctx,fill_mode)
        # Twinkle the star (randomize brightness for the next frame)
        starro.twinkle(bright_seq)
        # Jitter the star for the next frame
        starro.jitter(jit_size,jit_seq,jitter_mode)
        # Move the star for the next frame
        # starro.walk(walk_size)
    # Convert the cairo surface into a PIL image and collect it
    im = pilImageFromCairoSurface(surface)
    images.append(im)
#%% Save as GIF: first frame saves, the rest are appended; loop=0 = forever
images[0].save(filename,
               save_all=True,
               append_images=images[1:],
               optimize=False,
               duration=frame_duration,
               loop=0)
print('DONE')
449203e0d964e0212d79dbceeec0522838c26245 | Python | ShaheerAhmed2030/Probablisitic-Model | /Decision Tree/ID3.py | UTF-8 | 3,967 | 3.34375 | 3 | [] | no_license | # Importing Libraries
import pandas as pd
import numpy as np
from pprint import pprint
# Functions
# Entropy Function
# Entropy Function
def Entropy(Target):
    """Shannon entropy (base 2) of the values in *Target*."""
    _, counts = np.unique(Target, return_counts=True)
    probabilities = counts / np.sum(counts)
    return -np.sum(probabilities * np.log2(probabilities))
# Information Gain
def InformationGain(data,Branch,target="COVID-19"):
total_entropy = Entropy(data[target])
Vals,Count = np.unique(data[Branch],return_counts = True)
Average = np.sum([(Count[i]/np.sum(Count))*Entropy(data.where(data[Branch]==Vals[i]).dropna()[target]) for i in range(len(Vals))])
return total_entropy - Average
# Algorithm
def ID3(data,originalData,features,target_attribute_name="COVID-19",parent_node_class=None):
# if all values are 0 or all 1
if len(np.unique(data[target_attribute_name])) <= 1:
return np.unique(data[target_attribute_name])[0]
#if the dataset is empty
elif len(data) == 0:
return np.unique(originalData[target_attribute_name])[np.argmax(np.unique(originalData[target_attribute_name],return_counts=True)[1])]
# no features
elif len(features) == 0:
return parent_node_class
else:
parent_node_class = np.unique(data[target_attribute_name])[np.argmax(np.unique(data[target_attribute_name], return_counts= True)[1])]
# Calculating the column with maximum gain
Items = [InformationGain(data, i ,target_attribute_name) for i in features]
Highest = np.argmax(Items)
Feature_to_pick = features[Highest]
# Removing the feature which we picked from the list
features = [i for i in features if i!= Feature_to_pick]
# Creating the Tree
Tree = {Feature_to_pick:{}}
# Using recursion to grow the tree further
for i in np.unique(data[Feature_to_pick]):
sub_data = data.where(data[Feature_to_pick] == i).dropna()
Sub_Tree = ID3(sub_data,originalData,features,target_attribute_name,parent_node_class)
# Inserting the tree
Tree[Feature_to_pick][i] = Sub_Tree
return Tree
# Prediction by iterating the tree
def Prediction(query,tree,default = 1):
for i in list(query.keys()):
if i in list(tree.keys()):
try:
result = tree[i][query[i]]
except:
return default
result = tree[i][query[i]]
if isinstance(result,dict):
return Prediction(query, result)
else:
return result
# Splitting Dataset
def Training(Data,Spliter):
training_Data = Data.iloc[:Spliter].reset_index(drop = True)
testing_Data = Data.iloc[Spliter:].reset_index(drop = True)
return training_Data,testing_Data
# Algorithm Testing
def Test(data,tree,target = "COVID-19"):
Queries = data.iloc[:,:-1].to_dict(orient = "records")
predicted = pd.DataFrame(columns = ["predicted"])
#Accuracy
for i in range(len(data)):
predicted.loc[i,"predicted"] = Prediction(Queries[i],tree,1.0)
print("Algorithm Prediction Accuracy:",(np.sum(predicted["predicted"]==data[target])/len(data))*100,'%')
# Query Prediction
def TestPositive(data,tree):
Queries = data.iloc[:,:,].to_dict(orient = "records")
print("Test Results:",end=" ")
if (Prediction(Queries[0],tree,1.0) == 1):
print("Corona Positive")
else:
print("Corona Negative")
# Main Method
def Main():
# Importing Dataset
Data = pd.read_csv("Cleaned.csv")
Data.drop(Data.columns[0],axis = 1, inplace = True)
# Splitting up the data
Training_data , Testing_Data = Training(Data,400)
Query = pd.read_csv("Query.csv")
tree = ID3(Training_data,Training_data,Training_data.columns[:-1])
pprint(tree)
Test(Testing_Data,tree)
TestPositive(Query, tree)
Main() | true |
d527677e3409238dca961f5cd033df6f78c8fb22 | Python | sup/mathlib | /mathlib/core/matrix.py | UTF-8 | 26,189 | 4.28125 | 4 | [] | no_license | #matrix.py
#Charles J. Lai
#August 18, 2013
import unimath
"""
======
matrix
======
This module contains a wrapper class that represents a matrix and
functions that can be used to analyze/evaluate a matrix class.
Addition, subtraction, division, and multiplication functionality are
implemented as cases within the appropriate functions in the unimath (core
math functions) module for Matrix class types as well as overloaded
operators defined within the Matrix class.
Usage/Syntax
============
Creating a matrix using a constructor or setter requires the use of a
Matlab-like syntax. For example, a 3*3 matrix:
1 2 3
3 2 1
5 1 4
has a raw string expression using Matlab syntax: "1,2,3;3,2,1;5,1,4"
with no spaces, using semi-colons to separate rows, and commas to
separate elements within each row. X and Y coordinates also mimic
Matlab's non-zero coordinate system for traversing a matrix.
Contents
--------
* Array - A one-dimensional array class with various methods
* Matrix - A matrix class with various methods
* fliplr() - Reverses an array or column of a matrix
* ref() - Returns the row echelon form of a matrix
* rref() - Returns the reduced row echelon form of a matrix
* trans() - Returns the transpose of a matrix or array
* det() - Returns the determinant of a matrix
* diag() - Returns the diagonalized form of a matrix
* eig() - Returns the eigenvector of a matrix
"""
class Matrix(object):
"""
Instances represent a two-dimensional matrix.
===========
Description
===========
A matrix is a type of data structure that can hold two two-dimensional
data on a discrete map of points. #ADD MORE DESCRIPTION/HISTORY#
The actual matrix iteself is represented by a two-dimensional list
wrapped in this matrix class which provides methods and properties
for various matrix-based applications and analysis. Our implementation
was created with Matlab-style constructors and matrix traversals in
mind. This syntax is familiar with most people and thus a solid design
decision for this module/package.
Coordinate positions in the matrix start with ones (for example, the
top left value in a matrix is at coordinate point [1, 1]).
[1, 1] <----- 1 2 3 4 5
1 2 3 4 5
1 2 3 4 5
1 2 3 4 5
1 2 3 4 5
The number of rows is denoted by the "m" property value and the number
of columns is denoted by the "n" property value (again familiar notation
for most people). The entire matrix object is of size m x n.
Each matrix contains a variety of different methods for adding,
subtracting, setting, and getting values/rows/columns as well as
overloaded operators for matrix scalar addition/subtraction in
addition to matrix multiplication and division.
Note: Addition, subtraction, multiplication, and division can also be done
with add(), sub(), mult(), div() from the unimath module in the core package.
"""
#Fields
_data = None
_m = 0
_n = 0
_is_square = False
#Immutable Properties
@property
def m(self):
return self._m
@property
def n(self):
return self._n
@property
def is_square(self):
return self._is_square
#Built-in Functions
def __init__(self, raw_data):
"""
Constructor: Create a new instance of a Matrix
Precondition: raw_data is a valid matrix input according to the
description in the meta documentation.
"""
#Case 1: raw_data is a string -> Matlab style
self._parse(raw_data)
self._check()
#Arithemetic Operator Overloading
def __add__(self, other):
"""
Returns: The sum of two matrices
OR
Procedure: Scalar addition of self.
Precondition: The two matrices must be the same size.
"""
#Matrix Addition
if type(self) == Matrix and type(other) == Matrix:
#Enforce the preconditions
assert self.m == other.m, "The two matrices differ in number of rows"
assert self.n == other.n, "The two matrices differ in number of cols"
#Create the return matrix
sum_matrix = Matrix(unimath.zeros(self.m, other.n))
#Subtract the two matrices
x = 1
while x <= other.m:
row_a = self.get_row(x)
row_b = other.get_row(x)
summation = unimath.add(row_a, row_b)
sum_matrix.set_row(x,summation)
x += 1
return sum_matrix
#Add a scalar to every value in the matrix
if type(other) == int or type(other) == float:
for x in range(0, self.m):
for y in range(0, self.n):
self._data[x][y] = self._data[x][y] + other
def __sub__(self, other):
"""
Returns: The difference of two matrices
OR
Procedure: Scalar subtraction of self.
Precondition: The two matrices must be the same size.
"""
#Matrix subtraction
if type(self) == Matrix and type(other) == Matrix:
#Enforce the preconditions
assert self.m == other.m, "The two matrices differ in number of rows"
assert self.n == other.n, "The two matrices differ in number of cols"
#Create the return matrix
difference_matrix = Matrix(unimath.zeros(self.m, other.n))
#Subtract the two matrices
x = 1
while x <= other.m:
row_a = self.get_row(x)
row_b = other.get_row(x)
difference = unimath.sub(row_a, row_b)
difference_matrix.set_row(x,difference)
x += 1
return difference_matrix
#Scalar subtraction
if type(other) == int or type(other) == float:
for x in range(0, self.m):
for y in range(0, self.n):
self._data[x][y] = self._data[x][y] - other
def __mul__(self, other):
"""
Returns: The product of two matrices such that [self] * [other]
OR
Procedure: Scalar multiplication of self by other.
Precondition: self.n == other.m
===========
Description
===========
Matrix multiplication is more than simple addition where cooresponding
values in the first matrix are added to the second matrix.
"""
#Matrix Multiplication
if type(self) == Matrix and type(other) == Matrix:
#Enforce the preconditions
assert self.n == other.m, "The dimensions of the matrices are invalid."
#Create the return matrix
product_matrix = Matrix(unimath.zeros(self.m, other.n))
#Multiply the two matrices
x = 1
y = 1
while y <= other.n:
col_b = other.get_col(y)
while x <= self.m:
#Multiply the all rows by the column and get the product
row_a = self.get_row(x)
product = unimath.mult(row_a, col_b)
final_sum = unimath.listsum(product)
#Set the product to the correct space in the product matrix
product_matrix.set(x, y, final_sum)
x += 1
#Reset the row counter and continue with the next column
x = 1
y += 1
return product_matrix
#Scalar multiplication
if type(other) == int or type(other) == float:
for x in range(0, self.m):
for y in range(0, self.n):
self._data[x][y] = self._data[x][y] * other
def __div__(self, other):
"""
Procedure: Scalar division of self.
"""
#Matrix Division
if type(self) == Matrix and type(other) == Matrix:
#Enforce the preconditions
assert self.n == other.m, "The dimensions of the matrices are invalid."
#Create the return matrix
quotient_matrix = Matrix(unimath.zeros(self.m, other.n))
#Multiply the two matrices
x = 1
y = 1
while y <= other.n:
col_b = other.get_col(y)
while x <= self.m:
#Multiply the all rows by the column and get the product
row_a = self.get_row(x)
quotient = unimath.div(row_a, col_b)
final_sum = unimath.listsum(product)
#Set the product to the correct space in the product matrix
quotient_matrix.set(x, y, final_sum)
x += 1
#Reset the row counter and continue with the next column
x = 1
y += 1
return quotient_matrix
#Scalar division
if type(other) == int or type(other) == float:
for x in range(0, self.m):
for y in range(0, self.n):
self._data[x][y] = self._data[x][y] / other
#Methods Proper
def disp(self, heading = "Matrix:"):
"""
Procedure: Prints a string representation of the matrix object
"""
print " "
print heading
print " "
#Iteratively print the elements of the matrix
for x in range(0, self.m):
for y in range(0, self.n):
print '%10.5f' % self._data[x][y],
print "\n"
def get(self, x, y):
"""
Returns: Value a specified coordinate point within the matrix.
Precondition: x and y are valid coordinates using Matlab coordinate
rules.
"""
return self._data[x-1][y-1]
def set(self, x, y, value):
"""
Procedure: Sets value at a specfied coordinate point to a new value.
Precondition: x and y are valid coordinates using Matlab coordinate
rules.
"""
#Check if the value specified is a string or an actual list.
assert type(value) == float or type(value) == int
self._data[x - 1][y - 1] = value
def get_row(self, row_number):
"""
Returns: The specified row in matrix self. The row is a python list.
"""
assert row_number <= self.m and row_number > 0, "This is not a valid row number"
return self._data[row_number - 1]
def set_row(self, row_number, row):
"""
Procedure: Sets the value for a specfied row.
"""
assert type(row) == list, "The row data given is not a list."
assert row_number >= 1 and row_number <= self.m, "The row number is invalid"
assert len(row) == self.n, "The row given is not a valid length"
col_number = 1
#Iterate over columns in a single row and set values.
for e in row:
self.set(row_number, col_number, e)
col_number += 1
def get_col(self, col_number):
"""
Returns: The specified col in matrix self. The col is a python list.
"""
assert col_number <= self.n and col_number > 0, "This is not a valid col number"
column = []
row_number = 0
while row_number < self.m:
column = column + [self._data[row_number][col_number - 1]]
row_number += 1
return column
def set_col(self, col_number, col):
"""
Procedure: Sets the value for a specified column.
"""
assert col_number <= self.n and col_number >= 1, "This is not a valid col number"
assert type(col) == list, "The col data given is not a list."
assert len(col) == self.m, "The column given is not a valid length."
row_number = 1
#Iterate over rows in a single column and set values
for e in col:
self.set(row_number, col_number, e)
row_number += 1
def add_row(self, row):
"""
Procedure: Adds a given row to the matrix to the bottom.
Precondition: The row must be have the same length as other rows in
the matrix. Argument passed to row is a python list.
"""
assert len(row) == self.n, "The row has an invalid length"
#Add the whole row to the end of the matrix
self._data = self._data + [row]
self._m += 1
def add_col(self, col):
"""
Procedure: Adds a given col to the matrix to the right
Precondition: The column must have the same length as other rows
"""
assert len(col) == self.m, "The column has an invalid length"
#Add each element to the end of each row
x = 0
for y in col:
self._data[x] = self._data[x] + [y]
x += 1
self._n += 1
def del_row(self, x):
"""
Procedure: Subtracts a given row from the matrix by row number. x
represents a specific row number.
Precondition: x must be a valid row number
"""
assert self.m >= x, "The row number given is too large"
assert x > 0, "Try again with a row number > 0"
self._data.remove(self._data[x - 1])
self._m -= 1
def del_col(self, y):
"""
Procedure: Subtracts a given column from the matrix by column number.
Precondition: y must be a valid column number.
"""
assert self.n >= y, "The column number given is too large"
assert y > 0, "Try again with a column number > 0"
for x in range(0, self.m):
self._data[x].remove(self._data[x][y-1])
self._n -= 1
#Helper Methods
def _parse(self, raw_data):
"""
Procedure: Parses the raw_data string and updates the Matrix object.
Called during object construction.
Precondition: raw_data is a valid input according to the description
in the meta documentation at the top of the document.
"""
#Case 1: raw_data is a string
if type(raw_data) == str:
#Some assertions statements to QC the raw_data input
assert type(raw_data) == str, "The matrix given is not a string"
assert raw_data[0] != "[", "Use Matlab matrix notation without braces"
#Initialize default values
self._m = 1
self._n = 1
#Split the matrix up into rows of strings and set m to len(self._data
self._data = raw_data.split(';')
self._m = len(self._data)
#Continue parsing the input, make rows into lists of numbers
for x in range(0, self.m):
self._data[x] = self._data[x].split(",")
self._data[x] = list(map(float, self._data[x]))
#Set n to the number of elements in each row
self._n = len(self._data[0])
#Set whether the matrix is a square matrix
if self.m == self.n:
self._is_square = True
#Case 2: raw_data is a valid 2D list in matrix form:
elif type(raw_data) == list:
self._data = raw_data
self._m = len(self._data)
self._n = len(self._data[0])
if self._m == self.n:
self._is_square = True
def _check(self):
"""
Procedure: Checks if the raw input was valid by QCing the properties
of the matrix. Called during object construction.
"""
#Test if the rest of the rows are the same length
for r in range(0, self.m):
assert len(self._data[r]) == self.n, "Invalid row lengths"
assert type(self._data[r]) == list, "Some rows aren't lists"
#== Matrix Functions =====================================================
def ref(matrix):
    """
    Procedure: Row reduces a matrix IN PLACE to row echelon form
    (Gaussian elimination). Returns None.

    NOTE(review): this file targets Python 2 (print statements elsewhere);
    under Python 3 the `map(...)` below returns an iterator, which would
    trip set_row's `type(row) == list` assert -- confirm before porting.
    """
    #Initialize variables:
    lead = 1                #The index of the pivot column
    col_count = matrix.n    #The number of columns
    row_count = matrix.m    #The number of rows
    for r in range(1, row_count+1):
        #If we have finished REF on the last column, end.
        if col_count < lead:
            return
        #Start with the rth row when finding a pivot entry
        i = r
        #Find the pivot entry of the pivot column - first non-zero entry
        while matrix.get(i, lead) == 0:
            i += 1
            #If the whole column is empty, go to the next column.
            if row_count < i:
                i = r
                lead += 1
                if col_count < lead:
                    return
        #Swap rows i and r such that the pivot entry is above all others
        row_i = matrix.get_row(i)
        row_r = matrix.get_row(r)
        matrix.set_row(i, row_r)
        matrix.set_row(r, row_i)
        #Update row_i and row_r lists for next steps
        row_i = matrix.get_row(i)
        row_r = matrix.get_row(r)
        if matrix.get(r, lead) != 0:
            #Divide the whole row by the pivot to get a leading 1
            pivot = matrix.get(r,lead)
            row_r = unimath.divlist(row_r, pivot)
            matrix.set_row(r, row_r)
        for i in range(r+1, row_count+1):
            #Row operations: zero out every entry below the pivot.
            multiplier = matrix.get(i, lead)
            row_r = matrix.get_row(r)
            row_i = matrix.get_row(i)
            row_r = unimath.mullist(row_r, multiplier)
            sublist = lambda m,n: m-n
            row_i = map(sublist, row_i, row_r)
            matrix.set_row(i, row_i)
        #Set the next column to be the pivoting column.
        lead += 1
def rref(m):
    """
    Returns: Reduced row echelon form of a matrix (Gauss-Jordan).

    The input matrix `m` is NOT modified: a working copy is built from
    parse_list(m._data) and returned.

    NOTE(review): as in ref(), the `map(...)` below returns a list only
    under Python 2 -- confirm before porting to Python 3.
    """
    #Initialize variables:
    orig_data = parse_list(m._data)
    matrix = Matrix(orig_data)  #New matrix to return as RREF
    lead = 1                    #The index of the pivot column
    col_count = matrix.n        #The number of columns
    row_count = matrix.m        #The number of rows
    for r in range(1, row_count+1):
        #If we have finished RREF on the last column, end.
        if col_count < lead:
            return matrix
        #Start with the rth row when finding a pivot entry
        i = r
        #Find the pivot entry of the pivot column - first non-zero entry
        while matrix.get(i, lead) == 0:
            i += 1
            #If the whole column is empty, go to the next column.
            if row_count < i:
                i = r
                lead += 1
                if col_count < lead:
                    #If last pivot is a 0, set the column as a zero vector
                    zero = [0 for x in range(row_count)]
                    matrix.set_col(col_count, zero)
                    return matrix
        #Swap rows i and r such that the pivot entry is above all others
        row_i = matrix.get_row(i)
        row_r = matrix.get_row(r)
        matrix.set_row(i, row_r)
        matrix.set_row(r, row_i)
        #Update row_i and row_r lists for next steps
        row_i = matrix.get_row(i)
        row_r = matrix.get_row(r)
        if matrix.get(r, lead) != 0:
            #Divide the whole row by the pivot to get a leading 1
            pivot = matrix.get(r,lead)
            row_r = unimath.divlist(row_r, pivot)
            matrix.set_row(r, row_r)
        for i in range(1, row_count+1):
            #Row operations: zero out every entry above AND below the pivot.
            if i != r:
                multiplier = matrix.get(i, lead)
                row_r = matrix.get_row(r)
                row_i = matrix.get_row(i)
                row_r = unimath.mullist(row_r, multiplier)
                sublist = lambda m,n: m-n
                row_i = map(sublist, row_i, row_r)
                matrix.set_row(i, row_i)
        #Set the next column to be the pivoting column.
        lead += 1
    return matrix
def trans(matrix):
    """
    Returns: The transpose of a given matrix (a new n-by-m Matrix; the
    input is left untouched).
    """
    result = Matrix(unimath.zeros(matrix.n, matrix.m))
    #Copy element (row, col) of the input into (col, row) of the result.
    for row in range(1, matrix.m + 1):
        for col in range(1, matrix.n + 1):
            result.set(col, row, matrix.get(row, col))
    return result
def det(m):
    """
    Returns: The determinant of a given matrix computed recursively by
    cofactor (Laplace) expansion along the first row.

    NOTE(review): returns None (implicitly) for non-square input, and the
    2x2 base case means a 1x1 matrix is not handled -- confirm callers
    only pass square matrices of size >= 2.
    """
    matrix = Matrix(parse_list(m._data))
    #If the matrix isn't square, no determinant
    if matrix.is_square:
        size = matrix.n
        d = 0
        #2x2 matrix base case: ad - bc
        if size == 2:
            return (matrix.get(1,1)*matrix.get(2,2) - matrix.get(1,2)*matrix.get(2,1))
        else:
            for col in range(size+1)[1:]:
                #Rebuild a fresh copy each column, since the minor is
                #created by deleting a row/column in place.
                matrix = Matrix(parse_list(m._data))
                if col % 2 != 0:
                    #Odd column: positive cofactor sign
                    coefficient = matrix.get(1,col)
                    #Create the submatrix (minor of entry (1, col))
                    matrix.del_row(1)
                    matrix.del_col(col)
                    #Add the term to the determinant returned
                    d = d + coefficient * det(Matrix(parse_list(matrix._data)))
                if col % 2 == 0:
                    #Even column: negative cofactor sign
                    coefficient = -matrix.get(1,col)
                    #Create the submatrix (minor of entry (1, col))
                    matrix.del_row(1)
                    matrix.del_col(col)
                    #Add the term to the determinant returned
                    d = d + coefficient * det(Matrix(parse_list(matrix._data)))
            #return the determinant after summation
            return d
def inv(m):
    """
    Returns: The inverse of a given matrix, computed by Gauss-Jordan
    elimination on a working copy while applying the same row operations
    to an identity matrix. The input `m` is not modified.

    Precondition: the matrix is square (asserted). NOTE(review): a
    singular matrix is not detected -- the partially-reduced identity is
    returned via the early returns; confirm callers check invertibility.
    """
    orig_data = parse_list(m._data)
    matrix = Matrix(orig_data)
    assert matrix.is_square, "The matrix is not square and not invertible."
    inverse = identity(matrix.n)
    #Initialize variables:
    lead = 1                    #The index of the pivot column
    col_count = matrix.n        #The number of columns
    row_count = matrix.m        #The number of rows
    for r in range(1, row_count+1):
        #If we have finished RREF on the last column, end.
        if col_count < lead:
            return inverse
        #Start with the rth row when finding a pivot entry
        i = r
        #Find the pivot entry of the pivot column - first non-zero entry
        while matrix.get(i, lead) == 0:
            i += 1
            #If the whole column is empty, go to the next column.
            if row_count < i:
                i = r
                lead += 1
                if col_count < lead:
                    return inverse
        #Swap rows i and r such that the pivot entry is above all others
        row_i = matrix.get_row(i)
        row_r = matrix.get_row(r)
        matrix.set_row(i, row_r)
        matrix.set_row(r, row_i)
        #Mirror the same swap on the identity
        inv_row_i = inverse.get_row(i)
        inv_row_r = inverse.get_row(r)
        inverse.set_row(i, inv_row_r)
        inverse.set_row(r, inv_row_i)
        #Update row_i and row_r lists for next steps
        row_i = matrix.get_row(i)
        row_r = matrix.get_row(r)
        inv_row_i = inverse.get_row(i)
        inv_row_r = inverse.get_row(r)
        if matrix.get(r, lead) != 0:
            #Divide the whole row by the pivot to get a leading 1
            #(applied to both the working copy and the identity)
            pivot = matrix.get(r,lead)
            row_r = unimath.divlist(row_r, pivot)
            inv_row_r = unimath.divlist(inv_row_r, pivot)
            matrix.set_row(r, row_r)
            inverse.set_row(r, inv_row_r)
        for i in range(1, row_count+1):
            #Row operations: zero out entries above and below the pivot,
            #mirrored on the identity so it accumulates the inverse.
            if i != r:
                multiplier = matrix.get(i, lead)
                row_r = matrix.get_row(r)
                row_i = matrix.get_row(i)
                inv_row_r = inverse.get_row(r)
                inv_row_i = inverse.get_row(i)
                row_r = unimath.mullist(row_r, multiplier)
                inv_row_r = unimath.mullist(inv_row_r, multiplier)
                sublist = lambda m,n: m-n
                row_i = map(sublist, row_i, row_r)
                inv_row_i = map(sublist, inv_row_i, inv_row_r)
                matrix.set_row(i, row_i)
                inverse.set_row(i, inv_row_i)
        #Set the next column to be the pivoting column.
        lead += 1
    return inverse
def QR(matrix):
    """
    Returns: The QR decomposition of a matrix.

    TODO: not yet implemented (stub).
    """
    pass
def diag(matrix):
    """
    Returns: The diagonalized form of a given matrix.

    TODO: not yet implemented (stub).
    """
    pass
def sym(matrix):
    """
    Returns: The symmetric form of a given matrix, computed as the
    product of the matrix with its transpose (A * A^T), which is always
    symmetric.
    """
    return matrix * trans(matrix)
def eig(matrix, option=None):
    """
    Returns: eigenvectors/values of a given matrix. Changing the option
    changes the algorithm used to compute eigenvectors/values.

    TODO: not yet implemented (stub).
    """
    pass
def identity(n):
    """
    Returns: The identity matrix of size n (ones on the diagonal,
    zeros everywhere else).
    """
    result = Matrix(unimath.zeros(n, n))
    for k in range(1, n + 1):
        result.set(k, k, 1)
    return result
def spectral():
    # TODO: spectral decomposition -- not yet implemented (stub).
    pass
def svd():
    # TODO: singular value decomposition -- not yet implemented (stub).
    pass
#== Other Functions ======================================================
def parse_list(l):
    """
    Returns: A 2D list converted into the Matlab-style string expected by
    the Matrix constructor ("1,2;3,4" -- commas between elements,
    semicolons between rows).
    """
    text = str(l)
    #Strip the list punctuation step by step: outer brackets, then inner
    #brackets, then turn row separators into ';' and drop all spaces.
    for old, new in (("[[", ""), ("]]", ""), ("[", ""), ("],", ";"), (" ", "")):
        text = text.replace(old, new)
    return text
#== Properties of Matrix Testers =========================================
def is_square(matrix):
    """
    Returns: True if the matrix is square, else False.

    Precondition: matrix is a Matrix object.

    TODO: not yet implemented (stub); the Matrix.is_square property
    already provides this.
    """
    pass
def is_li(matrix):
    """
    Returns: whether the matrix rows/columns are linearly independent.

    TODO: not yet implemented (stub).
    """
    pass
| true |
55c66a66feecaff04a6d39b148d8852ef3f15a75 | Python | theRealSuperMario/tikzplotlib | /test/test_subplots_with_colorbars.py | UTF-8 | 533 | 2.578125 | 3 | [
"MIT"
] | permissive | def plot():
import numpy as np
from matplotlib import pyplot as plt
data = np.zeros((3, 3))
data[:2, :2] = 1.0
fig = plt.figure()
ax1 = plt.subplot(131)
ax2 = plt.subplot(132)
ax3 = plt.subplot(133)
axes = [ax1, ax2, ax3]
for ax in axes:
im = ax.imshow(data)
fig.colorbar(im, ax=ax, orientation="horizontal")
return fig
# TODO reintroduce
# from helpers import assert_equality
# def test():
# assert_equality(plot, __file__[:-3] + "_reference.tex")
# return
| true |
e66c58a1daeed7fab59094ef61cef577df604575 | Python | sejin1996/pythonstudy | /ch03/function/17_global_variable.py | UTF-8 | 1,245 | 3.921875 | 4 | [] | no_license | # 전역 변수 (global variable)
# A global variable is defined outside any function and is
# usable everywhere in the program.
# To REBIND a global from inside a function you need the `global` keyword
# (merely reading it, as below, needs nothing special).
a=1 # global variable `a`, defined outside any function
def show():
    """Demonstrate reading globals inside a function: prints a, b and
    their sum. Globals a and b only need to exist by the time show() is
    CALLED, not when it is defined."""
    c=a+b # global `a` (and `b`) are readable anywhere
    print(a) # global a
    print(b) # global b
    print(c) # local c
def add():
    """Print the globals a and b (name is misleading: nothing is added)."""
    print(a) # global `a` is usable anywhere
    print(b)
b=2 # global `b`, defined outside a function -- usable everywhere
show()
add()
print(a) # globals are usable everywhere
print(b) # inside functions and at module level alike
# print(c) # a global must already be defined BEFORE the point of use
c=3
# Whether a global is defined before or after a function DEFINITION does not matter
### Global-variable summary: usable anywhere in the file
# From a function's point of view:
#  a global can be used inside the function body
#  even if the global was created after the function was defined
# When executing code at module level that uses a global,
#  the global must have been created before that line runs
fc78a3d93c26406d8444331ac106d9da08386f2e | Python | pawelmuller/core-wars | /test_warrior.py | UTF-8 | 677 | 2.53125 | 3 | [] | no_license | from warrior import Warrior
from Redcode import Instruction
def test_Warrior_import_from_file():
instructions = [
Instruction("ADD #4, 3"),
Instruction(" MOV 2, @2"),
Instruction("JMP -2"),
Instruction(" DAT #0, #0")
]
warrior = Warrior("Warriors/Dwarf.red")
for correct, test in zip(instructions, warrior._instructions):
assert correct.compare(test)
def test_Warrior_add_process():
    """After setting the start address and adding one more process, the
    process queue must contain both addresses in order."""
    warrior = Warrior("Warriors/Dwarf.red")
    test_core = [0 for _ in range(5000)]
    warrior.attach_core(test_core)
    warrior.set_start(15)
    warrior.add_process(90)
    assert warrior._process_queue == [15, 90]
| true |
bb4951cbe596c4b5666c6750880b563849d5a6e0 | Python | oesteban/quality-assessment-protocol | /scripts/qap_aws_s3_dict_generator.py | UTF-8 | 3,364 | 2.921875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
def pull_S3_sublist(yaml_outpath, img_type, bucket_name, bucket_prefix, creds_path):
import os
from CPAC.AWS import fetch_creds
import yaml
s3_list = []
s3_dict = {}
bucket = fetch_creds.return_bucket(creds_path, bucket_name)
# Filter for anat/rest
if img_type == 'anat':
subkey_type = 'anatomical_scan'
elif img_type == 'rest':
subkey_type = 'functional_scan'
# Build S3-subjects to download
for bk in bucket.list(prefix=bucket_prefix):
s3_list.append(str(bk.name))
# Build dictionary of filepaths
for sfile in s3_list:
ssplit = sfile.split('/')
sub_id = ssplit[-4]
session_id = ssplit[-3]
scan_id = ssplit[-2]
if img_type in scan_id:
# this ONLY handles raw data inputs, not CPAC-generated outputs!
if not s3_dict.has_key((sub_id, session_id, scan_id)):
resource_dict = {}
resource_dict[subkey_type] = sfile
s3_dict[(sub_id, session_id, scan_id)] = {}
s3_dict[(sub_id, session_id, scan_id)].update(resource_dict)
else:
s3_dict[(sub_id, session_id, scan_id)].update(resource_dict)
else:
continue
if len(s3_dict) == 0:
err = "\n[!] Filepaths have not been successfully gathered from " \
"the S3 bucket!\n"
raise Exception(err)
dict_len = len(s3_dict)
# write yaml file
with open(yaml_outpath,"wt") as f:
f.write(yaml.dump(s3_dict))
if os.path.isfile(yaml_outpath):
print "\nS3 dictionary file successfully created: %s\n" % yaml_outpath
print "Total number of subject-session-scans: %d\n" % dict_len
else:
err = "\n[!] Filepaths from the S3 bucket have not been " \
"successfully saved to the YAML file!\nOutput filepath: %s\n" \
% yaml_outpath
raise Exception(err)
def main():
    """Parse command-line arguments and run pull_S3_sublist."""

    import argparse

    parser = argparse.ArgumentParser()

    parser.add_argument("scan_type", type=str, \
                            help="'anat' or 'func', depending on which QAP " \
                                 "measures you will be using the S3 subject "\
                                 "dictionary for")

    parser.add_argument("bucket_name", type=str, \
                            help="the name of your AWS S3 bucket")

    parser.add_argument("bucket_prefix", type=str, \
                            help="the filepath prefix to the top level of " \
                                 "your raw data directory on S3 storage")

    parser.add_argument("creds_path", type=str, \
                            help="the path to the file containing your AWS " \
                                 "credentials")

    parser.add_argument("outfile_path", type=str, \
                            help="the full filepath for the S3 subject " \
                                 "YAML dictionary this script will create")

    args = parser.parse_args()

    # run it!
    # NOTE(review): pull_S3_sublist's img_type expects 'anat' or 'rest',
    # while the help text above says 'anat' or 'func' -- confirm intent.
    pull_S3_sublist(args.outfile_path, args.scan_type, args.bucket_name, \
                        args.bucket_prefix, args.creds_path)

if __name__ == "__main__":
    main()
| true |
7f1e2ab4bdb9d0cb688de9cfc92b86389fa5c7fb | Python | tkmanabat/RoboMark | /cogs/translate.py | UTF-8 | 834 | 2.765625 | 3 | [] | no_license | import discord
from discord.ext import commands
import googletrans
from googletrans import Translator
class translate(commands.Cog):
    """Discord cog exposing a !translate command backed by googletrans."""
    def __init__(self,client):
        # the bot/client instance this cog is attached to
        self.client=client

    @commands.command(name="translate", help="Visit https://py-googletrans.readthedocs.io/en/latest/ for languages supported")
    async def translate(self, ctx, lang_to, *args):
        """Translate the remaining message words into language `lang_to`
        (a googletrans language name or code) and send the result back.
        NOTE(review): googletrans' translate() is a blocking network call
        inside an async handler -- it will stall the event loop while it
        runs; confirm this is acceptable for this bot."""
        lang_to = lang_to.lower()
        # Reject anything that is neither a known language name nor code
        if lang_to not in googletrans.LANGUAGES and lang_to not in googletrans.LANGCODES:
            raise commands.BadArgument("Sorry lods di pwede yang gawa gawa mong language")
        text = ' '.join(args)
        translator = googletrans.Translator()
        text_translated = translator.translate(text, dest=lang_to).text
        await ctx.send(text_translated)
def setup(client):
    """discord.py extension entry point: register this cog with the bot."""
    client.add_cog(translate(client))
0e158032bdefeb55663c2f4bdb25d6e1bd7c6152 | Python | chseng213/QingCloudTest | /main/utils/request.py | UTF-8 | 1,453 | 2.96875 | 3 | [] | no_license | # encoding: utf-8
import requests
class Request(object):
    """
    Thin convenience wrapper around a requests session: GET with retries
    and response validation.
    """
    MAX_RETRY = 3

    def __init__(self, session=None, timeout=5):
        """
        :param session: requests session object default:new session obj
        :param timeout: requests timeout default:5S
        """
        self.session = requests.session() if session is None else session
        self.timeout = timeout

    def get(self, url):
        """
        http get method and raise exception where result does not meet expectations
        :param url: api url
        :return: json data or raise RequestsException
        """
        connect_errors = 0
        request_errors = 0
        status = 0
        response = {}
        attempts = 0
        # Retry up to MAX_RETRY times; stop on the first completed request.
        while attempts < self.MAX_RETRY:
            attempts += 1
            try:
                response = self.session.get(url, timeout=self.timeout)
                status = response.status_code
            except requests.exceptions.ConnectionError:
                connect_errors += 1
            except requests.exceptions.RequestException:
                request_errors += 1
            else:
                break
        # All retries exhausted or non-200 status -> report error tallies
        assert status == 200, "ConnectionError:%s\tRequestError:%s" % (connect_errors, request_errors)
        # API-level failure: ret_code != 0 -> surface the server message
        assert response.json().get("ret_code") == 0, response.json().get("message")
        return response
| true |
0610c676cda0f00ab360f280c39364e22d784abc | Python | Thxios/ProjectResearch | /Gomoku/game.py | UTF-8 | 2,174 | 3.484375 | 3 | [] | no_license | from lib import *
from .board import Board
from .validation import ThreeChecker, FourChecker, FiveChecker, SixChecker
class Game:
    """15x15 Gomoku game state; storage is delegated to a Board instance."""

    def __init__(self):
        # self._board = np.zeros((15, 15), dtype=np.int)
        self._board = Board(np.zeros((15, 15), dtype=np.int))

    def get(self, x, y):
        """Return the stone at (x, y); bounds handling lives in Board.get."""
        return self._board.get(x, y)

    def board(self):
        """Expose the underlying board array."""
        return self._board.board()

    '''
    def valid(self, x, y, turn) -> bool:
        if x < 0 or x >= 15 or y < 0 or y >= 15:
            return False
        if self.get(x, y) != 0:
            return False
        lines = self.get_direction_lines(x, y, turn)
        if turn == BLACK:
            _six = SixChecker.check(lines, turn)
            _five = FiveChecker.check(lines, turn)
            if _five > _six:
                print('BLACK win')
                return True
            if _six:
                return False
            if ThreeChecker.check(lines, turn) >= 2:
                return False
            if FourChecker.check(lines, turn) >= 2:
                return False
        elif turn == WHITE:
            if FiveChecker.check(lines, turn):
                print('WHITE win')
                return True
        return True

    def get_direction_lines(self, x, y, put=None):
        origin = vector(x, y)
        lines = np.array([[self.get(*(i * direction + origin)) for i in range(-4, 5)] for direction in directions])
        if put is not None:
            for line in lines:
                line[4] = put
        return lines

    def put(self, x, y, turn):
        # if self.valid(x, y, turn):
        #     self._board[x, y] = turn
        self._board.board[x, y] = turn
    '''

    def show(self):
        """Print the board: '.' empty, 'X' player 1, 'O' player 2."""
        marks = {0: '. ', 1: 'X ', 2: 'O '}
        for row in range(15):
            for col in range(15):
                # Unknown cell values print nothing, matching the original.
                print(marks.get(self.get(col, row), ''), end='')
            print('\n', end='')
        print('\n')
| true |
31a392285420ad29bf867a31bd3c23c8502597e4 | Python | Photonsnake/Photonsnake.github.io | /second.py | UTF-8 | 296 | 3.9375 | 4 | [] | no_license | num1 = int(input('Введите первое число\n'))
# Read the second integer; num1 (the first operand) is read just above.
num2 = int(input('Введите второе чилсо\n'))
# Sum of the two inputs (the int() wrapper is redundant: both are already ints).
num3 = int(num1 +num2)
# Report whether the sum is even or odd (user-facing messages are Russian).
if num3 % 2 == 0:
    print('Сумма этих чисел четная')
else:
    print('Сумма этих чисел нечетная')
| true |
7e194b09e46bb13fa3a551e52d32b838dc40124c | Python | Vagacoder/Python_for_everyone | /Ch05/2017-8-13.py | UTF-8 | 1,138 | 3.75 | 4 | [] | no_license | ## Ch05 P5.5
def repeat(string, n, delim):
    """Return *string* repeated *n* times, separated by *delim*.

    Bug fix: the original ignored ``n`` and always produced exactly three
    copies (``string + (delim+string)*2``).  Joining honours the parameter;
    for n=3 the output is unchanged, so the demo call below still prints
    'ho, ho, ho'.  n=0 yields the empty string.
    """
    return delim.join([string] * n)

print(repeat('ho', 3, ', '))
## Ch05 P5.22
def balance(initial, rate, year):
    """Return the balance after *year* years of *rate*% annual compound interest."""
    growth_factor = 1 + rate / 100
    return initial * growth_factor ** year

print(balance(1000, 5, 1))
## Ch05 R5.6
def value(currency, float_number):
    """Print *float_number* rounded to two decimals, prefixed by *currency*."""
    formatted = '%s%.2f' % (currency, float_number)
    print(formatted)

value('USD', 2985.39755)
## Ch05 P5.25
def printDigit(d):
    """Print the 5-bar POSTNET encoding of a single digit 0-9 (no newline).

    Values outside 0-9 print nothing, matching the original if/elif chain.
    """
    bars = {
        0: '||:::', 1: ':::||', 2: '::|:|', 3: '::||:', 4: ':|::|',
        5: ':|:|:', 6: ':||::', 7: '|:::|', 8: '|::|:', 9: '|:|::',
    }
    code = bars.get(d)
    if code is not None:
        print(code, end='')
def printZip(zipcode):
    """Print the POSTNET barcode for *zipcode*, framed by '|' guard bars.

    Each digit is rendered by printDigit(); a check digit is appended so
    that the digit sum becomes a multiple of 10.

    Bug fix: the original computed ``check = 10 - sum % 10``, which yields
    the invalid "digit" 10 (silently printed as nothing) whenever the digit
    sum is already a multiple of 10.  The extra ``% 10`` maps that case to
    the correct POSTNET check digit 0.
    """
    digits = str(zipcode)
    print('|', end='')
    total = 0
    for ch in digits:
        printDigit(int(ch))
        total += int(ch)
    check = (10 - total % 10) % 10
    printDigit(check)
    print('|')

printZip(95014)
21d31f827202e1d44d461f2450a0c2929c3f09ca | Python | alexgomezalanis/gkde-loss-function | /gkde_loss.py | UTF-8 | 6,491 | 2.6875 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from random import shuffle
from torch.distributions.multivariate_normal import MultivariateNormal
class KernelDensityLoss(nn.Module):
  """
  Kernel Density Gaussian loss
  Takes a batch of embeddings and corresponding labels.

  A Gaussian kernel density estimate over pairwise embedding distances gives,
  for each utterance, a likelihood under every class; those likelihoods are
  turned into a scalar loss by one of several strategies (softmax / contrast /
  triplet / combinations) selected via ``loss_method``.
  """
  def __init__(
    self,
    device,
    emb_size=64,
    scale_matrix=True,
    init_w=10.0,
    init_b=0.1,
    init_bandwidth=1.0,
    loss_method='softmax',
    margin_triplet=1.0,
    optimize_bandwidth=True,
    num_classes=7,
    kde_log=False
  ):
    """
    Args:
      device: torch device on which learnable parameters are allocated.
      emb_size: embedding dimensionality (stored only; never validated here).
      scale_matrix: when True, learn an affine rescaling ``w * probs + b``.
      init_w, init_b: initial values for that affine rescaling.
      init_bandwidth: initial per-class Gaussian kernel bandwidth.
      loss_method: one of 'softmax', 'contrast', 'triplet',
        'softmax_contrast', 'all'.
      margin_triplet: margin used by the triplet formulation.
      optimize_bandwidth: when True, bandwidths become a learnable Parameter.
      num_classes: number of classes (re-derived from targets in forward()).
      kde_log: when True, work with log-likelihoods instead of likelihoods.
    """
    super(KernelDensityLoss, self).__init__()
    if optimize_bandwidth:
      self.bandwidths = nn.Parameter(torch.tensor(num_classes * [init_bandwidth]).to(device))
    else:
      self.bandwidths = num_classes * [init_bandwidth]
    self.scale_matrix = scale_matrix
    self.kde_log = kde_log
    if scale_matrix:
      self.w = nn.Parameter(torch.tensor(init_w).to(device))
      self.b = nn.Parameter(torch.tensor(init_b).to(device))
    self.emb_size = emb_size
    # NOTE(review): self.distributions is never used elsewhere in this class.
    self.distributions = []
    self.probs = []
    self.digit_indices = []
    self.device = device
    self.num_classes = num_classes
    self.loss_method = loss_method
    self.margin_triplet = torch.FloatTensor([margin_triplet])
    self.margin_triplet = self.margin_triplet.to(device)
    assert self.loss_method in ['softmax', 'contrast', 'triplet', 'softmax_contrast', 'all']
    # Bind the single-strategy losses once; 'softmax_contrast' and 'all'
    # are dispatched explicitly in forward().
    if self.loss_method == 'softmax':
      self.embed_loss = self.softmax_loss
    elif self.loss_method == 'contrast':
      if kde_log:
        self.embed_loss = self.contrast_loss_log
      else:
        self.embed_loss = self.contrast_loss
    elif self.loss_method == 'triplet':
      self.embed_loss = self.triplet_loss
  def get_likelihood(self, index_utt, index_class):
    """Gaussian-kernel likelihood of utterance *index_utt* under *index_class*.

    Averages exp(-d / (2 * bandwidth)) over the distances between this
    utterance and every other utterance of the class (the utterance itself
    is excluded).  Returns the log of that average when ``kde_log`` is set.
    """
    probs = []
    for j in self.digit_indices[index_class]:
      if index_utt != j:
        # self.distances is filled upper-triangular, so order the pair.
        index_i = min(index_utt, j)
        index_j = max(index_utt, j)
        probs.append(torch.exp(-self.distances[index_i][index_j] / (2 * self.bandwidths[index_class])))
    likelihood = torch.mean(torch.stack(probs))
    if self.kde_log:
      return torch.log(likelihood)
    return likelihood
  def triplet_loss(self, embeddings):
    """Semi-hard triplet loss over the class-likelihood matrix self.probs.

    For each (class, utterance), the positive score is its likelihood under
    its own class; a negative score is searched among the other classes,
    preferring one inside the margin (semi-hard), else the hardest seen.
    NOTE(review): the appends below execute once per *negative class*, so
    negative_probs can grow (num_classes - 1) entries per positive while
    positive_probs grows one -- verify the intended indentation before
    relying on loss_method='triplet' or 'all'.
    """
    negative_probs = []
    positive_probs = []
    for index_pos, arr_pos in enumerate(self.digit_indices):
      len_arr_pos = len(arr_pos)
      for pos_idx in range(len_arr_pos):
        pos_prob = self.probs[index_pos,pos_idx,index_pos]
        positive_probs.append(pos_prob)
        # Choose randomly a negative sample which lies inside the margin
        neg_classes = [n for n in range(self.num_classes) if n != index_pos]
        shuffle(neg_classes)
        max_prob = float('-inf')
        is_semihard = False
        for index_neg in neg_classes:
          arr_neg = list(range(len(self.digit_indices[index_neg])))
          shuffle(arr_neg)
          for neg_idx in arr_neg:
            neg_prob = self.probs[index_neg,neg_idx,index_pos]
            if (pos_prob > neg_prob and neg_prob + self.margin_triplet > pos_prob):
              is_semihard = True
              break
            elif neg_prob > max_prob:
              max_prob = neg_prob
          if is_semihard:
            negative_probs.append(neg_prob)
          else:
            negative_probs.append(max_prob)
    positive_probs = torch.stack(positive_probs)
    negative_probs = torch.stack(negative_probs)
    L = F.relu(negative_probs - positive_probs + self.margin_triplet)
    loss = L.sum()
    return loss
  def softmax_loss(self, embeddings):
    """Cross-entropy over likelihood rows: -log_softmax of the own-class score."""
    # N spoofing classes, M utterances per class
    N, M, _ = list(self.probs.size())
    L = []
    for j in range(N):
      L_row = []
      for i in range(M):
        L_row.append(-F.log_softmax(self.probs[j,i], 0)[j])
      L_row = torch.stack(L_row)
      L.append(L_row)
    L_torch = torch.stack(L)
    return L_torch.sum()
  def contrast_loss(self, embeddings):
    """Contrastive loss: 1 - sigmoid(own-class score) + max sigmoid over other classes."""
    # N spoofing classes, M utterances per class
    N, M, _ = list(self.probs.size())
    L = []
    for j in range(N):
      L_row = []
      for i in range(M):
        probs_to_classes = torch.sigmoid(self.probs[j,i])
        # Drop the own-class entry before taking the hardest competitor.
        excl_probs_to_classes = torch.cat((probs_to_classes[:j], probs_to_classes[j+1:]))
        L_row.append(1.0 - torch.sigmoid(self.probs[j,i,j]) + torch.max(excl_probs_to_classes))
      L_row = torch.stack(L_row)
      L.append(L_row)
    L_torch = torch.stack(L)
    return L_torch.sum()
  def contrast_loss_log(self, embeddings):
    """Log-domain contrastive loss: hinge on (best other-class score - own score)."""
    # N spoofing classes, M utterances per class
    N, M, _ = list(self.probs.size())
    L = []
    for j in range(N):
      L_row = []
      for i in range(M):
        probs_to_classes = self.probs[j,i]
        # Drop the own-class entry before taking the hardest competitor.
        excl_probs_to_classes = torch.cat((probs_to_classes[:j], probs_to_classes[j+1:]))
        L_row.append(torch.max(excl_probs_to_classes) - self.probs[j,i,j])
      L_row = torch.stack(L_row)
      L.append(L_row)
    L_torch = torch.stack(L)
    return F.relu(L_torch).sum()
  def forward(self, embeddings, target, size_average=True):
    """Compute the KDE loss for a batch.

    Args:
      embeddings: batch of embedding vectors, indexable as embeddings[i].
      target: array-like of integer class labels; assumed to cover
        0..num_classes-1 (np.unique is only used for the class count).
      size_average: unused -- NOTE(review): kept for API compatibility only.
    """
    classes = np.unique(target)
    self.num_classes = len(classes)
    # Indices of the utterances belonging to each class.
    self.digit_indices = [np.where(target == i)[0] for i in range(self.num_classes)]
    # Upper-triangular matrix of pairwise L2 distances between embeddings.
    self.distances = [[0] * len(target) for _ in range(len(target))]
    for i in range(len(target)):
      for j in range(i+1, len(target)):
        self.distances[i][j] = torch.dist(embeddings[i], embeddings[j], 2)
    # probs[j, i, c] = likelihood of utterance i of class j under class c.
    probs = []
    for class_idx, class_indices in enumerate(self.digit_indices):
      probs_row = []
      for utt_idx, utterance in enumerate(class_indices):
        probs_col = []
        for class_centroid in range(self.num_classes):
          probs_col.append(self.get_likelihood(utterance, class_centroid))
        probs_col = torch.stack(probs_col)
        probs_row.append(probs_col)
      probs_row = torch.stack(probs_row)
      probs.append(probs_row)
    self.probs = torch.stack(probs)
    if self.scale_matrix:
      # NOTE(review): torch.clamp is out-of-place and its result is discarded,
      # so this line has no effect; clamp_ was probably intended.
      torch.clamp(self.w, 1e-6)
      self.probs = self.w * self.probs + self.b
    if self.loss_method == 'all':
      loss = self.softmax_loss(embeddings) + self.contrast_loss(embeddings) + self.triplet_loss(embeddings)
    elif self.loss_method == 'softmax_contrast':
      if self.kde_log:
        loss = self.softmax_loss(embeddings) + self.contrast_loss_log(embeddings)
      else:
        loss = self.softmax_loss(embeddings) + self.contrast_loss(embeddings)
    else:
      loss = self.embed_loss(embeddings)
    return loss
c54fd7a0f4afa21e3ba14b40ff505d2e4ee0aaa3 | Python | uborzz/flask-csgo | /app/competitives/services.py | UTF-8 | 1,137 | 2.84375 | 3 | [] | no_license | from ..db import db
def update_players_and_maps_found_in_competitives():
"""
Exploramos partidas competitivas en el sistema y nos quedamos con el nick más
reciente y el id_steam de las coincidencias con la lista de miembros del clan
de steam, tengan o no abierto el perfil.
"""
matches = db.get_all_competitive_matches_simplified()
matches_count = matches.count()
clan_members_ids = db.get_members_ids()
members_in_competitives = list()
maps = list()
steam_ids_aux = list()
for match in matches:
team_text = "players_team" + str(match["local_team"])
for player in match[team_text]:
if player["steam_id"] in clan_members_ids:
if player["steam_id"] not in steam_ids_aux: # append only once
members_in_competitives.append(player)
steam_ids_aux.append(player["steam_id"])
if match["map"] not in maps:
maps.append(match["map"])
db.update_group_competitive_info(
players=members_in_competitives, maps=maps, n_matches=matches_count
)
print(members_in_competitives)
| true |
5da322f185f53a62e256f7ca17fd07b534139ac2 | Python | carol8562/lecture2 | /python_basics/06-loops.py | UTF-8 | 479 | 3.625 | 4 | [] | no_license | name = "Alice"
names = [name, "Bob", "Charlie"]
coordinates = (10.0, 20.0, 30.0)
mixed = [name, 40.0, True]
for i in range(len(name)):
print(name[i])
for i in range(len(names)):
print(names[i])
for i in range(len(coordinates)):
print(coordinates[i])
for i in range(len(mixed)):
print(mixed[i])
for letter in name:
print(letter)
for n in names:
print(n)
for coordinate in coordinates:
print(coordinate)
for elt in mixed:
print(elt)
| true |
060ab50f944aa5e531f953d2c20382ef9e191364 | Python | dapivei/agua-cdmx-tecnico | /src/utils/funcs_result.py | UTF-8 | 1,054 | 2.875 | 3 | [] | no_license | """
RELEVANT DATA AND INDICATORS
"""
import pandas as pd
def get_imputed_data(df1, df2):
    """Combine imputed values (*df1*) with location/geometry columns from *df2*.

    The result has columns ordered cvegeo, geometry, <df1 columns>, nom_loc,
    nom_mun, with names stripped and lower-cased.  Rows are aligned by index.

    Bug fix: the original built the frame but never returned it, so callers
    always received None.
    """
    imputed_data = pd.concat([df1, df2[['nom_loc', 'nom_mun']]], axis=1)
    imputed_data = pd.concat([df2[['cvegeo', 'geometry']], imputed_data], axis=1)
    imputed_data.columns = imputed_data.columns.str.strip().str.lower()
    return imputed_data
def indicator_1(df):
    """
    Estimar consumo promedio diario per capita, nivel delegación.

    Returns one row per municipality with the average daily per-capita
    consumption, its surplus/deficit against a 100-unit baseline, and
    population / housing totals.
    """
    totals = df.groupby('nom_mun').sum()
    # (consumo_total / pobtot) * 1000 converts to litres, / 60 to a daily rate.
    daily_per_capita = ((totals['consumo_total'] / totals['pobtot']) * 1000) / 60
    m = pd.DataFrame(daily_per_capita, columns=['comsumo_diario_promedio_total_per_capita']).reset_index()
    m['superavit_deficit'] = round(m['comsumo_diario_promedio_total_per_capita'] - 100, 2)
    m['poblacion_total'] = round(totals['pobtot'].reset_index(drop=True))
    m['viviendas_total'] = round(totals['vivtot'].reset_index(drop=True))
    m = m.round({'comsumo_diario_promedio_total_per_capita': 2})
    return(m)
| true |
c4a5f3fbd402f59e7f932c38930184774b077e73 | Python | google-code/avaloria | /src/objects/object_search_funcs.py | UTF-8 | 3,573 | 3.140625 | 3 | [
"LicenseRef-scancode-public-domain",
"ClArtistic"
] | permissive | """
Default functions for formatting and processing object searches.
This is in its own module due to them being possible to
replace from the settings file by use of setting the variables
ALTERNATE_OBJECT_SEARCH_ERROR_HANDLER
ALTERNATE_OBJECT_SEARCH_MULTIMATCH_PARSER
Both the replacing functions must have the same name and same input/output
as the ones in this module.
"""
def handle_search_errors(emit_to_obj, ostring, results, global_search=False):
    """
    Report the outcome of an object search to *emit_to_obj*.

    emit_to_obj - object that receives feedback via its .msg() method
    ostring - the original search string
    results - list of matched objects
    global_search - whether this was a global search (Builders may then
                    see dbrefs in the disambiguation list)

    Returns the single match, or None after messaging the searcher when
    there were zero or multiple matches.
    """
    if not results:
        emit_to_obj.msg("Could not find '%s'." % ostring)
        return None
    if len(results) == 1:
        return results[0]
    # Several matches: build a numbered disambiguation list of the form
    # "1-objname", "2-objname", ...
    show_dbref = global_search and emit_to_obj.check_permstring('Builders')
    header = "More than one match for '%s'" % ostring
    header += " (please narrow target):"
    lines = [header]
    for num, result in enumerate(results):
        carried = " (carried)" if result.location == emit_to_obj else ""
        dbref = "(#%i)" % result.id if show_dbref else ""
        lines.append(" %i-%s%s%s" % (num + 1, result.name, dbref, carried))
    emit_to_obj.msg("\n".join(lines).strip())
    return None
def object_multimatch_parser(ostring):
    """
    Split an 'N-name' search string into (N-1, 'name').

    Supports dbref-free disambiguation of identically named objects: after
    a multimatch listing such as
        1-ball
        2-ball
    the player can type e.g. 'get 3-ball' to pick the third match.

    Returns a tuple (index, string) where index is zero-based, or
    (None, ostring) when there is no usable number prefix.

    NOTE(review): relies on the Python 2 ``basestring`` builtin; under
    Python 3 this raises NameError.
    """
    if not isinstance(ostring, basestring):
        return (None, ostring)
    if '-' not in ostring:
        return (None, ostring)
    prefix, _, remainder = ostring.partition('-')
    try:
        return (int(prefix) - 1, remainder)
    except (ValueError, IndexError):
        # The prefix was not a number, so this is not an identifier.
        return (None, ostring)
| true |
30ce91b9d541e891ef03f4704e44098a47a5bf1f | Python | Justintanvo/RecipeProject | /RecipeProject.py | UTF-8 | 12,644 | 3.71875 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# # Can We Predict Whether a Recipe Is Vegetarian Based Off of Ratings and Nutrition Values?
# Specifically, we want to predict whether a recipe will be vegetarian based off of 5 features: Ratings, Calories, Fat, Protein, and Sodium using a recipe data set from Epicurious in Kaggle. This is a Multivariate Binary Classification Problem. We approach this problem using four popular machine learning algorithms for classification: Logistic Regression, Support Vector Machines, RandomForest, and XGBoosting. We also use a 5-fold Kfold Cross-Validation to compare the accuracy between the algorithms.
# In[40]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas import read_csv
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
import xgboost as gb
from xgboost import plot_tree
from xgboost import XGBClassifier
from xgboost import plot_importance
import statsmodels.api as sm
from sklearn.tree import export_graphviz
import graphviz as gz
import os
os.environ["PATH"] += os.pathsep + r'C:\Users\juvo\Desktop\graphviz-2.38\release\bin'
import seaborn as sns
# These libraries allows us to do the various algorithms and data visualization for our data analysis.
# # Data Exploration and Cleaning
# We first want to explore our data set and to clean the data set of outliers and NaN values if there are any.
# In[42]:
recipes = pd.read_csv("C:/Users/juvo/Desktop/Projects/RecipeProject/epi_r.csv")
print(recipes.shape) # get information of number of rows and columns
print(recipes.head()) # displays the first 5 rows of the data.
print(recipes.tail()) # displays the last 5 rows of the data.
# From the exploration, we can see that there are NaN values for calories. Likewise, we can also guess that there will be huge outliers in calories in some dishes that might affect the analysis, and so we want to remove them. (Note an alternate approach could be to fill in the missing NaN values with the average of the other data points)
# In[44]:
recipes_no_na = recipes.dropna() # drops all rows with NA values
recipes_no_outliers = recipes_no_na[recipes_no_na['calories'] < 10000] # removes outliers for 10000
# Also, we can see that the rating and the nutrition facts are all in the beginning columns. The other columns appear to be mostly ingredient categorical variables so we can remove them. There is one column "Vegetarian" that we do want to remain and so we create a new dataframe with the variables we are concerned with.
# In[46]:
recipes_important_variables = recipes_no_outliers.loc[0:, ['title','rating','calories','protein','fat','sodium','vegetarian']] # retains only important variables
print(recipes_important_variables.head()) # verifies whether our new dataframe is correct
# Now we want to split our dataframes into the dependent variable("Vegetarian") and the independent variables/features (Rating, Calories, Protein, Fat, and Sodium) and convert to a form that the algorithms can use.
# In[48]:
recipes_independent_variables = recipes_important_variables.loc[0:, ['rating','calories','protein','fat','sodium'] ]
recipes_dependent_variables = recipes_important_variables.loc[0:, "vegetarian" ]
recipes_independent_variables = recipes_independent_variables.values #Converts dataframe into a numpy array which the algorithms uses
recipes_dependent_variables = recipes_dependent_variables.values #Converts dataframe into a numpy array which the algorithms uses
print(recipes_independent_variables)
print(recipes_dependent_variables)
# # Algorithms Prediction Accuracy Comparison
# First, we want to compare the accuracy of the algorithms to each other by using a kfold-cross validation. We settle with 5, because it's the standard fold that gives sufficient accuracy without too much computational power needed. 5-fold cross validation splits the data into 5 data sets randomly. 4 of them are training sets and are tested against the last (test) set. This process is done 5 times, with each data set becoming a test set only once, and the other 4 becoming training sets.
#
# From the training sets, the Algorithms are making a prediction of whether a recipe is vegetarian which is determined by the features sets. Then, the predictions are tested against the test set.
# In[50]:
def get_score(model, X_train, X_test, y_train, y_test):
    """Fit *model* on the training split and return its accuracy score on the test split."""
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)
scores_lr = []
scores_svm = []
scores_rf = []
scores_xg = []
kf = KFold(n_splits=5, random_state=123) #splits the data into 5, and sets a seed for reproducibility
for train_index,test_index in kf.split(recipes_independent_variables): #does and shows the 5-fold Cross Validation and appends the accuracy to the score variable arrays aboves
X_train, X_test, y_train, y_test = recipes_independent_variables[train_index], recipes_independent_variables[test_index], recipes_dependent_variables[train_index], recipes_dependent_variables[test_index]
scores_lr.append(get_score(LogisticRegression(solver="lbfgs", multi_class="auto",max_iter=4000), X_train, X_test, y_train, y_test))
scores_svm.append(get_score(SVC(gamma="scale", probability=True), X_train, X_test, y_train, y_test))
scores_rf.append(get_score(RandomForestClassifier(n_estimators=100), X_train, X_test, y_train,y_test))
scores_xg.append(get_score(XGBClassifier(),X_train, X_test, y_train, y_test))
print(scores_lr)
print(scores_svm)
print(scores_rf)
print(scores_xg)
# From the four algorithms, we see that Random Forest is the most accurate, with XGBoosting second, and logistic regression and support vector machines being close in accuracy. This was surprising because XGBoosting and RandomForest are both decision trees, where XGBoosting usually outperforms RandomForest. The reason is probably that the default parameters established in the XGBoosting Algorithm need to be tuned towards the data set. Logistic regression did not perform as well most likely due to one reason being the presence of multicollinearity, or relationships, between the independent variables with each other. For example, it is very likely that there is a positively correlated relationship between calories and ratings. When calories go up in a recipe, most likely so will the rating. Support Vector Machines also did not perform as well due to most likely the default kernel needing to be tuned towards the data set.
# # Logistic Regression Analysis
# In[52]:
def get_predict_proba(model, X_train, X_test, y_train, y_test): # creates a function to predict probability for each recipe
    """Fit *model* on the training split and return its per-class probability estimates for the test split."""
    model.fit(X_train, y_train)
    return model.predict_proba(X_test)
logistic_regr = LogisticRegression(solver="lbfgs", multi_class="auto",max_iter=4000)
log_regpred = get_predict_proba(logistic_regr, X_train, X_test, y_train, y_test) # gets the probability for each recipe to be vegetarian
log_reg_coef = logistic_regr.coef_ # shows the coefficients of the logistic regression
np.set_printoptions(suppress=True) # turns scientific notation off
sns.set(style="darkgrid", color_codes=True)
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(ncols=5, sharey=True)
sns.regplot(x=recipes_important_variables['rating'], y=recipes_important_variables['vegetarian'], ax=ax1, logistic=True)
sns.regplot(x=recipes_important_variables['calories'], y=recipes_important_variables['vegetarian'], ax=ax2, logistic=True)
sns.regplot(x=recipes_important_variables['protein'], y=recipes_important_variables['vegetarian'], ax=ax3, logistic=True)
sns.regplot(x=recipes_important_variables['fat'], y=recipes_important_variables['vegetarian'], ax=ax4, logistic=True)
sns.regplot(x=recipes_important_variables['sodium'], y=recipes_important_variables['vegetarian'], ax=ax5, logistic=True)
print(log_regpred)
print(log_reg_coef)
log_reg_coef_outputeffect = np.exp(log_reg_coef) # Increase in output odd chances for every one unit increase in the independent variable
print(log_reg_coef_outputeffect)
# A major advantage of logistic regression is interpretability. For log_pred, we can see the probabilities assigned to whether or not a recipe is vegetarian. For example, the last recipe has probability [0.99420783, 0.00579217]. This means that according to the logistic regression, there's an approximate 99.4% chance that it's vegetarian, and 0.6% chance that it's not vegetarian because of the recipe's nutrition values and rating. For log_reg_coef, it gives us the coefficients of each independent variable. We look at the first independent variable rating with the coefficient of 0.13366278. For every 13.4% increase in the rating, there's an equivalent 14.3% [1.143] increase in the odds of the recipe being vegetarian.
# # Support Vector Machine Analysis
# In[85]:
svc = SVC(gamma="scale",probability=True).fit(X_train, y_train)
svc_predict = get_predict_proba(svc, X_train, X_test, y_train, y_test)
print("Prediction Matrix")
print(svc_predict)
# Support Vector Machines is considered a "black box" algorithm, so in contrast to the logistic equation there is less interpretability, especially with many features. There are no coefficients and, unlike Random Forest and XGBoosting, we can't do a feature importance unless the kernel (used to help create the decision boundary between yes or no) is set to linear. We kept RBF, the default kernel, since linear used too much computational power, and RBF is generally considered the more accurate kernel if properly tuned. However, we can predict the odds for each recipe of being vegetarian or not just like with logistic regression. So, just like in logistic regression, we see that [0.61100674, 0.38899326] is the yes and no odds for a recipe being vegetarian.
# # Random Forest Analysis
# In[84]:
rf = RandomForestClassifier(n_estimators=100).fit(X_train, y_train) #puts the fitted random forest algorithm with 100 trees
rf_important_features= rf.feature_importances_ #attribute that gets the value of the importance features
rf_predict = get_predict_proba(rf, X_train, X_test, y_train, y_test) #gets the predicted probability of the random forest algorithm
recipe_independent_variable_names = ["Rating", "Calories", "Protein", "Fat", "Sodium"]
recipe_independent_variable_names_str = str(recipe_independent_variable_names) #Sets the string of the names of independent variables to attach to the feature importances
rf_important_features_str=str(rf_important_features)
rf_feature_imp = pd.Series(rf_important_features,index=recipe_independent_variable_names).sort_values(ascending=False)
print(rf_feature_imp)
print("Prediction Matrix")
print(rf_predict)
sns.barplot(x=rf_feature_imp, y=rf_feature_imp.index) #plots the feature importances
plt.xlabel('Feature Importance Score(%)')
plt.ylabel('Features')
plt.title("Random Forest")
plt.show()
# There are no coefficients with Random Forest since it is not a regression. However we can see how important each feature is as a percentage when the decision trees make a decision. We see that calories, sodium, protein, and fat are much higher features that can affect the model if removed, and so has a huge effect on the results.
# # XGBoosting Analysis
# In[81]:
xgb = XGBClassifier().fit(X_train, y_train) #fits the XGBoosting Analysis
xgb_important_features = xgb.feature_importances_ #finds feature importances
xgb_pred = get_predict_proba(xgb, X_train, X_test, y_train, y_test) # find the prediction probabilities of yes and no
print("Prediction Matrix")
print(xgb_pred)
plot_importance(xgb, title = "XGBoosting") # plots the feature importance
# XGboosting is a gradient boosting method that uses multiple decision trees to make the classification prediction of whether a recipe is vegetarian or not. Using the built in plot function, we can see that the general features importances is similar to Random Forest. The f0,f1,f2,f3,f4 correspond to rating, calories, protein, fat, and sodium respectively. The relative importance of the features is the concern, so the difference in the X scale is not important. We can see that protein and calories are both important features and that calories is the most important feature. Rating is also the least important feature. There is a major difference in the importance of sodium, where XGboosting rated it 3rd best and tied with fat, while Random Forest rated it a close 2nd to Calories.
#
#
| true |
b7515c151be469bc2e5b179d994fe33ccc029ba2 | Python | IvanaXu/PyTools | /077.Test_BeeWare_windows/beeware-tutorial/helloworld/windows/Hello World/src/app_packages/toga/widgets/switch.py | UTF-8 | 2,586 | 3.234375 | 3 | [
"BSD-3-Clause",
"MIT"
] | permissive | from toga.handlers import wrapped_handler
from .base import Widget
class Switch(Widget):
    """ Switch widget, a clickable button with two stable states, True (on, checked) and False (off, unchecked)

    Args:
        label (str): Text to be shown next to the switch.
        id (str): An identifier for this widget.
        style (:obj:`Style`): An optional style object.
            If no style is provided then a new one will be created for the widget.
        on_toggle (``callable``): Function to execute when pressed.
        is_on (bool): Current on or off state of the switch.
        enabled (bool): Whether or not interaction with the button is possible, defaults to `True`.
        factory (:obj:`module`): A python module that is capable of returning an
            implementation of this class with the same name. (optional & normally not needed)
    """
    def __init__(self, label, id=None, style=None, on_toggle=None, is_on=False, enabled=True, factory=None):
        super().__init__(id=id, style=style, factory=factory)
        # Create the platform-specific backend widget for this interface.
        self._impl = self.factory.Switch(interface=self)
        self.label = label
        self.on_toggle = on_toggle
        self.is_on = is_on
        self.enabled = enabled
    @property
    def label(self):
        """ Accompanying text label of the Switch.

        Returns:
            The label text of the widget as a ``str``.
        """
        return self._label
    @label.setter
    def label(self, value):
        # None is normalized to the empty string so the backend always
        # receives a str; any other value is coerced with str().
        if value is None:
            self._label = ''
        else:
            self._label = str(value)
        self._impl.set_label(value)
        self._impl.rehint()
    @property
    def on_toggle(self):
        """ The callable function for when the switch is pressed.

        Returns:
            The ``callable`` on_toggle function.
        """
        return self._on_toggle
    @on_toggle.setter
    def on_toggle(self, handler):
        # Wrap the user handler so the framework can invoke it uniformly.
        self._on_toggle = wrapped_handler(self, handler)
        self._impl.set_on_toggle(self._on_toggle)
    @property
    def is_on(self):
        """ Button Off/On state.

        Returns:
            ``True`` if on and ``False`` if the switch is off.
        """
        return self._impl.get_is_on()
    @is_on.setter
    def is_on(self, value):
        # Only genuine booleans are accepted (no truthy/falsy coercion).
        if not isinstance(value, bool):
            raise ValueError("Switch.is_on can only be set to true or false")
        self._impl.set_is_on(value)
    def toggle(self):
        """Reverse the value of the `Switch.is_on` property from true to false
        and vice versa.
        """
        self.is_on = not self.is_on
| true |
6732bc0db416b112544768379769f87ccde8b9e8 | Python | karamveer5/Python-Basic-Programs | /Fibonacci series using function.py | UTF-8 | 166 | 3.625 | 4 | [] | no_license | def fibbo():
    # Read how many Fibonacci terms to print (prompt text is part of the UI).
    k=int(input("Enter a number: "))
    # a, b hold the two most recent Fibonacci numbers (seeds 0 and 1).
    a=0
    b=1
    # NOTE(review): the first two terms are printed unconditionally, so any
    # input k < 2 still prints "0 1" -- confirm that is intended.
    print(a,end=' ')
    print(b,end=' ')
    # Produce terms 3..k; each new term is the sum of the previous two.
    for i in range(3,k+1):
        c=a+b
        print(c,end=' ')
        a=b
        b=c
fibbo() | true |
8219894d3e016a6af21a917980a7ff92b529450e | Python | m987/pycode- | /hello.py | UTF-8 | 260 | 2.640625 | 3 | [] | no_license | from flask import Flask
# Create the WSGI application object for this module.
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Root endpoint: return a static greeting."""
    return 'Hello World!'
@app.route('/kot')
def kot():
    """GET /kot: return a static string."""
    return 'KOT!!!'
@app.route('/potega/<int:a>')
def potega(a):
    """GET /potega/<a>: return the square of integer *a* as text."""
    return str(a*a)
# Start the Flask development server only when executed directly.
if __name__ == '__main__':
    app.run()
5ba40675a07c0ccad919c6959fce8068923aa34a | Python | geesaladurgaakhil/database | /Hospital_Management/Read_details.py | UTF-8 | 2,201 | 3.53125 | 4 | [] | no_license | # Question 2: Fetch Hospital and Doctor Information using hospital Id and doctor Id
import psycopg2
def get_connection():
    """Open and return a new connection to the local ``python_db`` database.

    NOTE(review): credentials are hard-coded in source; move them to
    configuration or environment variables before sharing/deploying.
    """
    connection=psycopg2.connect(user="postgres",
                            password="root",
                            host="127.0.0.1",
                            port="5432",
                            database="python_db")
    return connection
def close_connection(connection):
    """Close *connection* if one was provided; a falsy argument is a no-op."""
    if not connection:
        return
    connection.close()
    print("PostgresSql connection closed")
def get_hospital_details(hospital_Id):
    """Print every column of the hospital row matching *hospital_Id*.

    Fix over the original: the connection was only closed on the success
    path, so a failed query leaked it.  It is now closed in a ``finally``
    block (``close_connection`` tolerates ``None`` when the connect itself
    failed).
    """
    connection = None
    try:
        connection = get_connection()
        cursor = connection.cursor()
        # Parameterised query: hospital_Id is passed separately, never
        # interpolated into the SQL string.
        select_query = """SELECT * FROM hospital where hospital_id=%s"""
        cursor.execute(select_query, (hospital_Id,))
        records = cursor.fetchall()
        print("printing hospital records")
        print("--------------------------")
        for row in records:
            print("Hospital Id:", row[0])
            print("Hospital Name:", row[1])
            print("Bed Count:", row[2])
    except (Exception, psycopg2.Error) as error:
        print("error while getting the records ", error)
    finally:
        close_connection(connection)
def get_doctor_details(doctor_id):
    """Print every column of the doctor row matching *doctor_id*.

    Fix over the original: the connection was only closed on the success
    path, so a failed query leaked it.  It is now closed in a ``finally``
    block (``close_connection`` tolerates ``None`` when the connect itself
    failed).
    """
    connection = None
    try:
        connection = get_connection()
        cursor = connection.cursor()
        # Parameterised query: doctor_id is passed separately, never
        # interpolated into the SQL string.
        select_query = """select * from doctor where doctor_id=%s"""
        cursor.execute(select_query, (doctor_id,))
        records = cursor.fetchall()
        print("printing Doctor records")
        print("------------------------")
        for row in records:
            print("Doctor Id:", row[0])
            print("Doctor Name:", row[1])
            print("Hospital Id:", row[2])
            print("Joining Date:", row[3])
            print("Specialty:", row[4])
            print("Salary:", row[5])
            print("Experience:", row[6])
    except (Exception, psycopg2.Error) as error:
        print("Error while getting data", error)
    finally:
        close_connection(connection)
print("Question 2: Fetch Hospital and Doctor Information using hospital Id and doctor Id")
print("-------------------------")
# Demo queries: hospital #2 and doctor #102.
get_hospital_details(2)
print("-------------------------")
get_doctor_details(102)
| true |
c523cd13707cc09c195d236e9e7daa3544c027dc | Python | Aasthaengg/IBMdataset | /Python_codes/p03415/s838804804.py | UTF-8 | 64 | 2.953125 | 3 | [] | no_license | a,b,c = input()
# NOTE(review): the preceding line reads the first row of a 3x3 character
# grid into a, b, c.  Each input() line is unpacked character-by-character.
d,e,f = input()  # second row
g,h,i = input()  # third row
# Print the main-diagonal characters concatenated (top-left to bottom-right).
print(a + e + i)
643d81dd60279675490b9479e88ebbf111b7507b | Python | nprokhorenko/tonal_classif | /Keras_tonal_classif.py | UTF-8 | 1,376 | 2.59375 | 3 | [] | no_license | import numpy as np
from keras import models
from keras import layers
from keras.utils import to_categorical
from keras.datasets import imdb
# Monkey-patch numpy.load's defaults so allow_pickle is effectively True --
# needed because keras' bundled IMDB loader relies on pickled arrays while
# newer NumPy versions default allow_pickle to False.
np.load.__defaults__= (None, True, True, 'ASCII')
# Load the IMDB review dataset restricted to the 10 000 most frequent words,
# then merge the predefined train/test halves so we can re-split them below.
(training_X, training_y), (testing_X, testing_y) = imdb.load_data(num_words=10000)
data = np.concatenate((training_X, testing_X), axis=0)
targets = np.concatenate((training_y, testing_y), axis=0)
def vectorize(sequences, dimension = 10000):
    """Multi-hot encode *sequences* into a (len(sequences), dimension) array.

    Row ``i`` gets a 1.0 at every word index present in ``sequences[i]`` and
    0.0 everywhere else.
    """
    encoded = np.zeros((len(sequences), dimension))
    for row, word_indices in enumerate(sequences):
        # Fancy indexing sets all listed positions of this row in one shot.
        encoded[row, word_indices] = 1
    return encoded
# Encode every review as a 10 000-dim multi-hot vector; labels become floats.
data = vectorize(data)
targets = np.array(targets).astype("float32")
# Manual split: first 10 000 samples for testing, the rest for training.
X_test = data[:10000]
y_test = targets[:10000]
X_train = data[10000:]
y_train = targets[10000:]
# Three 50-unit ReLU layers with dropout, ending in a sigmoid for binary
# sentiment classification.
model = models.Sequential()
model.add(layers.Dense(50, activation = "relu", input_shape=(10000,)))
model.add(layers.Dropout(0.3, noise_shape=None, seed=None))
model.add(layers.Dense(50, activation = "relu"))
model.add(layers.Dropout(0.2, noise_shape=None, seed=None))
model.add(layers.Dense(50, activation = "relu"))
model.add(layers.Dense(1, activation = "sigmoid"))
model.summary()
model.compile(
    optimizer = "adam",
    loss = "binary_crossentropy",
    metrics = ["accuracy"]
)
results = model.fit(
    X_train, y_train,
    epochs= 3,
    batch_size = 400,
    validation_data = (X_test, y_test)
)
# NOTE(review): the history key "val_acc" is used by older Keras versions;
# newer releases name it "val_accuracy" -- confirm against the installed one.
print("Test-Accuracy:", np.mean(results.history["val_acc"]))
3505dcdb7f7fd253ff3463f5b24f92de037960f3 | Python | lulu2184/11785-vqa | /preprocessing/create_dict.py | UTF-8 | 1,701 | 2.84375 | 3 | [] | no_license | import json
import os
import numpy as np
from word_dictionary import WordDict
# Width of the GloVe vectors used in __main__ (matches glove.6B.300d.txt).
EMBEDDING_DIMENSION = 300
def create_dict(data_dir):
    """Build a WordDict covering every question in the VQA v2 question files.

    Args:
        data_dir: directory containing the four official question JSON files.

    Returns:
        A WordDict whose vocabulary spans the train/val/test/test-dev
        question sets.

    Fixes over the original: the JSON files are opened with a context
    manager so the handles are closed promptly (previously left to the
    garbage collector), and the dead ``questions = []`` initialiser is gone.
    """
    word_dict = WordDict()
    files = [
        'v2_OpenEnded_mscoco_train2014_questions.json',
        'v2_OpenEnded_mscoco_val2014_questions.json',
        'v2_OpenEnded_mscoco_test2015_questions.json',
        'v2_OpenEnded_mscoco_test-dev2015_questions.json'
    ]
    for path in files:
        question_path = os.path.join(data_dir, path)
        with open(question_path) as f:
            questions = json.load(f)['questions']
        for question in questions:
            word_dict.tokenize(question['question'], True)
    return word_dict
def create_glove_embedding(idx_to_word, glove_file):
    """Build an embedding matrix aligned with *idx_to_word* from a GloVe file.

    Each line of *glove_file* is "<word> <v1> <v2> ...".  Words absent from
    the GloVe vocabulary keep an all-zero row.

    Returns:
        (weights, word_to_embedding): ``weights`` has one float32 row per
        entry of *idx_to_word*; ``word_to_embedding`` maps word -> vector.
    """
    with open(glove_file, 'r') as f:
        entries = f.readlines()

    # The vector width is inferred from the first line (token count minus
    # the leading word itself).
    embedding_dim = len(entries[0].split(' ')) - 1

    word_to_embedding = {}
    for entry in entries:
        word, *values = entry.split(' ')
        word_to_embedding[word] = np.array([float(v) for v in values])

    weights = np.zeros((len(idx_to_word), embedding_dim), dtype=np.float32)
    for idx, word in enumerate(idx_to_word):
        vector = word_to_embedding.get(word)
        if vector is not None:
            weights[idx] = vector
    return weights, word_to_embedding
if __name__ == '__main__':
    # Build and persist the vocabulary, then align GloVe vectors with it and
    # save the resulting embedding matrix for model initialisation.
    word_dict = create_dict('data')
    word_dict.dump_to_file('data/dictionary.pkl')
    embedding_dim = EMBEDDING_DIMENSION
    glove_file = 'data/glove/glove.6B.{}d.txt'.format(embedding_dim)
    weights, word_to_embedding = create_glove_embedding(
        word_dict.idx_to_word, glove_file)
    np.save('data/glove6b_init_{}d.npy'.format(embedding_dim), weights)
| true |
88d9a91bd715e76aa203dc13fb5274915693ff72 | Python | luisdrita/RoadSafety | /imagery_dataset/select_lsoa.py | UTF-8 | 547 | 2.515625 | 3 | [] | no_license | from shutil import copyfile
import os
right_files = []
all_files = []
# Collect the selected image names (one per line, extension stripped).
with open('../imagery_dataset/one_img_per_lsoa.txt') as fp:
    line = fp.readline()
    # NOTE(review): the first line read above is discarded (header?), and the
    # final empty read at EOF appends an empty-string entry -- confirm both
    # are intentional.
    while line:
        line = fp.readline()
        right_files.append((line.split("\n")[0]).split(".")[0])
# Gather every filename under yolofinal/ (recursively).
for root, dirs, files in os.walk("yolofinal"):
    for filename in files:
        all_files.append(filename)
# Copy each matching file into the target subdirectory.
# NOTE(review): right_files entries have their extension stripped while
# all_files entries keep theirs, so the equality below only matches files
# without an extension -- verify against the actual data.
for file in right_files:
    for filename in all_files:
        if filename == file:
            copyfile("yolofinal/" + filename, "yolofinal/yolofinal_lsoa/" + filename)
def sumArray(mylist, msum=0, count=0):
    """Recursively sum *mylist*, accumulating into *msum*.

    ``count`` is the index of the next element to add; recursion stops once
    every element has been consumed.
    """
    if count == len(mylist):
        return msum
    return sumArray(mylist, msum + mylist[count], count + 1)
# Read the element count, then a space-separated list of integers, convert
# the first `num` tokens to int, and print their recursive sum.
num = int(input())
inputs = input().split(' ')
# NOTE(review): only the first `num` tokens are converted; if fewer than
# `num` tokens are supplied this raises IndexError -- confirm input format.
for i in range(num):
    inputs[i] = int(inputs[i])
print(sumArray(inputs))
| true |
a53d6565df9b35b1ce05d38283dc147ec01bd485 | Python | gyoforit/server-with-algorithm | /movies/views.py | UTF-8 | 2,119 | 2.53125 | 3 | [] | no_license | from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_GET, require_POST, require_http_methods
from .models import Movie
from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from random import random, sample
# Create your views here.
@require_GET
def index(request):
    """List every movie (GET only)."""
    movies = Movie.objects.all()
    context = {
        'movies': movies,
    }
    return render(request, 'movies/index.html', context)
@require_GET
def detail(request, movie_pk):
    """Render one movie's detail page; 404 when the pk does not exist."""
    movie = get_object_or_404(Movie, pk=movie_pk)
    context = {
        'movie': movie,
    }
    return render(request, 'movies/detail.html', context)
@login_required
@require_GET
def recommended(request):
    """Render movies released in the current calendar month (of any year).

    For each movie the number of years since its release is precomputed and
    passed to the template keyed by primary key.
    """
    today = datetime.today()
    today_month = today.month
    movies = Movie.objects.filter(release_date__month=today_month)
    # Hoisted: the original called datetime.today() once per movie inside
    # the loop; the current year is loop-invariant.
    current_year = today.year
    from_now = {movie.pk: current_year - movie.release_date.year for movie in movies}
    context = {
        'today_month': today_month,
        'movies': movies,
        'from_now': from_now,
    }
    return render(request, 'movies/recommended.html', context)
@login_required
@require_GET
def recommended2(request):
    # Static template; presumably populated client-side via the mvti JSON
    # endpoint below -- confirm in the template.
    return render(request, 'movies/recommended2.html')
@login_required
@require_GET
def mvti(request):
    """Return a 10-movie JSON recommendation based on the quiz answer."""
    answer = request.GET.get('answer')
    if answer == '1':
        # Random picks among pre-2000 releases.
        queryset = Movie.objects.filter(release_date__year__lte=1999).order_by('?')
    elif answer == '2':
        # Highest vote average first.
        queryset = Movie.objects.order_by('-vote_average')
    elif answer == '3':
        # Least popular first.
        queryset = Movie.objects.order_by('popularity')
    else:
        # Fallback: fully random.
        queryset = Movie.objects.order_by('?')
    movies = queryset[:10]
    movies_list = [
        {
            'title': movie.title,
            'release_date': movie.release_date,
            'vote_average': movie.vote_average,
            'poster_path': movie.poster_path,
        }
        for movie in movies
    ]
    response = {
        'moviesList': movies_list,
    }
    return JsonResponse(response)
| true |
f11bf35fd6e7bab60dfafbd0ac89d76ab4d7ba7e | Python | Jiezhi/myleetcode | /src/2279-MaximumBagsWithFullCapacityOfRocks.py | UTF-8 | 1,254 | 3.3125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
CREATED AT: 2022/5/23
Des:
https://leetcode.com/problems/maximum-bags-with-full-capacity-of-rocks/
GITHUB: https://github.com/Jiezhi/myleetcode
Difficulty: Medium
Tag:
See:
"""
from typing import List
class Solution:
    def maximumBags(self, capacity: List[int], rocks: List[int], additionalRocks: int) -> int:
        """Return the maximum number of bags that can be filled to capacity.

        Greedy: fill the bags with the smallest remaining shortfall first;
        any bag skipped would only cost more rocks than the one chosen.

        Constraints (problem statement):
            n == len(capacity) == len(rocks), 1 <= n <= 5 * 10^4
            0 <= rocks[i] <= capacity[i] <= 10^9
            1 <= additionalRocks <= 10^9

        Improvements over the original:
            * shortfalls are computed with zip() instead of index arithmetic;
            * the unreachable ``i - 1`` fallback in the return statement is
              gone -- the loop counter can never exceed the list length.
        """
        shortfalls = sorted(c - r for c, r in zip(capacity, rocks))
        full_bags = 0
        for need in shortfalls:
            if need > additionalRocks:
                break
            additionalRocks -= need
            full_bags += 1
        return full_bags
def test():
    """Smoke test: the documented LeetCode example must yield 3 full bags."""
    assert Solution().maximumBags(capacity=[2, 3, 4, 5], rocks=[1, 2, 4, 4], additionalRocks=2) == 3
test()
| true |
5725d09b5683256d4626a639cc9f0aaccb062fd0 | Python | drudox/ODE-FORTRAN90 | /plot/eq1_1.py | UTF-8 | 2,442 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
'''
comments..
'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
#-----------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------
#a= np.fromfile('part.out',dtype=float ,count=-1 ,sep='')
# Load (t, u) columns for each integration scheme's output file.
t_bwdE = np.loadtxt('bwdEuler001.out',usecols=(0,) )
u_bwdE = np.loadtxt('bwdEuler001.out',usecols=(1,) )
t_fwdE = np.loadtxt('fwdEuler001.out',usecols=(0,) )
u_fwdE = np.loadtxt('fwdEuler001.out',usecols=(1,) )
real_t= np.loadtxt('realSol_001.out',usecols=(0,) )
real_y = np.loadtxt('realSol_001.out',usecols=(1,) )
ab2_t = np.loadtxt('AB2_001.out',usecols=(0,) )
ab2_u = np.loadtxt('AB2_001.out',usecols=(1,) )
# NOTE(review): the AM2 series is loaded but never plotted below -- confirm
# whether the Adams-Moulton curve was meant to be included.
am2_t = np.loadtxt('AM2_001.out',usecols=(0,) )
am2_u = np.loadtxt('AM2_001.out',usecols=(1,) )
#b=
#i=0
#for row in a:
#    print(a[i] , '\t' , b[i])
#    i+=1
x = np.arange(0,2,0.25)
#y1 = fun_x(x)
#y2 = fun_x2(x)
#for i in range(len(x)):
#    print( x[i] , y1[i] )
# Axis tick helpers; only the minor locators are actually attached below
# (majorLocator / majorFormatter are currently unused).
majorLocator = MultipleLocator(20)
majorFormatter = FormatStrFormatter('%f')
minorXLocator = MultipleLocator(0.05)
minorYLocator = MultipleLocator(0.025)
# LaTeX text rendering (requires a working TeX installation).
plt.rc('text', usetex=True )
#plt.rc('text', latex.preamble=r'\usepackage{babel}')
plt.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
plt.rc('font',family='' ,size=20 )
fig,ax = plt.subplots(1,figsize=(12,6))
#plt.plot(real_t,real_y,'--k',linewidth=2,label=r'Analitical Sol.')
#plt.plot(t_bwdE,u_bwdE,'-.k',linewidth=2,label=r'Implicit Euler')
#plt.plot(t_fwdE,u_fwdE,'-.k',linewidth=2,label=r'Explicit Euler')
ax.set_xlim(0,2)
# Colour-coded comparison of the analytical solution vs three schemes.
plt.plot(real_t,real_y,'k',linewidth=2,label=r'Analitical Sol.')
plt.plot(t_bwdE,u_bwdE,'r',linewidth=2,label=r'Implicit Euler')
plt.plot(t_fwdE,u_fwdE,'b',linewidth=2,label=r'Explicit Euler')
plt.plot(ab2_t,ab2_u,'g',linewidth=2,label=r'Adams Bashforth 2 step')
plt.legend()
plt.title (r'$\dot{y} = 10(y-1) u$',fontsize=22)
plt.xlabel(r'$x$',fontsize=22)
plt.ylabel(r'$y$',fontsize=22)
#plt.xticks(range(0,2,0.5))
ax.xaxis.set_minor_locator(minorXLocator)
ax.yaxis.set_minor_locator(minorYLocator)
#plt.subplots_adjust(bottom=0.2)
#plt.margins()
plt.grid()
# NOTE(review): the positional pad argument to tight_layout is deprecated in
# newer matplotlib; prefer plt.tight_layout(pad=0.5).
plt.tight_layout(0.5)
plt.savefig('case1.pdf')
plt.show()
'''
plt.figure(2, figsize=(10,6))
plt.plot(a,b,'r+')
plt.show()
'''
| true |
0db07d9872eb3d1406811e6d90962e55f90994d6 | Python | wangzhon150/Personal-RPG | /progress.py | UTF-8 | 13,825 | 2.671875 | 3 | [] | no_license | import pygame
import cal
import log
import json
from datetime import date
from typing import Dict
from math import floor
# UI palette (RGB tuples) shared by the progress screens.
PROGRESS_BG_COLOUR = (68, 53, 46)
PROGRESS_TITLE_COLOUR = (255, 255, 255)
SUBTITLE_COLOUR = (255, 255, 255)
LINE_COLOUR = (255, 255, 255)
BUTTON_COLOUR = (76, 74, 73)
ICON_TEXT_COLOUR = (255, 255, 255)
TEXT_COLOUR = (255, 255, 255)
XP_COLOUR = (204, 170, 146)
SKILL_FOREGROUND = (255, 255, 255)
SKILL_BACKGROUND = (76, 74, 73)
# JSON file persisting the skill-name -> XP mapping.
SKILLS_FILE = 'skills.json'
def run_progress() -> None:
    """Main progress screen: skill levels/XP bars on the left, activity
    buttons on the right.  Runs its own pygame event loop and hands control
    to the calendar, diary logger, or the per-activity update screen.
    """
    screen = pygame.display.set_mode((800, 600))
    pygame.display.set_caption('Progress')
    screen.fill((0, 0, 0))
    pygame.draw.rect(screen, PROGRESS_BG_COLOUR, (20, 20, 760, 560))
    skills = import_skills()
    title_font = pygame.font.Font('calist.ttf', 40)
    subtitle_font = pygame.font.Font('calist.ttf', 24)
    text_font = pygame.font.Font('calist.ttf', 20)
    icon_font = pygame.font.Font('calist.ttf', 28)
    title_surface = title_font.render('Progress', 1, PROGRESS_TITLE_COLOUR)
    screen.blit(title_surface, (30, 25))
    # Vertical divider between the skill panel and the activity panel.
    pygame.draw.line(screen, LINE_COLOUR, (350, 30), (350, 570))
    back_pos = (30, 540)
    pygame.draw.rect(screen, BUTTON_COLOUR,
                     (back_pos[0], back_pos[1], 90, 30))
    back_surface = icon_font.render('Return', 1, ICON_TEXT_COLOUR)
    screen.blit(back_surface, (33, 540))
    # --- Left panel: one block per skill -------------------------------
    # Each block renders "<Skill> <level>", the "(cur/next XP)" fraction,
    # and a 280px-wide progress bar (width from get_bar_width).
    surface_1 = subtitle_font.render('Adventurer ' + str(get_level_per_xp(skills['Adventurer'])), 1, SUBTITLE_COLOUR)
    xp_1 = subtitle_font.render(' (' + str(skills['Adventurer'] -
                                get_xp_per_level(get_level_per_xp(skills['Adventurer']))) + '/'
                                + str(get_xp_per_level(get_level_per_xp(skills['Adventurer'])+1))
                                + ' XP)', 1, XP_COLOUR)
    width_1 = get_bar_width('Adventurer')
    pygame.draw.rect(screen, SKILL_BACKGROUND, (40, 133, 280, 22))
    pygame.draw.rect(screen, SKILL_FOREGROUND, (42, 135, width_1, 18))
    surface_2 = subtitle_font.render('Chronicler ' + str(get_level_per_xp(skills['Chronicler'])), 1, SUBTITLE_COLOUR)
    xp_2 = subtitle_font.render(' (' + str(skills['Chronicler'] -
                                get_xp_per_level(get_level_per_xp(skills['Chronicler']))) + '/'
                                + str(get_xp_per_level(get_level_per_xp(skills['Chronicler'])+1))
                                + ' XP)', 1, XP_COLOUR)
    width_2 = get_bar_width('Chronicler')
    pygame.draw.rect(screen, SKILL_BACKGROUND, (40, 203, 280, 22))
    pygame.draw.rect(screen, SKILL_FOREGROUND, (42, 205, width_2, 18))
    surface_3 = subtitle_font.render('Diligent ' + str(get_level_per_xp(skills['Diligent'])), 1, SUBTITLE_COLOUR)
    xp_3 = subtitle_font.render(' (' + str(skills['Diligent'] -
                                get_xp_per_level(get_level_per_xp(skills['Diligent']))) + '/'
                                + str(get_xp_per_level(get_level_per_xp(skills['Diligent'])+1))
                                + ' XP)', 1, XP_COLOUR)
    width_3 = get_bar_width('Diligent')
    pygame.draw.rect(screen, SKILL_BACKGROUND, (40, 273, 280, 22))
    pygame.draw.rect(screen, SKILL_FOREGROUND, (42, 275, width_3, 18))
    surface_4 = subtitle_font.render('Erudite ' + str(get_level_per_xp(skills['Erudite'])), 1, SUBTITLE_COLOUR)
    xp_4 = subtitle_font.render(' (' + str(skills['Erudite'] -
                                get_xp_per_level(get_level_per_xp(skills['Erudite']))) + '/'
                                + str(get_xp_per_level(get_level_per_xp(skills['Erudite'])+1))
                                + ' XP)', 1, XP_COLOUR)
    width_4 = get_bar_width('Erudite')
    pygame.draw.rect(screen, SKILL_BACKGROUND, (40, 343, 280, 22))
    pygame.draw.rect(screen, SKILL_FOREGROUND, (42, 345, width_4, 18))
    surface_5 = subtitle_font.render('Hardy ' + str(get_level_per_xp(skills['Hardy'])), 1, SUBTITLE_COLOUR)
    xp_5 = subtitle_font.render(' (' + str(skills['Hardy'] -
                                get_xp_per_level(get_level_per_xp(skills['Hardy']))) + '/'
                                + str(get_xp_per_level(get_level_per_xp(skills['Hardy']) + 1))
                                + ' XP)', 1, XP_COLOUR)
    width_5 = get_bar_width('Hardy')
    pygame.draw.rect(screen, SKILL_BACKGROUND, (40, 413, 280, 22))
    pygame.draw.rect(screen, SKILL_FOREGROUND, (42, 415, width_5, 18))
    surface_6 = subtitle_font.render('Wizard ' + str(get_level_per_xp(skills['Wizard'])), 1, SUBTITLE_COLOUR)
    xp_6 = subtitle_font.render(' (' + str(skills['Wizard'] -
                                get_xp_per_level(get_level_per_xp(skills['Wizard']))) + '/'
                                + str(get_xp_per_level(get_level_per_xp(skills['Wizard'])+1))
                                + ' XP)', 1, XP_COLOUR)
    width_6 = get_bar_width('Wizard')
    pygame.draw.rect(screen, SKILL_BACKGROUND, (40, 483, 280, 22))
    pygame.draw.rect(screen, SKILL_FOREGROUND, (42, 485, width_6, 18))
    screen.blit(surface_1, (40, 100))
    screen.blit(surface_2, (40, 170))
    screen.blit(surface_3, (40, 240))
    screen.blit(surface_4, (40, 310))
    screen.blit(surface_5, (40, 380))
    screen.blit(surface_6, (40, 450))
    screen.blit(xp_1, (220, 100))
    screen.blit(xp_2, (220, 170))
    screen.blit(xp_3, (220, 240))
    screen.blit(xp_4, (220, 310))
    screen.blit(xp_5, (220, 380))
    screen.blit(xp_6, (220, 450))
    # --- Right panel: activity buttons ---------------------------------
    # pos_dict maps each button's (x, y, w, h) rect to
    # (skill name, XP award, log text prefix, whether extra input is asked).
    today = date.today()
    pos_dict = {}
    pygame.draw.rect(screen, BUTTON_COLOUR, (130, 540, 77, 30))
    diary_surface = icon_font.render('Diary', 1, ICON_TEXT_COLOUR)
    screen.blit(diary_surface, (133, 540))
    date_surface = subtitle_font.render('It is ' + today.strftime('%A') + ', ' + str(today.day) + ' '
                                        + today.strftime('%B') + ' ' + str(today.year) + '.', 1, SUBTITLE_COLOUR)
    selection_surface = subtitle_font.render('How did you spend the day?', 1, SUBTITLE_COLOUR)
    pygame.draw.rect(screen, BUTTON_COLOUR, (360, 126, 95, 30))
    a_1 = text_font.render('Discovery', 1, TEXT_COLOUR)
    pos_dict[(360, 126, 95, 30)] = ('Adventurer', 3, 'You discovered a new location: ', True)
    pygame.draw.rect(screen, BUTTON_COLOUR, (463, 126, 60, 30))
    a_2 = text_font.render('Event', 1, TEXT_COLOUR)
    pos_dict[(463, 126, 60, 30)] = ('Adventurer', 4, 'You attended an event: ', True)
    pygame.draw.rect(screen, BUTTON_COLOUR, (530, 126, 80, 30))
    a_3 = text_font.render('Journey', 1, TEXT_COLOUR)
    pos_dict[(530, 126, 80, 30)] = ('Adventurer', 3, 'You journeyed to: ', True)
    pygame.draw.rect(screen, BUTTON_COLOUR, (360, 195, 110, 30))
    c_1 = text_font.render('Chronicling', 1, TEXT_COLOUR)
    pos_dict[(360, 195, 110, 30)] = ('Chronicler', 1, 'You chronicled the day\'s events', False)
    pygame.draw.rect(screen, BUTTON_COLOUR, (478, 195, 100, 30))
    c_2 = text_font.render('Journaling', 1, TEXT_COLOUR)
    pos_dict[(478, 195, 100, 30)] = ('Chronicler', 2, 'You wrote in your log diary', False)
    pygame.draw.rect(screen, BUTTON_COLOUR, (360, 265, 95, 30))
    d_1 = text_font.render('Early Bird', 1, TEXT_COLOUR)
    pos_dict[(360, 265, 95, 30)] = ('Diligent', 2, 'You began your day bright and early', False)
    pygame.draw.rect(screen, BUTTON_COLOUR, (463, 265, 75, 30))
    d_2 = text_font.render('Lecture', 1, TEXT_COLOUR)
    pos_dict[(463, 265, 75, 30)] = ('Diligent', 1, 'You attended a lecture for: ', True)
    pygame.draw.rect(screen, BUTTON_COLOUR, (546, 265, 70, 30))
    d_3 = text_font.render('Review', 1, TEXT_COLOUR)
    pos_dict[(546, 265, 70, 30)] = ('Diligent', 2, 'You reviewed your notes for: ', True)
    pygame.draw.rect(screen, BUTTON_COLOUR, (624, 265, 55, 30))
    d_4 = text_font.render('Tasks', 1, TEXT_COLOUR)
    pos_dict[(624, 265, 55, 30)] = ('Diligent', 2, 'You made some routine progress on: ', True)
    pygame.draw.rect(screen, BUTTON_COLOUR, (360, 335, 75, 30))
    e_1 = text_font.render('Articles', 1, TEXT_COLOUR)
    pos_dict[(360, 335, 75, 30)] = ('Erudite', 2, 'You read some informational articles from: ', True)
    pygame.draw.rect(screen, BUTTON_COLOUR, (443, 335, 60, 30))
    e_2 = text_font.render('Books', 1, TEXT_COLOUR)
    pos_dict[(443, 335, 60, 30)] = ('Erudite', 3, 'You finished a chapter of: ', True)
    pygame.draw.rect(screen, BUTTON_COLOUR, (511, 335, 60, 30))
    e_3 = text_font.render('Trivia', 1, TEXT_COLOUR)
    pos_dict[(511, 335, 60, 30)] = ('Erudite', 2, 'You learned about: ', True)
    pygame.draw.rect(screen, BUTTON_COLOUR, (360, 405, 55, 30))
    h_1 = text_font.render('Walk', 1, TEXT_COLOUR)
    pos_dict[(360, 405, 55, 30)] = ('Hardy', 3, 'You walked a great many steps: ', True)
    pygame.draw.rect(screen, BUTTON_COLOUR, (423, 405, 85, 30))
    h_2 = text_font.render('Workout', 1, TEXT_COLOUR)
    pos_dict[(423, 405, 85, 30)] = ('Hardy', 3, 'You worked out', False)
    pygame.draw.rect(screen, BUTTON_COLOUR, (360, 475, 75, 30))
    w_1 = text_font.render('Sorcery', 1, TEXT_COLOUR)
    pos_dict[(360, 475, 75, 30)] = ('Wizard', 3, 'You made some headway on: ', True)
    screen.blit(a_1, (365, 128))
    screen.blit(a_2, (468, 128))
    screen.blit(a_3, (536, 128))
    screen.blit(c_1, (365, 197))
    screen.blit(c_2, (483, 197))
    screen.blit(d_1, (365, 268))
    screen.blit(d_2, (468, 268))
    screen.blit(d_3, (549, 268))
    screen.blit(d_4, (627, 268))
    screen.blit(e_1, (365, 338))
    screen.blit(e_2, (447, 338))
    screen.blit(e_3, (514, 338))
    screen.blit(h_1, (365, 408))
    screen.blit(h_2, (426, 408))
    screen.blit(w_1, (365, 478))
    screen.blit(date_surface, (360, 35))
    screen.blit(selection_surface, (360, 70))
    pygame.display.flip()
    # --- Event loop ----------------------------------------------------
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return
            elif event.type == pygame.MOUSEBUTTONUP and \
                    back_pos[0] <= event.pos[0] <= back_pos[0] + 100 and \
                    back_pos[1] <= event.pos[1] <= back_pos[1] + 90:
                # NOTE(review): the Return button rect is 90x30 but this
                # hit test spans 100x90 -- confirm the intended hit box.
                return cal.run_calendar()
            elif event.type == pygame.MOUSEBUTTONUP and \
                    130 <= event.pos[0] <= 207 and \
                    540 <= event.pos[1] <= 570:
                # Diary button: the entry text is read from the console.
                text = input('Diary entry: ')
                log.add_to_log(today, '!!DIARY', text)
            elif event.type == pygame.MOUSEBUTTONUP:
                # Any activity button: delegate to the update screen.
                for pos in pos_dict:
                    if pos[0] <= event.pos[0] <= pos[0] + pos[2] and \
                            pos[1] <= event.pos[1] <= pos[1] + pos[3]:
                        tup = pos_dict[pos]
                        return run_progress_update(tup[0], tup[1], tup[2], tup[3])
def run_progress_update(skill: str, xp: int, text: str, input_: bool) -> None:
    """Confirmation screen for one activity.

    Args:
        skill: skill name to award XP to.
        xp: amount of XP awarded on Save.
        text: log text prefix shown to the user.
        input_: when True, extra detail is appended via console input().
    """
    screen = pygame.display.set_mode((500, 200))
    pygame.display.set_caption('Update Progress')
    screen.fill((0, 0, 0))
    pygame.draw.rect(screen, PROGRESS_BG_COLOUR, (20, 20, 460, 160))
    text_font = pygame.font.Font('calist.ttf', 20)
    icon_font = pygame.font.Font('calist.ttf', 28)
    back_pos = (30, 140)
    pygame.draw.rect(screen, BUTTON_COLOUR,
                     (back_pos[0], back_pos[1], 90, 30))
    back_surface = icon_font.render('Cancel', 1, ICON_TEXT_COLOUR)
    screen.blit(back_surface, (33, 140))
    pygame.draw.rect(screen, BUTTON_COLOUR, (408, 140, 60, 30))
    save_surface = icon_font.render('Save', 1, ICON_TEXT_COLOUR)
    screen.blit(save_surface, (412, 140))
    # Prompts ending in a space expect a detail suffix; otherwise a period
    # is appended for display.
    if text[-1] == ' ':
        prompt_surface = text_font.render(text, 1, TEXT_COLOUR)
    else:
        prompt_surface = text_font.render(text + '.', 1, TEXT_COLOUR)
    screen.blit(prompt_surface, (40, 40))
    pygame.display.flip()
    if input_:
        # Detail is read from the console, not from the pygame window.
        text += input(text)
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return
            elif event.type == pygame.MOUSEBUTTONUP and \
                    back_pos[0] <= event.pos[0] <= back_pos[0] + 100 and \
                    back_pos[1] <= event.pos[1] <= back_pos[1] + 90:
                # Cancel: back to the progress screen without saving.
                return run_progress()
            elif event.type == pygame.MOUSEBUTTONUP and \
                    408 <= event.pos[0] <= 468 and \
                    140 <= event.pos[1] <= 170:
                # Save: persist XP and log the event.
                return update_progress(skill, xp, text)
def update_progress(skill: str, xp: int, text: str) -> None:
    """Award *xp* to *skill*, persist the skills file, log the activity,
    and return to the progress screen.
    """
    skills = import_skills()
    skills[skill] += xp
    with open(SKILLS_FILE, 'w') as o:
        json.dump(skills, o)
    text += '.'
    # Skill -> log category lookup.  NOTE(review): 'Chronicler' had no
    # branch in the original if/elif chain, so (as before) Chronicler
    # activities are not written to the log -- confirm that is intended.
    log_categories = {
        'Adventurer': 'Adventure',
        'Diligent': 'Diligence',
        'Erudite': 'Erudition',
        'Hardy': 'Hardiness',
        'Wizard': 'Wizardry',
    }
    category = log_categories.get(skill)
    if category is not None:
        log.add_to_log(date.today(), category, text)
    return run_progress()
def import_skills() -> Dict[str, int]:
    """Load the persisted skill-name -> XP mapping from SKILLS_FILE."""
    with open(SKILLS_FILE) as handle:
        return json.load(handle)
def get_bar_width(name: str) -> int:
    """Pixel width (out of 278) of the XP progress bar for skill *name*.

    The width is the fraction of XP earned within the current level,
    relative to the XP span between the current and next level thresholds.
    """
    xp = import_skills()[name]
    level = get_level_per_xp(xp)
    level_floor = get_xp_per_level(level)
    level_ceiling = get_xp_per_level(level + 1)
    fraction = (xp - level_floor) / (level_ceiling - level_floor)
    return floor(fraction * 278)
def get_xp_per_level(lvl: int) -> int:
    """Total XP required to reach level *lvl*.

    The thresholds follow a Fibonacci-style recurrence seeded with 5 and 10:
    f(lvl) = f(lvl-1) + f(lvl-2), with f(<=0) = 0, f(1) = 5, f(2) = 10.

    Fix over the original: the naive double recursion recomputed the same
    values exponentially many times; this iterative form is O(lvl) and
    returns identical values.
    """
    if lvl <= 0:
        return 0
    prev, cur = 5, 10  # thresholds for levels 1 and 2
    if lvl == 1:
        return prev
    for _ in range(lvl - 2):
        prev, cur = cur, prev + cur
    return cur
def get_level_per_xp(xp: int) -> int:
    """Level reached with *xp* total experience points.

    This is the inverse of get_xp_per_level: the largest level whose XP
    threshold does not exceed *xp* (0 for anything below the first
    threshold of 5).
    """
    if xp <= 4:
        return 0
    lvl = 1
    while get_xp_per_level(lvl) <= xp:
        lvl += 1
    return lvl - 1
| true |
6b2bc76ab6f039e99c7e725ed46a2769fba68723 | Python | jimmy74185/CS453 | /TCP_client.py | UTF-8 | 1,588 | 3.203125 | 3 | [] | no_license | import sys
from socket import *
import time
def main():
    """TCP client: send "<string> <connectionID>" to <ip>:<port> and retry
    with a new connection ID (up to 3 attempts) whenever the server replies
    RESET.  Usage: TCP_client.py <string> <ip> <port> <connectionID>.
    """
    string = sys.argv[1]
    ip = sys.argv[2]  # Received input from command
    port = int(sys.argv[3])  # Turn the string into int for port number
    cID = sys.argv[4]
    cSocket = socket(AF_INET, SOCK_STREAM)  # Making socket interface
    msg = string+" "+cID
    try:
        cSocket.connect ((ip, port))  # send message to the server in
        cSocket.send(msg.encode())
        count = 0  # "StringTypedIn ConnectionID" format
        cSocket.settimeout(60)  # Timeout for 60secs
        while count <= 3 :  # Maximum 3 tries
            received = cSocket.recv(2048)
            recvMsg = received.decode()
            if recvMsg.split()[0] == "OK":
                print("Connection established "+recvMsg[2:])
                break
            if recvMsg.split()[0] == "RESET":
                # If received RESET, ask for new Connection ID and send the message again.
                if count == 2 :
                    print("Connection Failure")
                    break
                print("Connection Error "+recvMsg.split()[1])
                cID = input("New Connection ID:")
                msg = string+" "+cID
                # Re-open a fresh socket for the retry.
                cSocket.close()
                cSocket = socket(AF_INET, SOCK_STREAM)
                cSocket.connect ((ip, port))
                cSocket.send(msg.encode())
            count+=1
    except:
        # NOTE(review): this bare except reports "Time Out" for ANY failure
        # (connection refused, decode errors, ...) -- consider catching
        # socket.timeout specifically and letting other errors surface.
        print("Time Out")
    cSocket.close()
if __name__ == "__main__":
    # Run the client only when executed directly.
    main()
26abbd01c5a9f907e8c7eab9fe12d1b6df882c32 | Python | dagrala/vibida | /Python_Scripts/appendIndicators.py | UTF-8 | 2,015 | 3.234375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import csv, sys
# Ratio id (1-10) -> human-readable Spanish indicator label; the ids match
# the indicator rows written by main() when the output file is created.
label_ratios = {
    1: "Déficit/Superávit per cápita",
    2: "Ahorro neto per cápita",
    3: "Ahorro bruto per cápita",
    4: "Elasticidad presupuestaria",
    5: "Gasto público por habitante",
    6: "Gasto de inversión per cápita",
    7: "Gasto de inversión directa per cápita",
    8: "Presion fiscal",
    9: "Autonomía fiscal",
    10: "Incremento de deuda per cápita"
}
def main(indFile):
    """Append one period's indicator values as a new column of the
    aggregated per-entity CSV.

    *indFile* is named "<period>-<entity>-indi.csv"; the aggregate file name
    is everything after the first dash.  (Python 2 code: print statements,
    'wb' csv mode.)
    """
    try:
        reader = csv.DictReader(open(indFile, 'r'), delimiter=',')
    except IOError:
        print 'El fichero', indFile, 'no existe'
        sys.exit(0)
    # Collect the ratio values; the first row also contributes the period
    # label so new_column is [period, value1, ..., value10].
    new_column = []
    for index, row in enumerate(reader):
        if index == 0:
            new_column.append(row["periodo"])
            new_column.append(row["valor"])
        else:
            new_column.append(row["valor"])
        # print row["id"], row["valor"]
    # for i, e in enumerate(new_column):
    #     print i, e
    aFile = '-'.join(indFile.split('-')[1:])
    print indFile
    print "aFile:",aFile
    fileCreated = False
    try:
        # NOTE(review): file handles opened inline here and above are never
        # closed explicitly; left to the garbage collector.
        reader = csv.reader(open(aFile, 'r'), delimiter=',')
        fileCreated = True
        print 'el fichero ya estaba creado'
    except IOError:
        print 'El fichero', aFile, 'no existe, hay que crearlo'
    header = ["pre","entidad","id","indicador"]
    entidad = aFile.split('-')[0]
    preData = []
    if fileCreated:
        preData = list(reader)
        # Refuse to add a column for a period that is already present.
        if(new_column[0] in preData[0][4:]):
            print "Error: Se esta intentando añadir una columna de un periodo ya existente ->", new_column[0]
            sys.exit(0)
    else:
        # First run: build the header plus one row per indicator id.
        preData.append(header)
        for i in range(1,11):
            preData.append(["IND", entidad, str(i), label_ratios[i]])
    # Rewrite the whole file with the new column appended to every row
    # (caution: make a backup first, as the original comment warned).
    writer = csv.writer(open(aFile, 'wb'), delimiter=',')
    for index, elem in enumerate(preData):
        writer.writerow(elem + [new_column[index]])
if __name__ == '__main__':
    # NOTE(review): len(sys.argv) < 1 is never true (argv always contains
    # the script name), so a missing argument still reaches main() and
    # raises IndexError there; the intended check is probably < 2.
    if len(sys.argv) < 1:
        print "Error: Se debe introducir 1 argumento: python appendIndicators.py periodo-entidad-indi.csv"
    else:
        main(sys.argv[1])
#python appendIndicators.py 2010-01002AA00-indi.csv
a2f54da97403cea11685c6d0cb4e1d636a33aa84 | Python | vijayendra-g/deeplearning-timeseries | /lstm_model.py | UTF-8 | 2,122 | 2.765625 | 3 | [] | no_license | from keras.models import Sequential
from keras.layers import Dense, LSTM, GRU, Dropout
import numpy as np
class LSTM_RNN:
    """Stateful single-unit-batch LSTM for time-series regression (Keras)."""

    def __init__(self, look_back, dropout_probability = 0.2, init ='he_uniform', loss='mse', optimizer='rmsprop'):
        # Stateful LSTM fed one scalar timestep at a time (batch shape
        # (1, 1, 1)); state is reset manually between sequences.
        self.rnn = Sequential()
        self.look_back = look_back
        self.rnn.add(LSTM(10, stateful = True, batch_input_shape=(1, 1, 1), init=init))
        self.rnn.add(Dropout(dropout_probability))
        self.rnn.add(Dense(1, init=init))
        self.rnn.compile(loss=loss, optimizer=optimizer)

    def batch_train_test(self, trainX, trainY, testX, testY, nb_epoch=150):
        """Train for *nb_epoch* epochs, evaluating on the test set after
        each one; returns the final epoch's mean testing loss.

        Each sample is fed timestep-by-timestep (look_back steps) against
        the same target, with the LSTM state reset between samples.
        """
        print('Training LSTM-RNN...')
        for epoch in range(nb_epoch):
            print('Epoch '+ str(epoch+1) +'/{}'.format(nb_epoch))
            training_losses = []
            testing_losses = []
            for i in range(len(trainX)):
                y_actual = trainY[i]
                for j in range(self.look_back):
                    training_loss = self.rnn.train_on_batch(np.expand_dims(np.expand_dims(trainX[i][j], axis=1), axis=1),
                                                            np.array([y_actual]))
                    training_losses.append(training_loss)
                self.rnn.reset_states()
            print('Mean training loss = {}'.format(np.mean(training_losses)))
            mean_testing_loss = []
            for i in range(len(testX)):
                for j in range(self.look_back):
                    testing_loss = self.rnn.test_on_batch(np.expand_dims(np.expand_dims(testX[i][j], axis=1), axis=1),
                                                          np.array([testY[i]]))
                    testing_losses.append(testing_loss)
                self.rnn.reset_states()
                # NOTE(review): y_pred below is computed but never used --
                # confirm whether predictions were meant to be collected.
                for j in range(self.look_back):
                    y_pred = self.rnn.predict_on_batch(np.expand_dims(np.expand_dims(testX[i][j], axis=1), axis=1))
                self.rnn.reset_states()
            mean_testing_loss = np.mean(testing_losses)
            print('Mean testing loss = {}'.format(mean_testing_loss))
        return mean_testing_loss
return mean_testing_loss
| true |
7c97323e6c64a073d556171ec52a4d6901a95bb4 | Python | laurens777/Advent-of-Code-2020 | /day7/part2.py | UTF-8 | 761 | 3.46875 | 3 | [] | no_license | import re
def main():
    """Parse input.txt into a bag-colour -> contents mapping and print how
    many bags a shiny gold bag ultimately contains.

    Each mapped value is a list of "<count> <colour>" strings.
    """
    bags = {}
    # NOTE(review): `input` shadows the builtin of the same name here.
    with open("input.txt") as input:
        for line in input:
            line = line.strip("\n")
            try:
                outer = re.search('([a-z]+ [a-z]+) bags contain', line)
                inner = re.findall('([1-9] [a-z]+ [a-z]+) bag', line)
            except AttributeError:
                # NOTE(review): search/findall never raise AttributeError;
                # the risky call is outer.group(1) below, which is OUTSIDE
                # this try -- confirm the intended guard.
                print("something went wrong")
            bags[outer.group(1)] = [bag for bag in inner]
    print(numBags(bags, "shiny gold"))
def numBags(bags, color):
    """Return the total number of bags contained (recursively) inside one
    bag of *color*.

    *bags* maps a colour to a list of "<count> <inner colour>" strings; a
    bag with no contents maps to an empty list.

    Fix over the original: the inner ``numBags`` call was evaluated twice
    per entry (once in the branch test, once in the sum), doubling the work
    at every recursion level; the two branches also collapse to the single
    expression count * (1 + inner), so the redundant if/else is gone.
    """
    total = 0
    for bag in bags[color]:
        count, inner_color = bag.split(" ", 1)
        total += int(count) * (1 + numBags(bags, inner_color))
    return total
if __name__ == "__main__":
    # Solve the puzzle only when executed directly.
    main()
540a09878b0135dd4d1155cca1d7dda7bffe2f2c | Python | Anonymous-2611/AdaRec | /models/nas/modules/finetune.py | UTF-8 | 3,025 | 2.59375 | 3 | [] | no_license | import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from models.bert.embedding.bert import BertEmbedding
from .operators import OPERATOR_CLS, OPERATOR_NAME
class FinetuneEdge(nn.Module):
    """One edge of a searched cell, frozen to its strongest operator.

    The operator with the highest architecture weight in *alpha* is chosen
    at construction time and registered under its canonical name.
    """

    def __init__(self, num_hidden, alpha):
        super(FinetuneEdge, self).__init__()
        op_index = np.argmax(alpha)
        self.op_name = OPERATOR_NAME[op_index]
        # Register under the operator's name so state_dict keys stay
        # readable and stable across runs.
        self.add_module(self.op_name, OPERATOR_CLS[op_index](num_hidden))

    def forward(self, x):
        x = F.relu(x)  # [B, L, C]
        h = self.__getattr__(self.op_name)(x)  # [B, L, C]
        return h
class FinetuneNode(nn.Module):
    """A cell node: one frozen edge per predecessor, outputs their sum."""

    def __init__(self, num_hidden, alphas):
        super(FinetuneNode, self).__init__()
        self.edges = nn.ModuleList(
            FinetuneEdge(num_hidden, alpha) for alpha in alphas
        )

    def forward(self, inputs):
        # inputs: one [B, L, C] feature map per edge (len(inputs) matches
        # len(self.edges)); the node output is the element-wise sum of all
        # edge outputs, also [B, L, C].
        return sum(edge(feature_map) for edge, feature_map in zip(self.edges, inputs))
class FinetuneCell(nn.Module):
    """A searched cell: a chain of nodes whose outputs are mixed by learned
    attention weights, layer-normalised, and wrapped in a residual skip.
    """

    def __init__(self, num_hidden, num_node, alpha_arch):
        super(FinetuneCell, self).__init__()
        self.num_node = num_node
        self.nodes = nn.ModuleList([FinetuneNode(num_hidden, alpha_arch[idx]) for idx in range(self.num_node)])
        # Learnable per-node mixing weights, softmax-normalised in forward().
        self.attention_w = nn.Parameter(1e-2 * torch.randn(self.num_node), requires_grad=True)  # [num_node]
        self.ln = nn.LayerNorm(num_hidden, eps=1e-8)

    def forward(self, x_in):
        # Each node receives the cell input plus every previous node output.
        feature_maps = [x_in]
        for node in self.nodes:
            node_output = node(feature_maps)  # [B, L, C]
            feature_maps.append(node_output)
        # feature_maps: [num_node+1, B, L, C]
        attention = torch.softmax(self.attention_w, dim=0)  # [num_node]
        attention = torch.reshape(attention, (-1, 1, 1, 1))  # [num_node, 1, 1, 1]
        # Attention-weighted sum over the node outputs (x_in is excluded).
        outputs = torch.stack(feature_maps[-self.num_node :], 0)  # [num_node, B, L, C]
        outputs = attention * outputs  # [num_node, B, L ,C]
        outputs = torch.sum(outputs, 0)  # [B, L, C]
        outputs = self.ln(outputs)
        outputs = F.relu(outputs)
        # Residual connection around the whole cell.
        return outputs + x_in
class FinetuneModel(nn.Module):
    """Full finetuning network: BERT-style embedding, a stack of identical
    searched cells, dropout, and a per-position linear classifier head.
    """

    def __init__(self, alpha_arch, num_cells, num_node, vocab_size, num_hidden, num_class, max_len, dropout):
        super(FinetuneModel, self).__init__()
        self.embedding = BertEmbedding(vocab_size=vocab_size, embed_size=num_hidden, max_len=max_len, dropout=dropout)
        # Every cell is built from the same frozen architecture alpha_arch.
        self.cells = nn.ModuleList([FinetuneCell(num_hidden, num_node, alpha_arch) for _ in range(num_cells)])
        self.dropout = nn.Dropout(dropout)
        self.out = nn.Linear(num_hidden, num_class)

    def forward(self, x):
        x = self.embedding(x)  # [B, L, C]
        for cell in self.cells:
            x = cell(x)  # [B, L, C]
        x = self.dropout(x)
        out = self.out(x)  # [B, L, num_class]
        return out
| true |
a8c4c98abcacf3f68c6940b8ee3cb225e602ef5a | Python | chucklapress/email_bouncer_dot_fun_domain | /import_data.py | UTF-8 | 1,144 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import csv
import psycopg2
import time
import shutil
import os

# Paths: raw export and the cleaned copy that is loaded into Postgres.
SRC_CSV = '/home/chucklapress/spam_table/test.csv'
CLEAN_CSV = '/home/chucklapress/spam_table/new_data.csv'

# Copy the raw CSV, dropping rows that are completely empty.
with open(SRC_CSV) as in_file, open(CLEAN_CSV, 'w') as out_file:
    writer = csv.writer(out_file)
    for row in csv.reader(in_file):
        if any(row):
            writer.writerow(row)

conn = psycopg2.connect("host=localhost dbname=myproject user=myprojectuser password=password")
cur = conn.cursor()

# Recreate the staging table so repeated runs start from a clean slate.
cur.execute("DROP TABLE IF EXISTS email_parser_raw")
table_create_command = """CREATE TABLE email_parser_raw (
mail TEXT
);"""
# Bug fix: the CREATE was commented out while the DROP above still ran,
# so every INSERT below failed against a missing table.
cur.execute(table_create_command)

with open(CLEAN_CSV, 'r') as f:
    print("csv file open")
    reader = csv.reader(f)  # (a duplicate, unused reader was removed)
    for row in reader:
        print(row)
        # Parameterised insert: each cleaned row holds one 'mail' value.
        cur.execute(
            "INSERT INTO email_parser_raw (mail) VALUES(%s)",
            row
        )
conn.commit()

# Remove both CSVs only after the rows are committed.
os.remove(SRC_CSV)
os.remove(CLEAN_CSV)
conn.close()
| true |
7789dc72074b9867e35fad468d32ad1eade5ccea | Python | FirebirdSQL/firebird-qa | /tests/bugs/core_5783_test.py | UTF-8 | 1,674 | 2.515625 | 3 | [
"MIT"
] | permissive | #coding:utf-8
"""
ID: issue-6046
ISSUE: 6046
TITLE: execute statement ignores the text of the SQL-query after a comment of the form "-"
DESCRIPTION:
We concatenate query from several elements and use <CR> delimiter only to split this query into lines.
Also, we put single-line comment in SEPARATE line between 'select' and column/value that is obtained from DB.
    Final query will look like this (lines are separated only by a SINGLE
    delimiter, ascii_char(13); there is no <LF> here):
    ===
    select
    -- comment N1
    'foo' as msg
    from
    -- comment N2
    rdb$database
    ===
This query should NOT raise any exception and must produce normal output (string 'foo').
Thanks to hvlad for suggestions.
JIRA: CORE-5783
FBTEST: bugs.core_5783
"""
import pytest
from firebird.qa import *
# Empty test database, created fresh for this test.
db = db_factory()
# Test-runner fixture bound to the database above.
act = python_act('db')
# Expected combined output: each echoed query line, then the result row.
expected_stdout = """
Query line: select
Query line: -- comment N1
Query line: 'foo' as msg
Query line: from
Query line: -- comment N2
Query line: rdb$database
Query result: foo
"""
@pytest.mark.version('>=3.0.4')
def test_1(act: Action, capsys):
    # CR-separated query: a single-line comment ended by a bare carriage
    # return must not swallow the rest of the statement (CORE-5783).
    sql_expr = "select \r -- comment N1 \r 'foo' as msg \r from \r -- comment N2 \r rdb$database"
    with act.db.connect() as con:
        cur = con.cursor()
        for line in sql_expr.split('\r'):
            print(f'Query line: {line}')
        cur.execute(sql_expr)
        for row in cur:
            print(f'Query result: {row[0]}')
    # Compare the captured stdout against the module-level expectation.
    act.expected_stdout = expected_stdout
    act.stdout = capsys.readouterr().out
    assert act.clean_stdout == act.clean_expected_stdout
| true |
class Book:
    """Lightweight view of one Google Books API volume."""

    def __init__(self, _id, data):
        volume_info = data.get('volumeInfo')
        self.id = _id
        self.title = volume_info.get('title')
        self.authors = volume_info.get('authors')
        self.type = 'book'

    def json(self):
        """Return the JSON-serialisable dict representation."""
        return {
            'id': self.id,
            'title': self.title,
            'authors': self.authors,
            'type': self.type,
        }
| true |
36bf1e0a879cb37d5fb5f7e63a23fcbb5b6b2826 | Python | perfectblack999/twitterBigData | /sentiment_weighting.py | UTF-8 | 1,640 | 2.78125 | 3 | [] | no_license | __author__ = 'perfectblack999'
import sqlite3 as sql

DB_PATH = '/Users/perfectblack999/Documents/Developer/Nkem_Big_Data_Projects/tweets.sqlite'


def _as_float(value):
    """Map NULL columns to 0.0 so the normalisation math never sees None."""
    return float(value) if value is not None else 0.0


connection = sql.connect(DB_PATH)
connection.text_factory = str

weighted_data = []

# Fetch the per-tweet stats plus the column maxima used for normalisation.
with connection:
    cursor = connection.cursor()
    cursor.execute("SELECT rowid, sentiment, followers, retweet_count, favorite_count FROM tweet_text")
    tweet_data = cursor.fetchall()
    cursor.execute("select MAX(followers), MAX(retweet_count), MAX(favorite_count) FROM tweet_text")
    data_maxes = cursor.fetchone()

# Normalise each metric into [0, 1] by dividing by its column maximum.
# (The original also coerced row[1], the sentiment, but never used it;
# that dead code and two unused module lists were removed.)
for row in tweet_data:
    row_id = _as_float(row[0])
    followers = _as_float(row[2])
    retweets = _as_float(row[3])
    favorites = _as_float(row[4])
    weighted_data.append((row_id,
                          retweets / data_maxes[1],
                          followers / data_maxes[0],
                          favorites / data_maxes[2]))

print("data maxes: followers, rt, favorites")
print(data_maxes)

# Write the normalised values back, one UPDATE per tweet.
with connection:
    cursor = connection.cursor()
    for row in weighted_data:
        cursor.execute("UPDATE tweet_text SET rt_normalized = (?), followers_normalized = (?), fav_normalized = (?) WHERE rowid = (?) ",
                       (row[1], row[2], row[3], row[0]))
cce4c1fb0a1539c3b1b485372d22736223b434fd | Python | jtwhite79/my_python_junk | /gather/gathered/rename_array_mp.py | UTF-8 | 1,615 | 2.859375 | 3 | [
"BSD-2-Clause",
"BSD-Advertising-Acknowledgement"
] | permissive | import sys
import os
import multiprocessing as mp
from Queue import Queue
import shutil
def worker(jobq,pid):
'''
args[0] = in file name
args[1] = out file name
'''
while True:
#--get some args from the queue
args = jobq.get()
#--check if this is a sentenial
if args == None:
break
shutil.copy(args[0],args[1])
jobq.task_done()
print 'worker',pid,' finished',args[0]
#--mark the sentenial as done
jobq.task_done()
return
def master():
#--dir of arrays
in_dir = 'pet_mm_day\\'
out_dir = 'temp\\'
files = os.listdir(in_dir)
#--build a queue args
q_args = []
for f in files:
f_new = 'pet_'+f[3:]
if not os.path.exists(in_dir+f_new):
q_args.append([in_dir+f,out_dir+f_new])
#break
jobq = mp.JoinableQueue()
#--for testing
#jobq.put_nowait(q_args[0])
#worker(jobq,1)
#return
procs = []
num_procs = 6
for i in range(num_procs):
#--pass the woker function jobq and a PID
p = mp.Process(target=worker,args=(jobq,i))
p.daemon = True
print 'starting process',p.name
p.start()
procs.append(p)
for q in q_args:
jobq.put(q)
for p in procs:
jobq.put(None)
#--block until all finish
for p in procs:
p.join()
print p.name,'Finished'
return
if __name__ == '__main__':
master()
| true |
cff5aba77c5b496089d370f676942eae70a630bf | Python | claireyuan/project-euler-solutions | /12.py | UTF-8 | 951 | 4.25 | 4 | [] | no_license | """
Project Euler
Problem 12: Highly divisible triangular number
Answer: 76576500
"""
import math
def triangularNumberGenerator():
    """Yield the triangular numbers 1, 3, 6, 10, ... indefinitely."""
    total = 0
    step = 1
    while True:
        total += step
        step += 1
        yield total
def numDivisors(num):
    """Return the number of positive divisors of num.

    Walks candidates up to sqrt(num); each hit i contributes both i and
    num // i, collected in a set so perfect squares are not double
    counted.  Fixes: py2-only xrange -> range, and num / i (which yields
    floats under true division) -> exact integer num // i.
    """
    divisors = set()
    for i in range(1, int(math.sqrt(num)) + 1):
        if num % i == 0:
            divisors.add(i)
            divisors.add(num // i)
    return len(divisors)
def getFirstTriangleNumberOverTargetDivisors(target):
    """Return the first triangular number with at least `target` divisors.

    Fix: the py2-only generator method .next() is replaced by the
    next() builtin, which works on both Python 2.6+ and Python 3.
    """
    triangle_nums = triangularNumberGenerator()
    current = next(triangle_nums)
    while numDivisors(current) < target:
        current = next(triangle_nums)
    return current
print getFirstTriangleNumberOverTargetDivisors(500) | true |
51200d48b54418e1bd75cbdfae4e5f80db3c3f50 | Python | gsjkirby/python.core_resources | /machine_learning/Part 1 - Data Preprocessing/data_preprocessing.py | UTF-8 | 2,132 | 3.53125 | 4 | [] | no_license | # Data Preprocessing Template
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import Imputer, LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split
# Import the dataset
dataset = pd.read_csv('Data.csv')
# Independent variable matrix: every column except the last
X = dataset.iloc[:, :-1].values
# Dependent variable vector: the 4th column (index 3)
y = dataset.iloc[:, 3].values
# Impute missing data with the column mean.
# NOTE(review): sklearn.preprocessing.Imputer is removed in modern
# scikit-learn (replaced by sklearn.impute.SimpleImputer); this file
# targets an older sklearn release -- confirm the installed version.
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
# Fit the imputer to the 2nd and 3rd columns (index 1 and 2) in X
# (the upper slice bound is exclusive, so index 3 is not included)
imputer = imputer.fit(X[:, 1:3])
# Replace the missing entries with the fitted column means
X[:, 1:3] = imputer.transform(X[:, 1:3])
# Encode the categorical variable in X (first column, index 0) as integers
labelencoder_X = LabelEncoder()
X[:,0] = labelencoder_X.fit_transform(X[:,0])
# One-hot encode that column so the model does not treat the integer
# codes as an ordered quantity
onehotencoder = OneHotEncoder(categorical_features = [0])
X = onehotencoder.fit_transform(X).toarray()
# Encode the categorical dependent variable (y); no one-hot needed since
# a single label column is already understood as categorical
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
# Split into training and test sets (80/20, fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature scaling for X: standardise (x-mean)/std (could also normalise)
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
# Reuse the scaler fitted on the training set so both sets share a basis
X_test = sc_X.transform(X_test)
# Feature scaling for y - fit and transform (not needed here)
#sc_y = StandardScaler()
#y_train = sc_y.fit_transform(y_train)
| true |
39ef72ef129d176cc2a4ead3c8d51f3f0975995f | Python | Christhomas17/Euler-Coding-Problems | /5 - done.py | UTF-8 | 2,391 | 3.640625 | 4 | [] | no_license | '''
2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
'''
### this is a solution for a smaller number but it takes too long for a larger so we need to optimize
# import numpy as np
# maxNum = 10
# nums = np.arange(2,maxNum+ 1,1)
# number = 2520
# print(all([number % num == 0 for num in nums]) == True)
# number = 2
# while all([number % num == 0 for num in nums]) == False:
# if number % 10000 == 0:
# print(number)
# number += 1
# print(number)
#############################
from math import sqrt
def is_prime(x):
    """Return True if x is prime (trial division by odd candidates up to sqrt(x)).

    Bug fix: the original returned False for 2 and True for 0 and 1.
    In-file callers only pass odd numbers >= 3, so their behaviour is
    unchanged.
    """
    if x < 2:
        return False
    if x == 2:
        return True
    if x % 2 == 0:
        return False
    i = 3
    # The smallest prime factor, if any, is at most sqrt(x).
    while i ** 2 <= x:
        if x % i == 0:
            return False
        i += 2
    return True
def create_primes_list(x):
    """Return the primes below int(sqrt(x)), seeded with 2.

    Only odd candidates from 3 upward are tested with is_prime.
    """
    primes = [2]
    primes.extend(n for n in range(3, int(sqrt(x)), 2) if is_prime(n))
    return primes
def get_prime_factors(numbers, primes=None):
    """Return the prime factorisation (with multiplicity) of lcm(numbers).

    For each prime p, one p is recorded per pass in which at least one
    remaining number is still divisible, i.e. p appears
    max_i v_p(numbers[i]) times -- its exponent in the LCM.  Returns
    None (as before) if `primes` is insufficient to fully factor.

    Fixes: the all-ones early-return sat inside the division loop and
    could cut repeated factors short (e.g. [4] -> [2]); it now runs
    after the loop.  int(num / divisor) (float-precision risk on large
    ints) is replaced by exact integer //.  `primes` defaults to the
    module-level `primes_list` for backward compatibility.
    """
    if primes is None:
        primes = primes_list
    factors = []
    remaining = numbers
    for divisor in primes:
        reduced = [n // divisor if n % divisor == 0 else n for n in remaining]
        while reduced != remaining:
            remaining = reduced
            factors.append(divisor)
            reduced = [n // divisor if n % divisor == 0 else n for n in remaining]
        if reduced == [1] * len(reduced):
            return factors
# Driver: prime-factorise lcm(1..20) and multiply the factors together.
x = 20
digits = list(range(1, x + 1))
primes_list = create_primes_list(10000000000)
print(digits, primes_list)
primeFactors = get_prime_factors(digits)
print(primeFactors)
from functools import reduce
totProd = reduce(lambda acc, factor: acc * factor, primeFactors)
print(totProd)
#
# from functools import reduce
# maxNum = 4
# print(reduce(lambda x,y: x*y, [i for i in range(maxNum+1)]))
# from operator import mul
# from functools import reduce
# # print(reduce(lambda x,y : x*y,nums))
# # z = [2,3,4]
# # print(list(lambda x,y: x*y for x,y in z))
# # h = reduce(lambda x,y : x*y, [i for i in range(1,maxNum + 1,1)])
# # print(h)
# # print(type(h))
# reducers = [19,18,17,16,15,14,13,12,11]
# # from itertools import permutations
# # for item in permutations(np.arange(1,21,1), 19):
# # print(item)
# number = 20
# while all([number % num == 0 for num in reducers]) == False:
# if number % 10000 == 0:
# print(number)
# number += 2
| true |
6d8aea272c2432bb8e807d91eb4b64d7a3a654b7 | Python | Evergreen1992/machine_learning | /svd/svd.py | UTF-8 | 2,133 | 2.9375 | 3 | [] | no_license | #encoding=utf-8
from numpy import *
from numpy import linalg as la
print ".......SVD奇异值分解,进行数据降维处理........."
def loadExData():
    """Return the 7x5 demo user-item rating matrix (0 = unrated)."""
    ratings = [[4, 4, 0, 2, 2],
               [4, 0, 0, 3, 3],
               [4, 0, 0, 1, 1],
               [1, 1, 1, 2, 0],
               [2, 2, 2, 0, 0],
               [1, 1, 1, 0, 0],
               [5, 5, 5, 0, 0]]
    return mat(ratings)
def eulidSim(inA, inB):
    """Similarity in (0, 1] from Euclidean distance: 1 / (1 + ||inA - inB||)."""
    distance = la.norm(inA - inB)
    return 1.0 / (1.0 + distance)
def standEst(dataMat, user, simMeas, item):
    """Estimate `user`'s rating for the unrated column `item`.

    Similarity-weighted average over every item j the user has rated;
    similarity is computed by `simMeas` on the rows of users who rated
    both `item` and `j`.  Returns 0 when no overlap exists.

    Changes: Chinese comments translated; py2 print statements rewritten
    in a parenthesized form that produces identical output on Python 2
    and also compiles on Python 3.
    """
    n = shape(dataMat)[1]
    simTotal = 0.0
    ratSimTotal = 0.0
    for j in range(n):
        userRating = dataMat[user, j]
        if userRating == 0:
            continue
        print("--------------------------------------------------------")
        print("j = %d , item = %d" % (j, item))
        print("userRating:%d " % userRating)
        # Users who rated both `item` and `j`.
        overLap = nonzero(logical_and(dataMat[:, item].A > 0, dataMat[:, j].A > 0))[0]
        if len(overLap) == 0:
            similariTy = 0
        else:
            print("相似度计算:")
            print(dataMat[overLap, item])
            print(".....")
            print(dataMat[overLap, j])
            similariTy = simMeas(dataMat[overLap, item], dataMat[overLap, j])
            print("similarity is : %f" % similariTy)
        simTotal += similariTy
        ratSimTotal += similariTy * userRating
    if simTotal == 0:
        return 0
    else:
        return ratSimTotal / simTotal
def recomend(dataMat, user, N=3, simMeas=eulidSim, estMethod=standEst):
    """Return the top-N (item, estimated-score) pairs for `user`'s unrated items.

    Returns the string "you rated everything" when no item is unrated.
    """
    unratedItems = nonzero(dataMat[user, :].A == 0)[1]
    if len(unratedItems) == 0:
        return "you rated everything"
    # Score every unrated item, then keep the N best.
    itemScores = [(item, estMethod(dataMat, user, simMeas, item))
                  for item in unratedItems]
    return sorted(itemScores, key=lambda pair: pair[1], reverse=True)[:N]
#test cases
dataMat = loadExData()
result = recomend(dataMat, 2)
for item in result:
print item , | true |
f7afb48151f63d5e9c0ea37b342b2a0e94333ce9 | Python | saraswathykrk/Udacity_ML | /evaluation/evaluate_poi_identifier.py | UTF-8 | 2,013 | 3.390625 | 3 | [] | no_license | #!/usr/bin/python
"""
Starter code for the evaluation mini-project.
Start by copying your trained/tested POI identifier from
that which you built in the validation mini-project.
This is the second step toward building your POI identifier!
Start by loading/formatting the data...
"""
import pickle
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
data_dict = pickle.load(open("../final_project/final_project_dataset.pkl", "r") )
### add more features to features_list!
features_list = ["poi", "salary"]
data = featureFormat(data_dict, features_list)
labels, features = targetFeatureSplit(data)
### your code goes here
from time import time
from sklearn.model_selection import train_test_split
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size = 0.3, random_state = 42)
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
t0 = time()
clf.fit(features_train, labels_train)
print "Training time :", round(time() - t0,3), "s"
t0 = time()
pred = clf.predict(features_test)
print "Testing time :", round(time() - t0,3), "s"
from sklearn.metrics import accuracy_score
acc = accuracy_score(pred,labels_test)
print "Accuracy is:" , acc
poi_cnt = 0
for i in labels_test:
if i == 1:
poi_cnt = poi_cnt + 1
print "Total number of pois in test set::", poi_cnt
print "Total number of people in test set::", len(features_test)
new_pred = [0.0, 0.0 , 0.0 ,0.0, 0.0 , 0.0,0.0, 0.0 , 0.0,0.0, 0.0 , 0.0,0.0, 0.0 , 0.0,0.0, 0.0 , 0.0,0.0, 0.0 , 0.0,0.0, 0.0 , 0.0,0.0, 0.0 , 0.0,0.0, 0.0]
print accuracy_score(labels_test, new_pred)
print new_pred
print pred
print labels_test
print "New accuracy = Total labelled correctly/total num of items:: 25/29 =",float(round(25/29,6))
#printing precision score and recall score
from sklearn.metrics import precision_score, recall_score
print precision_score(labels_test,pred)
print recall_score(pred,labels_test)
| true |
8ed7e117dfd2899f76a8f95559998a5c733f18b3 | Python | d-realm/python-2 | /testdrive.py | UTF-8 | 891 | 3.328125 | 3 | [] | no_license | #!/usr/bin/python3
from time import sleep
from ev3dev.ev3 import *
# Will need to check EV3 button state
btn = Button()
# Connect the two drive motors and fail fast if either is missing.
rightMotor = LargeMotor(OUTPUT_A)
assert rightMotor.connected, "Error: Right motor not connected"
leftMotor = LargeMotor(OUTPUT_D)
assert leftMotor.connected, "Error: Left motor not connected"
# Motor speed setpoint (assumed tacho counts/sec -- confirm in ev3dev docs).
DRIVE_SPEED = 500
# Degrees per full wheel revolution.
REVOLUTION = 360
# Wheel diameter in centimetres.
DIAMETER = 5.6
PI = 3.14
rightMotor.reset() # Set the current angle to 0
leftMotor.reset()
# Brake (rather than coast) whenever the motors are stopped.
rightMotor.stop_action = "brake"
leftMotor.stop_action = "brake"
# Distance in centimetres
def drive(distance):
    """Drive straight for `distance` centimetres, then brake.

    The distance is converted to a wheel angle via the circumference
    (DIAMETER * PI); the loop polls the left motor's position until the
    target angle is reached (the right motor is assumed to keep pace).
    """
    target_angle = distance * REVOLUTION / (DIAMETER * PI)
    rightMotor.run_forever(speed_sp=DRIVE_SPEED)
    leftMotor.run_forever(speed_sp=DRIVE_SPEED)
    while leftMotor.position < target_angle:
        sleep(0.02)
    rightMotor.stop()
    leftMotor.stop()
# Drive user-entered distances until any brick button is pressed.
while not btn.any():
    centimetres = int(input())
    drive(centimetres)
| true |
0d50486490360d16766e750f3f05cf07c1cbef3c | Python | AllanOliveiraM/django-graphql-playground | /tests/integration/apps/core/test_models.py | UTF-8 | 1,749 | 2.53125 | 3 | [] | no_license | import pytest
from django.db import IntegrityError
from django_graphql_playground.apps.core.models import Category
from django_graphql_playground.apps.core.models import Ingredient
@pytest.mark.django_db
def test_should_create_category():
    # Creating one category must persist exactly one row with that name.
    category_name = "fake_name_category"
    Category.objects.create(name=category_name)
    assert Category.objects.all().count() == 1
    assert Category.objects.all().first().name == category_name
@pytest.mark.django_db
def test_should_create_an_ingredient_with_its_category():
    # An ingredient keeps its name, notes, and category relation.
    category = Category.objects.create(name="fake_name_category")
    ingredient_name = "fake_ingredient_name"
    ingredient_notes = "fake_ingredient_notes"
    Ingredient.objects.create(name=ingredient_name, notes=ingredient_notes, category=category)
    created = Ingredient.objects.all().first()
    assert created.name == ingredient_name
    assert created.notes == ingredient_notes
    assert created.category == category
@pytest.mark.skip("The assert might me different depending on the target database")
@pytest.mark.django_db
def test_should_throw_constraint_error_when_category_id_is_invalid():
    # Creating an ingredient without a category must violate NOT NULL.
    with pytest.raises(IntegrityError) as exception:
        Ingredient.objects.create(name="fake_ingredient_name", notes="fake_ingredient_notes")
    # Sqlite
    assert str(exception.value) == "NOT NULL constraint failed: core_ingredient.category_id"
    # Postgres
    assert str(exception.value).startswith('null value in column "category_id" violates not-null constraint')
| true |
b400e786b9ea8c6f085a0c772b47b43d1738e951 | Python | vmred/Coursera | /coursera/python-basic-programming/week5/hw28.py | UTF-8 | 1,119 | 3.734375 | 4 | [] | no_license | # Петя перешёл в другую школу.
# Petya transferred to another school.  In P.E. class he must find his
# place in the line-up.
# Input format: a non-increasing sequence of natural numbers (heights of
# the pupils already in line), then a number X -- Petya's height.
# All input numbers are natural and do not exceed 200.
def getOrderNumber(it, value):
    """Return Petya's 1-based position: before the first pupil shorter than him.

    `it` is a whitespace-separated non-increasing sequence of heights;
    `value` is Petya's height.  If nobody is shorter, he stands last
    (position len + 1).
    """
    heights = list(map(int, it.split()))
    # enumerate avoids the original's extra .index() scan per lookup.
    return next((pos + 1 for pos, h in enumerate(heights) if h < value),
                len(heights) + 1)
print(getOrderNumber(input(), int(input())))
# a = getOrderNumber('165 163 160 160 157 157 155 154', 162)
# e = 3
# assert (a == e), '--> actual: %s, expected %s' % (a, e)
# a = getOrderNumber('165 163 160 160 157 157 155 154', 160)
# e = 5
# assert (a == e), '--> actual: %s, expected %s' % (a, e)
| true |
def standing_ovation(audience):
    """Return the minimum number of extra friends needed so everyone stands.

    audience[s] is the number of people with shyness level s; a person
    stands once s people are already standing.  Recruited friends have
    shyness 0 and stand immediately.
    """
    standing = 0
    recruited = 0
    for shyness, count in enumerate(audience):
        if standing < shyness:
            # Bridge the gap with friends so this group stands too.
            recruited += shyness - standing
            standing = shyness
        standing += count
    return recruited
if __name__ == '__main__':
    num_cases = int(input())
    for case_index in range(num_cases):
        # Each case line: "<max_shyness> <digit string>"; only the digits matter.
        digits = input().split()[1]
        audience = [int(ch) for ch in digits]
        print("Case #{}: {}".format(case_index + 1, standing_ovation(audience)))
| true |
22b422af45d643314e75d99b45a59b7a56301f8b | Python | avinash-512/YDV | /extract_senses.py | UTF-8 | 976 | 2.890625 | 3 | [] | no_license | from nltk.corpus import wordnet as wn
import operator
import nltk
def words(text):
    """Return the unique words in `text` whose POS tag is NN, JJ, or NMP."""
    tagged = nltk.pos_tag(nltk.word_tokenize(text))
    wanted_tags = ["NN", "JJ", "NMP"]
    unique_words = set()
    for token, tag in tagged:
        if tag in wanted_tags:
            unique_words.add(token)
    return list(unique_words)
def make_sense(text):
    """Map each extracted word to its list of WordNet synsets."""
    return {word: wn.synsets(word) for word in words(text)}
def calculate_keysenses(text):
    """Return the synsets of the text's words, ordered by how many
    distinct words share each sense (ascending count).

    Fix: the original computed a 30%-of-word-count cutoff (`pct`) but
    never applied it; that dead local has been removed.  Counting now
    uses dict.get instead of an if/else branch.
    """
    sense_dict = make_sense(text)
    sense_count = {}
    for word in sense_dict:
        for sense in sense_dict[word]:
            sense_count[sense] = sense_count.get(sense, 0) + 1
    sorted_keysenses = sorted(sense_count.items(), key=operator.itemgetter(1))
    return [sense for sense, _count in sorted_keysenses]
| true |
9f4f12a98357a312098ead394c6ed7c681dbc82b | Python | antwan2000arp/pyppet | /pyppet/wnck-helper.py | UTF-8 | 1,765 | 2.53125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/python
# apt-get install python-wnck
# apt-get install gnome-python-extras
# Cannot be run in the same process as GTK3.
import os, sys, time

try:
    import wnck
except ImportError:
    print('ERROR: wnck not installed - Ubuntu users can: "sudo apt-get install python-wnck"')
    sys.exit()

NAME = 'Blender'
use_startswith = '--use-starts-with' in sys.argv
use_icon_name = '--use-icon-name' in sys.argv
if len(sys.argv) >= 1: NAME = sys.argv[-1]
print('wnck-helper looking for window named:', NAME)

screen = wnck.screen_get_default()
screen.force_update()

if '--close-all-windows' in sys.argv:  # not working!
    # ./wnck-helper.py --close-all-windows "Untitled window"
    for w in screen.get_windows():
        print('ICON NAME:', w.get_icon_name())
        win = None
        if w.get_icon_name() == NAME:
            win = w
        elif use_startswith and w.get_icon_name().startswith(NAME):
            win = w
        if win:
            print('closing ->', win)
            pid = win.get_pid()
            print('pid', pid)
            if pid:
                os.system('kill -9 %s' % pid)
            win.shade()
            win.minimize()
            win.close(-1)
else:
    matched = []
    for w in screen.get_windows():
        if use_icon_name:
            print('ICON NAME:', w.get_icon_name())
            if w.get_icon_name() == NAME:
                matched.append(w)
            elif use_startswith and w.get_icon_name().startswith(NAME):
                matched.append(w)
        else:
            if use_startswith and w.get_name().startswith(NAME):
                matched.append(w)
            elif NAME in w.get_name():
                matched.append(w)
    for win in matched:
        # make_below allows drag-and-drop / move in the parent window to
        # work; otherwise the parent window must use always-on-top.
        win.make_below()  # REQUIRED FOR BLENDER
        win.shade()
        print('XID=%s' % win.get_xid())  # read on stdout in core.py API do_xembed
screen.force_update()
| true |
11b0b1ed840da20b3b39748aea740874673cb658 | Python | willdickson/virtual_desert | /nodes/rolling_circular_mean.py | UTF-8 | 653 | 3.359375 | 3 | [
"MIT"
] | permissive | import numpy as np
class RollingCircularMean(object):
    """Fixed-size rolling window of angles with a circular-mean readout.

    Angles go in and come out in degrees; they are stored in radians.
    """

    def __init__(self, size=800):
        self.size = size
        self.data = []  # window contents, radians, oldest first

    def insert_data(self, item):
        """Append one angle (degrees); evict the oldest sample when full."""
        self.data.append(np.deg2rad(item))
        if len(self.data) > self.size:
            self.data.pop(0)

    def value(self):
        """Current circular mean in degrees (0.0 for an empty window)."""
        if not self.data:
            return 0.0
        return np.rad2deg(circmean(self.data))
# utility func
# ---------------------------------------------------------------------------------
def circmean(alpha, axis=None):
    """Circular mean (radians) of the angles in `alpha`, optionally per axis."""
    return np.arctan2(np.mean(np.sin(alpha), axis), np.mean(np.cos(alpha), axis))
| true |
53f0ffafb2eb279387c1553fa65d5c6eca5faf45 | Python | sfpiano/gdbpy | /gui.py | UTF-8 | 3,113 | 2.828125 | 3 | [] | no_license | import gtk
import gdb
import os
OK_CODE = 1
CANCEL_CODE = 2
def end_match(completion, entrystr, iter, data):
    """EntryCompletion match func: accept rows whose text contains the typed entry."""
    row_text = completion.get_model()[iter][0]
    return entrystr in row_text
class MyWindow(gtk.Window):
def init(self, g):
self.g = g
def on_button_clicked(self, widget):
print "Hello World"
def __init__(self):
gtk.Window.__init__(self)
self.menu_bar = gtk.MenuBar()
self.build_menu()
self.build_dialogs()
table = gtk.Table(4, 2, True)
table.attach(self.menu_bar, 0, 2, 0, 1)
scrolled_window = gtk.ScrolledWindow(hadjustment=None, vadjustment=None)
textview = gtk.TextView(buffer=None)
textview.set_editable(False)
self.textbuffer = textview.get_buffer()
scrolled_window.add_with_viewport(textview)
# Left-attach, right, top, bottom
table.attach(scrolled_window, 0, 2, 1, 2)
self.button = gtk.Button(label="Click Here")
self.button.connect("clicked", self.on_button_clicked)
table.attach(self.button, 0, 1, 2, 3)
self.label = gtk.Label()
self.label.set_text("This is a left-justified label.\nWith multiple lines.")
table.attach(self.label, 0, 2, 3, 4)
self.add(table)
def match_cb(self, completion, model, iter):
print model[iter][0], 'was selected'
def handle_menu_file_open(self, widget, string):
mydata = self.g.execute("info sources", False, True)
self.liststore.clear()
for l in mydata.split('\n'):
if len(l) == 0 or l[-1] == ':':
continue
for sl in l.split(','):
self.liststore.append([sl.strip()])
self.completion.set_model(self.liststore)
response = self.open_source_dialog.run()
if response == OK_CODE:
filename = self.entry.get_text()
with open(filename) as f:
self.textbuffer.set_text(f.read())
self.open_source_dialog.hide()
def build_menu(self):
menu = gtk.Menu()
sub_item = gtk.MenuItem("Open")
sub_item.connect("activate", self.handle_menu_file_open, "Open")
menu.append(sub_item)
root_menu = gtk.MenuItem("File")
root_menu.set_submenu(menu)
root_menu.show()
self.menu_bar.append(root_menu)
def build_dialogs(self):
self.open_source_dialog = gtk.Dialog(
title='Open Source',
parent=None,
flags=0,
buttons=('OK', OK_CODE, 'Cancel', CANCEL_CODE))
self.entry = gtk.Entry()
self.completion = gtk.EntryCompletion()
self.liststore = gtk.ListStore(str)
for s in ['apple']:
self.liststore.append([s])
self.completion.set_model(self.liststore)
self.entry.set_completion(self.completion)
self.completion.set_text_column(0)
self.completion.set_minimum_key_length(3)
self.completion.set_match_func(end_match, None)
self.entry.show()
self.open_source_dialog.vbox.pack_start(self.entry, True, True, 0)
| true |
def test_get_all_planets_with_no_records(client):
    # An empty database must yield 200 with an empty JSON list.
    response = client.get("/planets")
    response_json = response.get_json()
    assert response.status_code == 200
    assert response_json == []
def test_get_one_book(client, three_saved_planets):
    # NOTE(review): the name says "book" but this exercises GET /planets/1
    # (likely a copy-paste leftover) -- consider renaming.
    response = client.get("/planets/1")
    response_json = response.get_json()
    assert response.status_code == 200
    assert response_json == {
        "id": 1,
        "name": "Mars",
        "description": "Hot and dusty",
        "color": "red",
    }
def test_get_one_book_but_is_empty_returns_error(client):
    # A missing id yields a 404 and a descriptive message body.
    response = client.get("/planets/1")
    response_json = response.get_json()
    assert response.status_code == 404
    assert response_json == "Planet 1 not found"
def test_get_all_planets(client, three_saved_planets):
    # All three fixture planets come back, in any order.
    expected_planets = [
        dict(id=1, name="Mars", description="Hot and dusty", color="red"),
        dict(id=2, name="Earth", description="Nice", color="blue marble"),
        dict(id=3, name="Saturn", description="Has some rings", color="yellow-brown"),
    ]
    response = client.get("/planets")
    body = response.get_json()
    assert response.status_code == 200
    assert len(body) == 3
    for planet in expected_planets:
        assert planet in body
def test_posts_one_planet_successfully(client):
    # Fix: the original built `post_planet` but never used it; the payload
    # dict is now actually sent and reused to build the expected body.
    payload = {"name": "Post", "description": "Posty", "color": "Postingggg"}
    response = client.post("/planets", json=payload)
    assert response.status_code == 201
    assert response.get_json() == dict(payload, id=1)
| true |
b90bf802a36f3f1609c7a2068846f7fdd252e2cb | Python | yl812708519/tornado_test_web | /test/common/test_comm.py | UTF-8 | 268 | 2.75 | 3 | [] | no_license | from sqlalchemy.sql import expression
__author__ = 'wangshubin'
# Scratch check: `+` concatenates Python lists.
a = [1, 2, 3]
b = [4, 5, 6]
c = a+b
# print(c)
class A(object):
    """Plain attribute holder used to build the comparison tuple below."""
    a = 1
    b = 2
arr = (A.a == "a", A.b == 1, "c" >= "a")
for cri in list(arr):
e = expression._literal_as_text(cri)
print e
| true |
d4582fd71a1c67bdf39fe77a655e694073213933 | Python | thailh12/wier-backend | /cache_utils.py | UTF-8 | 1,034 | 2.59375 | 3 | [] | no_license | import os.path
import pickle as pkl
import requests
import shutil
def get_response(url, enable_dump=False):
    """GET `url`, memoising Response objects in the module-level cache.

    On a cache miss the response is fetched and stored; with
    enable_dump=True the cache is then persisted to disk.  Cache hits
    never trigger a dump.
    """
    global saved_data
    cache = saved_data.setdefault('res_dict', {})
    if url in cache:
        return cache[url]
    response = requests.get(url)
    cache[url] = response
    if enable_dump:
        dump_saved_data()
    return response
def saved_data_init():
    """Create the module-global cache, loading saved_data.pkl if present.

    Does nothing when `saved_data` already exists in this module.
    """
    global saved_data
    if 'saved_data' in globals():
        return
    if os.path.exists('saved_data.pkl'):
        with open('saved_data.pkl', 'rb') as f:
            # NOTE: pickle -- only ever load a trusted, locally written file.
            saved_data = pkl.load(f)
    else:
        saved_data = {}
def dump_saved_data():
    """Persist the global cache to saved_data.pkl, keeping a .backup copy.

    Bug fix: the original unconditionally copied saved_data.pkl to the
    backup path, which raised FileNotFoundError on the very first dump
    (before the file existed).  The backup copy is now skipped in that
    case, and the hard-coded backslash paths use os.path.join.
    """
    global saved_data
    pkl_path = os.path.join(os.getcwd(), 'saved_data.pkl')
    if os.path.exists(pkl_path):
        shutil.copyfile(pkl_path, pkl_path + '.backup')
    with open('saved_data.pkl', 'wb') as f:
        pkl.dump(saved_data, f)
    print("Saved to saved_data.pkl!!!")
saved_data_init() | true |
def newList(a):
    """Return [first, last] for a non-empty sequence `a`.

    Simplification: a[-1] replaces a[len(a)-1] and the temporary list is
    built in one expression.  An empty sequence still raises IndexError,
    as before.
    """
    return [a[0], a[-1]]
# Demo: first and last element of the sample list -> [5, 25]
a = [5, 10, 15, 20, 25]
list_ = newList(a)
print(list_)
| true |
07a8fc8fd7e5c5d96fb1b5d48f8243ad5d863133 | Python | sindusistla/Algorithms-and-Datastructures | /OperatingSystem_Assignment02/Trans_three_pairs.py | UTF-8 | 2,585 | 3.21875 | 3 | [] | no_license | import threading
import time
import pymongo;
class myThread (threading.Thread):
def __init__(self, threadID, TransactionList,Pairs):
threading.Thread.__init__(self)
self.threadID = threadID
self.Pairs=Pairs
self.TransactionList=TransactionList
def run(self):
#print("Starting ", self.threadID)
FormPairs(self.threadID, self.TransactionList, self.Pairs)
#print("Exiting " ,self.threadID)
def FormPairs(threadID, TransactionList, Pairs):
tuplelist=[]
tup=()
for i in range(0,len(TransactionList)):
for k in range(i+1,len(TransactionList)):
for j in range(k+1,len(TransactionList)):
tup = tuple((TransactionList[i], TransactionList[k], TransactionList[j]));
tuplelist.append((tup));
threadLock.acquire()
Pairs.append(tuplelist);
threadLock.release();
def Open_file(Transactions):
filename = "C:\\Users\\sindu\\Google Drive\\Oakland University1\\Operating Systems\\Assignment 2\\TransactionData.txt";
fileptr = open(filename, "r")
LineList = []
count=0;
for line in fileptr:
LineList = list(map(int, line.strip().split(' ')))
Transactions.append(LineList)
count=count+1;
'Generate threads for list of lists'
print("Count of lines",count)
# First open the file and read the contents into a list of lists.
Transactions = []
Open_file(Transactions)
TransactionThreads=[]
Pairs=[]
PairsList=[]
PairDictionary ={}
# Guards appends to the shared Pairs list from the worker threads.
threadLock = threading.Lock()
#print(Transactions)
# Create new threads for each transaction.
# NOTE(review): the thread count is hard-coded to 40000 rather than
# len(Transactions) -- confirm the input file always has >= 40000 lines.
#len(Transactions)-1
for trans in range(0,40000):
    'Create and start threads'
    thread =myThread(trans, Transactions[trans], Pairs)
    thread.start()
    TransactionThreads.append(thread)
# Wait for every worker before reading the shared Pairs list.
for t in TransactionThreads:
    t.join();
print("Completed Joining threads")
PairsCount=0
# Tally every triple across all transactions: PairDictionary maps a
# 3-item tuple to the number of transactions containing it.
for PairList in Pairs:
    # One worker's full list of triples.
    #print( "\n",PairList)
    for OnePair in PairList:
        PairsCount=PairsCount+1;
        if OnePair not in PairDictionary:
            PairDictionary[OnePair]=1;
        else:
            # The triple was seen before:
            Value=PairDictionary.get(OnePair);
            # update its count.
            PairDictionary[OnePair]=Value+1;
# Report the most frequent triple, its count, and the total generated.
print("Max is : ",PairDictionary[max(PairDictionary, key=PairDictionary.get)],max(PairDictionary, key=PairDictionary.get))
print("No of pairs :",PairsCount)
#print (PairDictionary)
print("Exiting Main Thread") | true |
c9132b759715f0b780d529a339c30b933909a47f | Python | jvpereirarocha/fastapi-fundamentals | /app/schemas.py | UTF-8 | 1,962 | 2.625 | 3 | [] | no_license | from typing import List, Optional, Set
from pydantic import BaseModel, Field, HttpUrl, EmailStr
class Image(BaseModel):
    """An image attachment: a validated URL plus a display name."""
    url: HttpUrl
    name: str
class Item(BaseModel):
    """A store item with optional description, tax, comments, tags, image."""
    name: str
    description: Optional[str] = None
    price: float
    tax: Optional[float] = None
    # Pydantic copies field defaults per instance, so the mutable [] and
    # set() defaults below do not suffer the shared-default pitfall.
    comments: Optional[List[str]] = []
    tags: Optional[Set[str]] = set()
    image: Optional[Image] = None
class State(BaseModel):
    """A state/province: its abbreviation (`uf`) and full name."""
    uf: str
    name: str
class Address(BaseModel):
    """A postal address; `example=` values feed the generated OpenAPI docs."""
    street: str = Field(..., example='Rua X')
    number: int
    complement: Optional[str] = Field(None, example='Proximo a Praça X')
    district: str = Field(..., example='Dom Bosco')
    city: str
    state: State
    country: str
class User(BaseModel):
    """A user; Config.schema_extra supplies the example shown in the docs UI."""
    username: str
    full_name: Optional[str] = None
    class Config:
        # Injected verbatim into the generated JSON schema as an example.
        schema_extra = {
            'example': {
                'username': 'foo',
                'full_name': 'bar'
            }
        }
class DeclaredItem(BaseModel):
    """An item declared with per-field validation and OpenAPI metadata."""
    name: str = Field(..., title='Item name', min_length=3, max_length=200)
    description: Optional[str] = Field(
        None, title='The item description', max_length=300
    )
    price: float = Field(
        ...,
        gt=0,
        # Typo fix in the user-facing description: 'greather' -> 'greater'.
        description='The price of the item must be greater than zero'
    )
    # NOTE(review): annotated plain `float` yet defaulted to None -- consider
    # Optional[float] to match the other optional fields.
    tax: float = Field(None, title='The item tax')
class Product(BaseModel):
    """A product line: code, unit price and an optional quantity."""
    code: str
    price: float
    quantity: Optional[int]
class UserIn(BaseModel):
    """Inbound user payload -- includes the plaintext password."""
    username: str
    password: str
    email: EmailStr
    full_name: Optional[str] = None
class UserOut(BaseModel):
    """Outbound user payload -- same fields as UserIn minus the password."""
    username: str
    email: EmailStr
    full_name: Optional[str] = None
class GetUser(BaseModel):
    """Lookup filter: match by username and/or email (both optional)."""
    username: Optional[str] = None
    email: Optional[EmailStr] = None
class ProgrammingLanguage(BaseModel):
    """A programming language and its category (e.g. 'Back-End')."""
    name: str
    category: str
# In-memory sample data keyed by language slug; values mirror the
# ProgrammingLanguage model's fields.
languages = {
    "ruby": {"name": "Ruby", "category": "Back-End"},
    "csharp": {"name": "C#", "category": "Back-End"}
}
| true |
e1b979bbe2fb68ed5fc9f0b99aaadc7354f65136 | Python | Simoniezi/Practice-Python | /Exercise 7.py | UTF-8 | 175 | 3.765625 | 4 | [] | no_license | # Exercise 7
# Filtering a list: keep only the even squares.
a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
b = list(filter(lambda value: value % 2 == 0, a))
print(b)
# Made by Simoniezi
# Discord: Simoniezi#7138 | true |
987c890221ebfd5d8f9a970abf3fb21754af90c4 | Python | reshma57/textReader | /login.py | UTF-8 | 1,036 | 2.65625 | 3 | [
"MIT"
] | permissive | import os
from passlib.apache import HtpasswdFile
import json
# function for adding User
def add_user(username,password):
    """Create or update `username` in the .htpasswd file next to this module.

    Creates the file on first use.  Returns a bool derived from passlib's
    set_password() result (see the NOTE below).
    """
    # Strip the trailing 'login.py' (8 chars) to get this module's directory
    # prefix; '.htpasswd' is then appended to it.
    cwd = os.path.abspath(__file__)[:-8]
    if os.path.exists(cwd+".htpasswd") == False:
        # First run: create a fresh .htpasswd with this single entry.
        ht = HtpasswdFile(cwd+".htpasswd", new=True)
        result = ht.set_password(username, password)
        ht.save()
        return result
    else:
        ht = HtpasswdFile(cwd+".htpasswd")
        result = ht.set_password(username, password)
        ht.save()
        # NOTE(review): older passlib returned True when an existing user was
        # replaced; this inversion maps "replaced" -> False and "added" ->
        # True.  Modern passlib's set_password returns None, which would make
        # this always return False -- confirm against the pinned version.
        if result == False:
            return True
        else:
            return False
# check username and password
def check_user_password_htpasswd(username,password):
    """Check (username, password) against the .htpasswd file (truthy on match)."""
    # Same path trick as add_user: drop the trailing 'login.py'.
    cwd = os.path.abspath(__file__)[:-8]
    ht = HtpasswdFile(cwd+".htpasswd")
    return ht.check_password(username, password)
def login(data):
    """Authenticate a JSON payload {'username': ..., 'password': ...}.

    Returns the string "True" on success, "invalid password" otherwise.
    """
    credentials = json.loads(data)
    verified = check_user_password_htpasswd(credentials["username"],
                                            credentials["password"])
    return "True" if verified else "invalid password"
| true |
a2a5c6501f9fc10c44bdc831fd41eb861bd66ac2 | Python | aanavas/s2project | /lexlearner/src/convert_dictionary_format.py | UTF-8 | 2,132 | 2.921875 | 3 | [] | no_license | # convert_dictionary_format.py
# ============================
# 0.01.001 10-Sep-2007 jmk Created to convert festival to janus dict formats.
# ----------------------------
import getopt
import os,sys
import DictionaryIO
from Column import column
# ==============
# Mainline code.
# ==============
if __name__ == '__main__':
    # (Python 2 module: print statements and dict.has_key below.)
    # Long-option table: (flag, help text) pairs used both by getopt and
    # by the --help printout.
    flag_list = [('help', 'this command'),
                 ('festdict=', 'filename of festival-format pronunciation dictionary'),
                 ('janusdict=', 'filename of janus-format pronunciation dictionary')]
    try:
        opt_list, prog_args = getopt.getopt (sys.argv[1:], '', column(flag_list))
        option_tbl = dict (opt_list)
    except getopt.GetoptError, msg:
        print 'Error', msg
        sys.exit(' type --help for program options\n')
    # Print out the options if requested (or when run with no arguments).
    if option_tbl.has_key('--help') or len(sys.argv) == 1:
        print 'python', sys.argv[0]
        for option_flag, description in flag_list:
            print '%12s %s' %(option_flag, description)
        print
        sys.exit()
    # Read in the festival format dictionary and convert it to janus format.
    # If the janus output filename is not provided then the festival dictionary
    # is read in but nothing is done with it.
    word_pronun_list = []
    if '--festdict' in option_tbl:
        lexicon_file = option_tbl['--festdict']
        if os.path.exists(lexicon_file) and os.path.isfile(lexicon_file):
            word_pronun_list = DictionaryIO.ReadFestivalDictionary (lexicon_file, words_as_charseq=False)
            print 'Reading', lexicon_file
            print '  num words read', len(word_pronun_list)
    if '--janusdict' in option_tbl:
        output_file = option_tbl['--janusdict']
        DictionaryIO.WriteJanusDictionary (output_file, word_pronun_list)
        print 'Writing', output_file
| true |
29bf72973651981327655643de162e615e737af1 | Python | organizejs/brandnewroman | /app/brandnewroman/mailchimp.py | UTF-8 | 1,270 | 2.75 | 3 | [] | no_license | import requests
from mailchimp3 import MailChimp
from urllib3.exceptions import HTTPError
class MailchimpException(Exception):
    """Module-specific exception type (declared but not raised in this file)."""
    pass
class MailchimpClient():
    """Thin wrapper around mailchimp3's MailChimp client for a single list."""
    # Class-level defaults; set_credentials()/set_list_id() assign real values.
    client = None
    list_id = None
    def set_credentials(self, username: str, key: str):
        """Build the underlying MailChimp API client from (username, API key)."""
        self.client = MailChimp(key, username)
    def set_list_id(self, list_id: str):
        """Select the audience (list) that add_or_update_user targets."""
        self.list_id = list_id
    def add_or_update_user(self,
                           email: str,
                           status: str = 'subscribed',
                           first_name: str = '',
                           last_name: str = '',
                           zipcode: str= ''):
        '''
        Add a new subscriber to Mailchimp, or update the existing one
        (no error is raised when the subscriber already exists).
        Returns (True, None) on success, (False, detail) on a 400 response.
        `zipcode` is accepted but currently unused.
        '''
        try:
            # self.client.lists.members.create(self.list_id, {
            self.client.lists.members.create_or_update(self.list_id, email, {
                'email_address': email,
                'status_if_new': 'subscribed',
                'status': status,
                'merge_fields': {
                    'FNAME': first_name,
                    'LNAME': last_name
                }
            })
        except KeyError as e:
            raise e
        except HTTPError as e:
            # NOTE(review): HTTPError here is urllib3's (see module imports),
            # but mailchimp3 is requests-based and raises
            # requests.exceptions.HTTPError -- this handler may never match.
            if e.response.status_code == 400:
                # subscriber probably exists
                # local name `json` holds the parsed response body here
                json = e.response.json()
                # raise MailchimpError(json.get('errors') or json.get('detail') or json)
                return False, json.get('detail')
            else:
                print("ERROR HERE")
                raise
        return True, None
# Shared module-level client; callers configure it via set_credentials().
mc = MailchimpClient()
| true |
035abe7eb18f4ba665c5ab1a9a7e9603d681c5b9 | Python | hasnatosman/class_object | /class_object.py | UTF-8 | 1,088 | 3.625 | 4 | [] | no_license | import turtle
# Single shared turtle used by the whole script.
tom = turtle.Turtle()
tom.speed(50)
def draw_circle():
    # Despite the name, this draws nothing: it only rotates the shared
    # turtle 90 degrees left between circle passes.
    tom.left(90)
# The seven ring patterns below differed only in colour and radius, so the
# seven copy-pasted while-loops are collapsed into one parameterised loop.
# Order runs from the outermost ring (red, r=140) to the innermost
# (violet, r=20), exactly as before.
for ring_color, radius in [("red", 140), ("black", 120), ("yellow", 100),
                           ("green", 80), ("indigo", 60), ("blue", 40),
                           ("violet", 20)]:
    # 72 repetitions of 5-degree steps sweep a full 360 degrees per ring.
    for _ in range(72):
        tom.color(ring_color)
        tom.circle(radius)
        draw_circle()  # extra 90-degree left rotation between circles
        tom.right(5)
# Block until the window is clicked, then tear it down.
turtle.exitonclick()
# NOTE(review): exitonclick() destroys the window, so this hideturtle()
# call likely runs against a closed screen -- confirm it is reachable.
turtle.hideturtle()
| true |
8a2495f191eb59d986d8261cd70165a0670d34a6 | Python | inprogress/vertebrale | /vertebrale/importer.py | UTF-8 | 1,448 | 2.859375 | 3 | [
"MIT"
] | permissive | import csv
from vertebrale.database import db_session
from vertebrale.models import Category, CategoryTranslation, LanguageCode
def startImport():
    """Load the four-language category names from data/GenericFoods.csv into
    the Category/CategoryTranslation tables (single commit at the end)."""
    with open('data/GenericFoods.csv', newline='') as csvfile:
        reader = csv.reader(csvfile)
        rows = list(reader)
    row = rows[0]
    # Map header labels to their column positions.
    row2index = dict()
    for i, e in enumerate(row):
        row2index[e] = i
    d = row2index["category D"]
    f = row2index["category F"]
    i = row2index["category I"]
    e = row2index["category E"]
    # Reduce each data row to its [German, French, Italian, English] names.
    # (NOTE(review): `i` and `e` are reused as loop variables below, but the
    # lambda has already been evaluated eagerly by list(), so this is safe.)
    data = list(map(lambda x: [x[d], x[f], x[i], x[e]], rows[1:]))
    result = {}
    # Deduplicate on the German name, skipping rows with an empty one.
    for i in data:
        if len(i[0]) == 0:
            continue
        if i[0] not in result:
            result[i[0]] = i
    # One Category per unique entry, with one translation per language.
    for key, category in result.items():
        c = Category()
        c.translations.append(CategoryTranslation(
            languageCode=LanguageCode.GERMAN, name=category[0]))
        c.translations.append(CategoryTranslation(
            languageCode=LanguageCode.FRENCH, name=category[1]))
        c.translations.append(CategoryTranslation(
            languageCode=LanguageCode.ITALIAN, name=category[2]))
        c.translations.append(CategoryTranslation(
            languageCode=LanguageCode.ENGLISH, name=category[3]))
        db_session.add(c)
    db_session.commit()
    print("Import categories done!")
| true |
eed00d3a49fbce9284e870cf51dcd404dd2d89d0 | Python | myton/head_first_python | /ch06/process_data.py | UTF-8 | 706 | 2.984375 | 3 | [] | no_license | def sanitize (time_string):
    # Times may use '-' or ':' between minutes and seconds; pick whichever
    # separator the string actually contains.
    if '-' in time_string:
        splitter = '-'
    elif ':' in time_string:
        splitter = ':'
    else:
        # Already in canonical 'mins.secs' form (or separator-free).
        return time_string
    (mins, secs) = time_string.split(splitter)
    # Normalise to the dotted form, e.g. '2-34' -> '2.34'.
    return (mins + '.' +secs)
def pross_data(file_name):
    """Report an athlete's three fastest distinct times.

    The file holds one comma-separated line: name, date of birth, then the
    recorded times.  Returns the formatted report string, or None (after
    printing a message) when the file cannot be read.
    """
    try:
        with open(file_name, 'r') as data:
            _data = (data.readline().strip().split(','))
            (name, dob) = _data.pop(0), _data.pop(0)
            return (name + "'s fastest times are: " + str(sorted(set([sanitize(t) for t in _data]))[0:3]))
    except IOError as err:
        # BUG FIX: the original concatenated str + IOError, which raised
        # TypeError and masked the real error; convert explicitly.
        print("file is missing :" + str(err))
        return (None)
# Print each athlete's report (None is printed when a file is absent).
print(pross_data('james2.txt'))
print(pross_data('julie2.txt'))
print(pross_data('mikey2.txt'))
print(pross_data('sarah2.txt'))
| true |
63f0d329f975ed6aa7f3b91a4a692c079866b8c5 | Python | GopiKarthi/Hackathon5 | /hackathon5/ApiHandler/views.py | UTF-8 | 2,728 | 2.65625 | 3 | [] | no_license | # Create your views here.
import json
from datetime import datetime as dt
from django.http import HttpResponse
import random
from util.util import dbconnect, randomDate
import pickle
from util.util import *
from hackathon5 import *
import simplejson
import sys
sys.path.append("/host/DataHack/")
def AppRateReal(request):
    """To calculate and return the Application rate
    Realtime"""
    #if not request.method == 'POST':
    #    return HttpResponse('Method not allowed', status=402)
    #req_type = request.POST.get('type', '')
    #if req_type == 'all':
    # SQL counting applications whose id starts with the current
    # year-day-month-hour stamp.
    q= """select count(*) from application_application where id like "%s%%";"""%dt.strftime(dt.now(),'%Y%d%m%H')
    # NOTE(review): the bare `dbconnect` below is a no-op expression -- the
    # query is never executed and the raw SQL text is returned to the client.
    dbconnect
    return HttpResponse(q)
def CustData(request):
    """Return the LoanBooked_2017 series as JSON, with cumulative loan totals
    and a running profit, blanking entries from the requested year onward."""
    datas=[]
    # for i in range(10):
    #     data.append({'date' : randomDate('1-Jan-16', '31-Dec-16', random.random()),
    #             'close' : (random.random() * 500 )})
    # for year in range(2009,int(request.GET.get("y"))+1):
    year = request.GET.get("y")
    with open("/host/DataHack/LoanBooked_2017.pkl", 'rb') as f:
        datas = pickle.load(f)
    accum = 0.0
    profit = 0.0
    for i in datas:
        accum=accum+i["Loans"]
        profit=profit+i["DebitCredit"]
        i["Loans"] = accum
        i["profit"] = profit-accum*150 # 150 is avg cost for one loan
        # NOTE(review): the four lines below repeat the accumulation inside
        # the same iteration, double-counting every record -- confirm
        # whether this was intentional or a copy/paste error.
        accum=accum+i["Loans"]
        profit=profit+i["DebitCredit"]
        i["Loans"] = accum
        i["profit"] = profit-accum*150 # 150 is avg cost for one loan
        # Blank out entries dated in or after the requested year.
        t = time.strptime(i['date'], '%d-%b-%y')
        now = datetime.date(int(year), 1, 1)
        if t.tm_year>=now.year:
            i["Loans"],i["profit"] = None,None
    return HttpResponse(json.dumps(datas))
def MapPlot(request):
    '''Sends the data to plot the map of customers applying in UK'''
    # Pickled point files, one per year; keyed by the ?year= query parameter.
    plot_map = {'2009':'points_200912.pkl','2010':'points_201012.pkl','2011':'points_201112.pkl','2012':'points_201212.pkl','2013':'points_201312.pkl','2014':'points_201412.pkl','2015':'points_201512.pkl','2016':'points_201612.pkl'}
    year = request.GET.get("year")
    with open(plot_map[year], 'rb') as f:
        points = pickle.load(f)
    coords = get_coordinates(points)
    # Tally gender counts; index 3 of each coordinate record holds gender.
    male = 0
    female = 0
    for i in coords:
        if i[3] == 'Male':
            male = male + 1
        else:
            female = female + 1
    total_cust = male + female
    male_percent = (float(male)/float(total_cust)) * 100
    female_percent = (float(female)/float(total_cust)) * 100
    # Format the percentages to two decimals for display.
    male_percent = '%.2f' % male_percent
    female_percent = '%.2f' % female_percent
    return HttpResponse(simplejson.dumps({'coords':coords,'male':str(male_percent)+'%','female':str(female_percent)+'%'}))
| true |
4530eccbd1b0a1eba33c6bf18eeb096d61592030 | Python | yacoubismus/python | /python-course/homework/seventh/Determinant_matrix.py | UTF-8 | 916 | 3.375 | 3 | [] | no_license | import numpy as np
from copy import copy, deepcopy
det = 0
def matrix_scalar_mul(a, c):
    """Multiply every entry of matrix `a` by scalar `c`; return a new
    list-of-lists matrix."""
    scaled = []
    for row in a:
        scaled.append([c * entry for entry in row])
    return scaled
def determinant_matrix(m):
    """Return the determinant of square matrix `m` (list of lists or 2-D
    numpy array) by Laplace cofactor expansion along the first row.

    The original crashed (it added matrices into the scalar accumulator and
    recursed with no base case); this computes the determinant correctly.
    The result is printed, stored in the module-level `det` (kept for
    backward compatibility with the original's global), and returned.
    """
    global det

    def _minor(mat, col):
        # Submatrix with row 0 and column `col` removed; list() conversion
        # makes `+` concatenate for both lists and numpy rows.
        return [list(row[:col]) + list(row[col + 1:]) for row in mat[1:]]

    def _cofactor_det(mat):
        # Recursive Laplace expansion along the first row; 1x1 base case.
        if len(mat) == 1:
            return mat[0][0]
        return sum(((-1) ** col) * mat[0][col] * _cofactor_det(_minor(mat, col))
                   for col in range(len(mat[0])))

    det = _cofactor_det(m)
    print("determinant:", det)
    return det
def matrix_get_submatrix(m, i, j):
    """Return a copy of matrix `m` with row `i` and column `j` removed."""
    without_row = np.delete(deepcopy(m), i, axis=0)
    return np.delete(without_row, j, axis=1)
if __name__ == "__main__":
    # Quick manual check on a fixed 3x3 matrix.
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=int)
    print(matrix_get_submatrix(a,0,1))
    print(a)
determinant_matrix(a) | true |
f02bbd33823eb2158615a6fee4a43c182ef8969e | Python | neilb14/cryptotracker | /summary.py | UTF-8 | 1,064 | 2.6875 | 3 | [] | no_license | import sys, argparse
import urllib.request
from cryptotracker import row, summary, summary_writer
from cryptotracker.datastore import Datastore
from cryptotracker.price_service import PriceService
def main(args):
    """Parse CLI args, optionally fetch live coin prices, and print a
    per-coin summary of the portfolio stored under --dir."""
    parser = argparse.ArgumentParser(description='Summary - prints a summary of your portfolio')
    parser.add_argument('-x', '--dir', default='./data', help='directory containing csv data files. If folder doesnt already exist it will be created.')
    parser.add_argument('-v', '--value', help='include current value of portfolio.', action='store_true')
    config = parser.parse_args(args)
    price = {}
    # Only hit the price service when the user asked for current values.
    if config.value:
        price['BTC'] = PriceService().get_price('BTC')
        price['BCH'] = PriceService().get_price('BCH')
        price['ETH'] = PriceService().get_price('ETH')
        price['LTC'] = PriceService().get_price('LTC')
    ds = Datastore(config.dir)
    for coin_summary in summary_writer.write(summary.build(ds.read_all(), price), price):
        print(coin_summary)
if __name__ == '__main__':
main(sys.argv[1:]) | true |
47ee317a96b6a3b387549e77953433bbde435f58 | Python | NviCoder/machine_learning_ramzor | /ramzor.py | UTF-8 | 3,287 | 2.609375 | 3 | [] | no_license | import pandas as pd
import numpy as np
import math
def get_rank(N, P, G):
    """Map weekly city stats to a traffic-light score clamped to [2, 10].

    N: new cases per 10k residents, P: test positivity, G: growth factor.
    Score = 2 + ln(N*G^2) + min(P,1)/8, with NaN/zero inputs neutralised.
    """
    BASE, POSITIVITY_DIVISOR = 2.0, 8.0
    if pd.isna(N) or pd.isna(G):
        ngg = 1
    elif N == 0 or G == 0:
        ngg = 1
    elif math.isinf(N) or math.isinf(G):
        # Infinite inputs contribute exactly 1 after the log.
        ngg = math.exp(1)
    else:
        ngg = N * G * G
    positivity = 0 if pd.isna(P) else min(P, 1.0)
    raw_score = BASE + math.log(ngg) + positivity / POSITIVITY_DIVISOR
    return min(max(raw_score, 2.0), 10.0)
# Toggle between the older (30-12-2020) and newer (08-02-2021) input files;
# the newer file carries two extra columns that must also be dropped.
csv_ver30122020 = False
if csv_ver30122020:
    data_covid = pd.read_csv("corona_city_table_ver_0028.csv", sep=",", header=0)
    data_covid.drop(["City_Name", "Cumulated_recovered", "Cumulated_deaths", "Cumulated_number_of_diagnostic_tests"],
                    axis='columns', inplace=True)
else:
    data_covid = pd.read_csv("corona_city_table_ver_08022021.csv", sep=",", header=0)
    data_covid.drop(["City_Name", "Cumulated_recovered", "Cumulated_deaths", "Cumulated_number_of_diagnostic_tests", "colour","final_score"],
                    axis='columns', inplace=True)
data_covid["Date"] = pd.to_datetime(data_covid["Date"], dayfirst=True)
data_covid["Cumulative_verified_cases"] = pd.to_numeric(data_covid["Cumulative_verified_cases"])
# Pre-create the derived columns filled in by the loop below.
data_covid[["Ni","Pi","Gi","Rank"]] = -1.0
data_pop = pd.read_csv("population.csv", sep=",", header=0)
processed_pop = pd.read_csv("population_for_cpa.csv", sep=",", header=0)
#drop cities with no data about population
data_pop = data_pop.dropna(subset=['popTot'])
data_covid = data_covid.drop(data_covid[~data_covid.City_Code.isin(data_pop.city_code)].index)
data_covid = data_covid.drop(data_covid[~data_covid.City_Code.isin(processed_pop.city_code)].index)
#drop rows with <15 verified
data_covid.drop(data_covid[(data_covid['Cumulative_verified_cases'] < 15)].index, inplace=True)
data_covid.reset_index(drop=True, inplace=True)
# Row-by-row: compare each (city, date) record with the same city one week
# earlier to derive Ni (new cases per 10k), Pi (positivity), Gi (growth),
# then score the row with get_rank.
for index, row in data_covid.iterrows():
    current_city_code = row['City_Code']
    current_city_pop = data_pop.query('city_code == @current_city_code')['popTot'].values[0]
    date_week_ago = row['Date'] - pd.DateOffset(days=7)
    row_week_ago = data_covid.query('Date == @date_week_ago & City_Code == @current_city_code')
    if row_week_ago.empty:
        # No prior-week record: the weekly deltas are undefined.
        data_covid.at[index, 'Ni'] = pd.NA
        data_covid.at[index, 'Pi'] = pd.NA
        data_covid.at[index, 'Gi'] = pd.NA
    else:
        week_new_verified = data_covid.at[index, 'Cumulative_verified_cases'] - row_week_ago.iloc[0]['Cumulative_verified_cases']
        data_covid.at[index, 'Ni'] = week_new_verified/(current_city_pop / 10000.0)
        data_covid.at[index, 'Pi'] = week_new_verified / (data_covid.at[index, 'Cumulated_number_of_tests'] - row_week_ago.iloc[0]['Cumulated_number_of_tests'])
        #set Gi
        Ni_week_ago = row_week_ago.iloc[0]['Ni']
        if pd.isna(Ni_week_ago):
            data_covid.at[index, 'Gi'] = pd.NA
        else:
            # this '2' is a guess for suitable value
            data_covid.at[index, 'Gi'] = 2 if Ni_week_ago==0 else (week_new_verified/(current_city_pop / 10000.0) ) / Ni_week_ago
    #set ramzor rank
    data_covid.at[index, 'Rank'] = get_rank(N=data_covid.at[index, 'Ni'], P=data_covid.at[index, 'Pi'], G=data_covid.at[index, 'Gi'])
# Persist the scored table; each input vintage writes its own output file.
if csv_ver30122020:
    data_covid.to_csv('ramzor.csv', sep=',')
else:
data_covid.to_csv('ramzor2.csv', sep=',', index_label='index') | true |
419150efa7b0a55b13963e417853b773f33f9b48 | Python | DanielDynamics/AddressBook | /AddressBook.py | UTF-8 | 1,159 | 3.6875 | 4 | [] | no_license | dict1 = {'Daniel':'1000', 'Lucy':'1001', 'Haha':'1002'}
# Interactive menu loop: 1=lookup, 2=add, 3=delete, 4=edit, 5=quit.
while 1:
    print('-Welcome to book program-\n'+'-1:Find contact number-\n'+'-2:Add new contact-\n'+'-3:Delete contact-\n'+'-4:Edit Contact-\n'+'-5:Exit')
    n = int(input("Enter the instruction:"))
    if n==1:
        # Look up an existing contact by name.
        name = input("Enter contact name:")
        if name in dict1:
            print(name+':'+dict1[name])
        else:
            print(name + ' is not exist')
    elif n==2:
        # Add (or silently overwrite) a contact.
        name = input("Enter new contact name:")
        num = input("Enter new contact num:")
        dict1[name] = num
    elif n==3:
        # NOTE(review): `del` raises KeyError when the name is absent --
        # consider a membership check like option 1's.
        name = input("Enter delete name:")
        del dict1[name]
        print(name+' is moved')
    elif n==4:
        # Edit: confirm the contact exists, then optionally replace the number.
        name = input("Enter contact name:")
        if name in dict1:
            print('The contact is exist -->> '+name+' : '+dict1[name])
            q = input('Edit contact info\(YES\\NO\)?')
            if q=='YES':
                num = input('Enter new num:')
                dict1[name] = num
        else:
            print(name + ' is not exist')
    elif n==5:
        break
    else:
        # Unknown option: prompt once more (the new value is used next pass).
        n = int(input("Re-Enter the instruction:"))
| true |
d1ffc011daf5da403f8f25d2458315735a8ef92c | Python | Priyojeet/man_at_work | /se2tsk1es1.2.py | UTF-8 | 566 | 3.828125 | 4 | [] | no_license | def myfilter(func, arg):
    # Collect the elements of `arg` for which `func` is truthy
    # (a hand-rolled filter() returning a list).
    l = []
    for i in arg:
        if func(i):
            l.append(i)
    return l
def isprime(arg):
    """Return True when `arg` is a prime number, else False.

    Fixes the original, which reported every number <= 1 as prime (the
    `prime = True` default was returned untouched for those inputs), and
    cuts trial division from O(n) down to O(sqrt(n)).
    """
    if arg < 2:
        return False
    # A composite number must have a divisor no larger than its square root.
    for divisor in range(2, int(arg ** 0.5) + 1):
        if arg % divisor == 0:
            return False
    return True
# Read `length` integers from the user, then echo the list and its primes.
# NOTE(review): `list` shadows the builtin list type for the rest of the file.
list = []
length = int(input("enter the length of your list:-"))
for item in range(length):
    i = int(input("enter the element of your list:-"))
    list.append(i)
print(list)
print(myfilter(isprime, list))
| true |
ad1b132535929d3b1a9bc23741a66e87aebbd415 | Python | awant/arae | /models/kenlm_model.py | UTF-8 | 2,046 | 2.59375 | 3 | [] | no_license | import os
import math
import subprocess
import kenlm
from tempfile import TemporaryDirectory
from utils import dump_lines
from batchifier import Batchifier
class KenlmModel(object):
    """Wrapper around a kenlm n-gram model: builds via the lmplz binary and
    scores sentence perplexity."""
    # Default location of the lmplz training binary.
    LMPLZ_PATH = '/usr/local/bin/lmplz'
    def __init__(self, model):
        """Accept either a loaded kenlm.Model or a path to an .arpa file."""
        self.model_path = None
        if isinstance(model, str):
            self.model_path = model
            model = kenlm.Model(model)
        self.model = model
    @classmethod
    def build(cls, sentences, **kwargs):
        """Train a 5-gram model on `sentences` with lmplz; return a
        KenlmModel, or None when the subprocess/loading fails.

        kwargs: lmplz_path (binary location), compressing_rate (-S %).
        """
        # sentences: list of sents without <sos>, <eos> or a filepath
        tmpdir = TemporaryDirectory()
        gen_model_path = os.path.join(tmpdir.name, 'tmp_knlm.arpa')
        tmp_filepath = os.path.join(tmpdir.name, 'sents_tmp.txt')
        dump_lines(tmp_filepath, sentences)
        params = {
            'lmplzp': kwargs.get('lmplz_path', cls.LMPLZ_PATH),
            'N': 5, 'trainp': tmp_filepath,
            'modelp': gen_model_path,
            'S': kwargs.get('compressing_rate', 20)
        }
        # Shell redirection requires shell=True here; paths are local temp
        # files, but NOTE(review): lmplz_path comes from the caller and is
        # interpolated unquoted into a shell command.
        template_cmd = '{lmplzp} -o {N} '
        if params['S']:
            template_cmd += '-S {S}% '
        template_cmd += '< {trainp} > {modelp}'
        cmd = template_cmd.format(**params)
        try:
            print('Building a model, cmd: {}'.format(cmd))
            subprocess.call(cmd, shell=True)
            print('Built kenlm model, N = {}, path: {}'.format(params['N'], params['modelp']))
            model = kenlm.Model(params['modelp'])
            return cls(model)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            print('Can\'t build kenlm model, probably because of lack of memory, try to change -S')
            return None
    def _get_sentences_ppl(self, sentences):
        """Corpus perplexity: 10 ** (total -log10 prob / total token count)."""
        assert isinstance(sentences, list)
        n_tokens, nll10 = 0, 0
        for sent in sentences:
            sent = sent.strip()
            nll10 += -self.model.score(sent)
            n_tokens += len(sent.split())
        ppl = math.pow(10., nll10 / n_tokens)
        return ppl
    def get_ppl(self, sentences):
        """Public alias for _get_sentences_ppl."""
        return self._get_sentences_ppl(sentences)
| true |
70bbbbf3d6a357881d56c3fbed7bd5cad1c5b2b1 | Python | JMakkonen/Pattern_Discovery_Coursera | /P1026.py | UTF-8 | 3,494 | 3.5625 | 4 | [] | no_license | # P1026. For Pattern Recognition course.
'''
This file contains some functions created for the Pattern
Discovery course on Coursera.
create_dataset(n_tid,n_item,max_item,i_names)
 - creates datasets. The names part is only partially implemented.
save_dataset(data_dict,filename)
- saves a dataset to file.
read_dataset(filename)
- loads a file created with save above and returns a dict.
one_itemsets(data_dict,min_sup)
- returns a dictionary with item as key and count as value
for items that meet or exceed min_sup.
'''
import random # only needed for dataset creation
def create_dataset(n_tid, n_item, max_item, i_names=None):
    '''
    Create a random transaction dataset with n_tid entries.  Each entry
    draws up to max_item items from 1..n_item (or from the i_names list of
    strings when given; duplicates collapse because entries are sets).
    The output is a dictionary such as:
    { 0:{1,2}, 1:{2,3}, 2:{1}, 3:{1,2,3}, ... }
    '''
    # Fix of the mutable-default-argument idiom: normalise None to [].
    i_names = [] if i_names is None else i_names
    assert(n_item == len(i_names) or len(i_names) == 0)
    output = {}
    for tid in range(n_tid):
        item_set = set()
        for _ in range(random.randint(1, max_item)):
            if not i_names:
                item_set.add(random.randint(1, n_item))
            else:
                # BUG FIX: the original called the list as a function
                # (i_names(1, n_item)); pick a random name instead.
                item_set.add(i_names[random.randint(1, n_item) - 1])
        output[tid] = item_set
    return output
def save_dataset(data_dict, filename):
    """Write `data_dict` ({tid: set(items)}) to `filename`, one line per
    transaction in the form 'tid item1 item2 ... ' (the trailing space is
    kept for compatibility with read_dataset).
    """
    # `with` guarantees the handle is closed even if a write fails
    # (the original leaked it on error).
    with open(filename, "w", encoding='utf-8') as f:
        for tid in data_dict:
            line = str(tid) + " "
            for item in data_dict[tid]:
                line = line + str(item) + " "
            f.write(line + "\n")
def read_dataset(filename):
    """Load a file written by save_dataset back into {tid: set(items)}."""
    f = open(filename, "r", encoding='utf-8')
    content = f.read()
    data_dict = {}
    for record in content.splitlines():
        fields = record.split()
        # First field is the transaction id; the rest are its items.
        data_dict[int(fields[0])] = {int(value) for value in fields[1:]}
    f.close()
    return data_dict
'''
Some test code below:
my_data = create_dataset(5,5,3)
save_dataset(my_data,"trial.data")
saved_data = read_dataset('trial.data')
print(my_data)
print(saved_data)
'''
def one_itemsets(data_dict, min_sup):
    """Return {item: count} for items appearing in at least min_sup
    transactions of `data_dict` ({tid: set(items)})."""
    # Pass 1: count how many transactions contain each item.
    counts = {}
    for transaction in data_dict.values():
        for item in transaction:
            counts[item] = counts.get(item, 0) + 1
    # Pass 2: prune everything below the support threshold.
    return {item: n for item, n in counts.items() if n >= min_sup}
'''
Some test code below:
my_data = create_dataset(7,5,4)
print(my_data)
print(one_itemsets(my_data,3))
'''
def all_subsets(S):
    """Return every non-empty subset of set S (S itself included) as a list
    of sets, full set first, duplicates removed in first-seen order."""
    members = list(S)
    collected = [S]
    if len(members) > 1:
        # Recurse on each "leave one element out" subset.
        for skip in range(len(members)):
            reduced = set(members[idx] for idx in range(len(members)) if idx != skip)
            for sub in all_subsets(reduced):
                collected.append(sub)
    # Deduplicate while preserving the order of first appearance.
    unique = []
    for candidate in collected:
        if candidate not in unique:
            unique.append(candidate)
    return unique
def proper_subsets(S):
    """Return every non-empty proper subset of set S (S itself excluded)."""
    return [subset for subset in all_subsets(S) if subset != S]
| true |
a4a29938ec613dd5b35100fa1e80071bd08e9448 | Python | danmarkfyn/ubicom | /positioning/project2.py | UTF-8 | 4,343 | 3.078125 | 3 | [] | no_license | import math
import os
import csv
import pprint
import pickle
from math import sqrt,pow
################################
# FUNCTIONS TO MAKE DATASET
################################
#Points shape:
#{"x":xval,"y":yval,"d":distance}
def weightedAverage(points):
    """Inverse-distance weighted centroid of points [{'x', 'y', 'd'}, ...].

    Each point's weight is 1/d; x and y may be strings (coerced to float).
    Returns an (x, y) tuple.
    """
    sum_wx = 0
    sum_wy = 0
    sum_w = 0
    for point in points:
        weight = 1 / point["d"]
        sum_wx += float(point["x"]) * weight
        sum_wy += float(point["y"]) * weight
        sum_w += weight
    return (sum_wx / sum_w, sum_wy / sum_w)
def getFileList(path):
    """Recursively collect paths under `path` whose names end in
    'data_wide.csv'."""
    matches = []
    for dirpath, _dirnames, filenames in os.walk(path):
        matches.extend(os.sep.join([dirpath, name])
                       for name in filenames if name.endswith('data_wide.csv'))
    return matches
def makeDataset(directory):
    """Build one dataset from every *data_wide.csv file under `directory`."""
    dataset = []
    for csv_path in getFileList(directory):
        dataset = addFileToDataset(dataset, csv_path)
    return dataset
#Dataset structure:
# [
# {
# "signals":{edgeNum:edgeVal...},
# "position":{"x":x,"y":y}
# }
# ]
def addFileToDataset(dataset, fileDir):
    """Append the samples from one wide-format CSV to `dataset`; return it.

    Row layout: columns 2/3 hold x/y, columns 4+ hold per-edge signal values;
    the header row names the edges.
    """
    with open(fileDir) as handle:
        reader = csv.reader(handle, delimiter=",")
        edges = []
        for line_no, row in enumerate(reader):
            if line_no == 0:
                # Header: remember the edge names for all following rows.
                edges = row[4:]
                continue
            edge_values = row[4:]
            signals = {edges[idx]: edge_values[idx] for idx in range(len(edges))}
            dataset.append({
                "signals": signals,
                "position": {"x": row[2], "y": row[3]},
            })
    return dataset
################################
# FUNCTIONS TO GET DISTANCE/NN
################################
# Sort key used to order points by their computed distance.
def sortFunc(item):
    """Return the point's distance value."""
    return item["d"]
def distanceWithType(point1, point2, type=1):
    """Minkowski distance of order `type` between two sparse signal dicts.

    Keys missing from either dict count as signal 0; values may be strings
    and are coerced to float.  type=1 gives the Manhattan distance, type=2
    the Euclidean distance.  (`type` keeps the original parameter name for
    caller compatibility even though it shadows the builtin.)
    """
    # The original duplicated the loop for the p=1 case and shadowed the
    # builtin `sum`; both branches collapse to one formula.
    keys = set(point1) | set(point2)
    total = 0.0
    for key in keys:
        total += abs(float(point1.get(key, 0)) - float(point2.get(key, 0))) ** type
    # Skip the p-th root when p == 1 (it is the identity there anyway).
    return total if type == 1 else total ** (1 / type)
def hashFunc(dataPoint):
    """Hashable identity for a scored point: its (x, y, d) tuple."""
    return (dataPoint["x"], dataPoint["y"], dataPoint["d"])
#inData is just signal strength list
def getKnnAll(dataset,inData,elimRepeats=False,distanceType=1):
    """Return every dataset position sorted by distance to `inData`.

    NOTE(review): this mutates `dataset` in place -- each position dict
    gains/overwrites a "d" key, because dataPointDistance aliases
    dataPoint["position"] rather than copying it.
    """
    nn = []
    datasetDistance=[]
    # Seen-(x, y, d) map, used only when elimRepeats is set.
    tester = {}
    for dataPoint in dataset:
        distance = 0
        signalsPoint = dataPoint["signals"]
        distance = distanceWithType(signalsPoint,inData,distanceType)
        dataPointDistance = dataPoint["position"]
        dataPointDistance["d"] = distance
        if(elimRepeats):
            # Keep only the first occurrence of each (x, y, d) triple.
            if(not(tester.get(hashFunc(dataPointDistance),False))):
                datasetDistance.append(dataPointDistance)
                tester[hashFunc(dataPointDistance)] = True
        else:
            datasetDistance.append(dataPointDistance)
    #Probably replace this with a priority queue instead of having to sort
    datasetDistance.sort(key=sortFunc)
    nn = datasetDistance
    return nn
def runTest(trainDataset,testDataset,nmin,nstep,nmax,distanceType=1):
    """Estimate each test sample's position from its k nearest training
    neighbours for every k in range(nmin, nmax+1, nstep).

    Returns {k: [Euclidean position error per test sample, ...]}.
    """
    distances = {}
    totalN = len(testDataset)
    for i,testDataPoint in enumerate(testDataset):
        print(f"Calculating sample {i+1}/{totalN}")
        realx,realy = float(testDataPoint["position"]["x"]),float(testDataPoint["position"]["y"])
        # All training positions sorted by signal-space distance once per sample.
        nn = getKnnAll(trainDataset,testDataPoint["signals"],distanceType=distanceType)
        # NOTE(review): this inner loop reuses `i`, shadowing the enumerate
        # index above (harmless here, but confusing).
        for i in range(nmin,nmax+1,nstep):
            infx,infy = weightedAverage(nn[:i])
            infx,infy = float(infx),float(infy)
            distanceMid = distances.get(i,[])
            distanceMid.append(sqrt(pow(abs(infx-realx),2)+pow(abs(infy-realy),2)))
            distances[i]=distanceMid
    return distances
def distancesToAvg(distanceDict):
    """Map each key of {k: [errors]} to the mean of its error list."""
    return {key: sum(errors) / len(errors)
            for key, errors in distanceDict.items()}
if __name__ == "__main__":
    # Train/test on experiment1, sweep k = 1..300 with a Minkowski p=4
    # distance, then persist the mean error per k as text and as a pickle.
    directory = "./experiment/experiment1/train"
    trainDataset = makeDataset(directory)
    directoryTest = "./experiment/experiment1/test"
    testDataset = makeDataset(directoryTest)
    distances = runTest(trainDataset,testDataset,1,1,300,distanceType=4)
    avgDistances = distancesToAvg(distances)
    with open('distances.txt','w+') as file:
        file.write(pprint.pformat(avgDistances))
    with open('distances.data','wb') as file:
        pickle.dump(avgDistances,file)
| true |
2ce0f5c70f3b59716e838ae230b96afdc5983733 | Python | scottstamp/jenni | /modules/stocks.py | UTF-8 | 2,027 | 2.5625 | 3 | [
"EFL-2.0"
] | permissive | #!/usr/bin/env python
"""
stocks.py - jenni Stocks Ticker Module
Copyright 2015, Scott Stamp <scott@hypermine.com>
Licensed under IDGAF
More info:
* jenni: https://github.com/myano/jenni/
* Phenny: http://inamidst.com/phenny/
"""
import json
import urllib
import web
def stocks(jenni, input):
    """IRC command handler: fetch a quote from Markit On Demand and say it.

    `input.group(2)` carries the symbol typed after the command; all user
    feedback goes through jenni.say().
    """
    txt = input.group(2)
    if not txt:
        return jenni.say('Please provide a stock symbol.')
    url = 'http://dev.markitondemand.com/Api/v2/Quote/json?symbol=%s'
    url = url % input.group(2).upper()
    ## Basic error checking
    try:
        ## Return an error if the page cannot load
        page = web.get(url)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        return jenni.say('Could not access the stock ticker service')
    ## load URL and fetch JSON
    try:
        data = json.loads(page)
    except:
        return jenni.say('The server did not return anything that was readable as JSON.')
    if 'Status' not in data or 'SUCCESS' not in data['Status']:
        return jenni.say('The server did not return any usable data, please check your stock symbol.')
    # Unpack the quote fields (ms_date/market_cap/volume/YTD/high/low/open
    # are fetched but not included in the reply below).
    name = data['Name']
    symbol = data['Symbol']
    last_price = data['LastPrice']
    change = data['Change']
    change_percent = data['ChangePercent']
    timestamp = data['Timestamp']
    ms_date = data['MSDate']
    market_cap = data['MarketCap']
    volume = data['Volume']
    change_ytd = data['ChangeYTD']
    change_percent_ytd = data['ChangePercentYTD']
    high = data['High']
    low = data['Low']
    # NOTE(review): `open` shadows the builtin for the rest of this function.
    open = data['Open']
    ## Format decimals to the second-most significant place
    form = u'%.2f'
    last_price = form % last_price
    change = form % change
    change_percent = form % change_percent
    change_ytd = form % change_ytd
    change_percent_ytd = form % change_percent_ytd
    high = form % high
    low = form % low
    open = form % open
    ## {Name} - {Symbol} - {LastPrice} - {Change} - {ChangePercent} - {Timestamp}
    resp = '{0} ({1}) | Last Price: {2} | Change: {3} ({4}%) | {5}'
    jenni.say(resp.format(name, symbol, last_price, change, change_percent, timestamp))
# Command words the bot dispatches to stocks().
stocks.commands = ['stocks', 'stock', 'ticker']
if __name__ == '__main__':
    # Python 2 print statement -- this module targets a Python 2 bot.
    print __doc__.strip()
| true |
d20311e258c735b77b64344b37671ed15a323d02 | Python | parwell/Scripts | /checkIP.py | UTF-8 | 789 | 3 | 3 | [] | no_license | import urllib.request
import re
import sys
def retrieveIP():
    """Download the checkip.dyndns.org page (which embeds the caller's
    public IP) and return it as a string."""
    response = urllib.request.urlopen("http://checkip.dyndns.org")
    return str(response.read())
def parseIP(query):
    """Extract the first dotted-quad IPv4 address found in `query`.

    Raises AttributeError when no address is present (same as the original).
    Fixes two regex bugs: the octet pattern used 25[0-4], silently rejecting
    octets of 255, and the separating dots were unescaped, so '.' matched
    any character.
    """
    octet = r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
    pattern = re.compile(r"\.".join([octet] * 4))
    match = pattern.search(query)
    return match.group()
def main():
    """Look up and print the public IP; on failure print a message, wait for
    Enter, and exit."""
    print('Finding IP address...')
    try:
        myIP = parseIP(retrieveIP())
        print(myIP)
    except Exception:
        # Narrowed from the original bare `except:` so KeyboardInterrupt and
        # SystemExit still propagate instead of being reported as a
        # connection failure.
        print('Unable to connect.')
        input()
        sys.exit()
if __name__ == "__main__":
main()
| true |
a491ddd9b2f92fe18435f1062e031727b9b4219b | Python | zhuNanyang/deeplearning | /RNN.py | UTF-8 | 5,109 | 2.765625 | 3 | [] | no_license | import copy,numpy as np
import datetime as dt
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# Load MNIST with one-hot labels via the TF 1.x tutorial helper
# (downloads into the given directory on first use).
mnist=input_data.read_data_sets('F:/pythonNotebook/data/',one_hot=True)
trainimg=mnist.train.images
trainlabel=mnist.train.labels
testimg=mnist.test.images
testlabel=mnist.test.labels
#L=20000
#trainimg=trainimg[0:L]
#trainlabel=trainlabel[0:L]
def shujuzhuanhuan(i, x, nsteps):
    """Gather time-step `i` from each of the 100 samples laid out
    consecutively in `x`: [x[i], x[i + nsteps], ..., x[i + 99 * nsteps]]."""
    return [x[i + nsteps * sample] for sample in range(100)]
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), elementwise for array input."""
    return 1 / (1 + np.exp(-x))
def sigmoidqiudao(x):
    """Sigmoid derivative expressed via the sigmoid output x: x * (1 - x)."""
    slope = x * (1 - x)
    return slope
def softmax(x, y):
    """Row-wise softmax of `x` plus the gradient against one-hot labels `y`.

    Returns (dx, probs) where probs is the softmax of each row of x and
    dx = (y - probs) / N with N the batch size.
    """
    # Shift by the row max before exponentiating for numerical stability.
    # (The original computed this stable form and then immediately
    # overwrote it with a raw np.exp(x), which overflows for large logits.)
    probs = np.exp(x - np.max(x, axis=1, keepdims=True))
    probs /= np.sum(probs, axis=1, keepdims=True)
    N = x.shape[0]
    # Cross-entropy-style loss; computed but unused (kept for debugging).
    loss = -np.mean(np.sum(y * np.log(probs) + (1 - y) * np.log(1 - probs), axis=1))
    dx = y - probs
    dx /= N
    return dx, probs
def tanh(x):
    """Hyperbolic tangent, elementwise for array input."""
    return np.tanh(x)
def tanhqiudao(x):
    """Tanh derivative expressed via the tanh output x: 1 - x^2."""
    return 1 - x ** 2
# ---------------------------------------------------------------------------
# Vanilla-RNN training loop on MNIST, written with raw numpy: each 28x28
# image is fed as a sequence of 28 rows (28 features per step).
# NOTE(review): accuracy1/cost/cost1 are (re)initialised but never consumed,
# and U1/V1/W1/b1/c1 are plain aliases (no copy) of the initial parameters.
# ---------------------------------------------------------------------------
#binary_dim=28
inputdim=28 # Input dimension
nsteps=28 # Input sequence step
hiddendim=128 # The number of hidden layer
outputdim=10 # Output dimension
lr=0.0005 # learning rate
# Initialize weights U (input->hidden), V (hidden->output), W (hidden->hidden)
# uniformly in [-0.2, 0.2).
U=1/5*(2*np.random.random((inputdim,hiddendim))-1)
V=1/5*(2*np.random.random((hiddendim,outputdim))-1)
W=1/5*(2*np.random.random((hiddendim,hiddendim))-1)
U1=U
V1=V
W1=W
# Initialize thresholds (biases) b (hidden) and c (output).
b=1/5*(2*np.random.random((1,hiddendim))-1)
c=1/5*(2*np.random.random((1,outputdim))-1)
b1=b
c1=c
train_epoch=100 # training epoch
batch_size=100
accuracy1=list()
for j in range(train_epoch):
    start_everytime = dt.datetime.now() # Record each training time
    total_batch=int(mnist.train.num_examples/batch_size)
    for f in range(total_batch):
        batch_xs,batch_ys=mnist.train.next_batch(batch_size) # Get training data
        #batch_xs=batch_xs/255
        x1=list()
        cost=[]
        cost1=[]
        accuracy1=list()
        layer2_deltas=list()
        layer1_values=list()
        layer1_values.append(np.zeros(hiddendim)) # Intialize the value of hidden layer
        # forward propagation: unroll the RNN over all 28 row-steps.
        for i in range(nsteps):
            X=np.reshape(batch_xs,[-1,inputdim])
            # Split data into different inputs (step i of every sample).
            x=shujuzhuanhuan(i,X,nsteps)
            x=np.array(x)
            #x=x[batch_size*i:batch_size*(i+1)]
            layer1=tanh(np.dot(x,U)+np.dot(layer1_values[-1],W)+b) # Get the value of hidden layer
            layer2=np.dot(layer1,V)+c # Get the value of output layer
            layer1_values.append(layer1) # Save the hidden layer values for each step
            error,out=softmax(layer2,batch_ys) # # The value of output layer through softmax classifer
            layer2_deltas.append(error) # Save the error value
            x1.append(x)
        layer1_delta2=np.zeros(hiddendim)
        # back propagation (BPTT); note the parameters are updated inside
        # this per-step loop, using only the final step's output error.
        for i in range(nsteps-1):
            #x1=np.reshape(batch_xs,[-1,inputdim])
            #x1=x1[batch_size*(-i-1):batch_size*(-i-2)]
            layer1_now=layer1_values[-i-1] # Get the value of hidden layer at current moment from the forward propagation
            layer1_pred=layer1_values[-i-2] # Get the value of hidden layer at previous moment from the forward propagation
            layer2_delta=layer2_deltas[-1] # Get the final error value from the forward propagation
            # Get the error value of hidden layer (output error is only
            # injected at the last time step, i == 0).
            if (i==0):
                layer1_delta=(layer1_delta2.dot(W)+layer2_delta.dot(V.T))*tanhqiudao(layer1_now)
            else:
                layer1_delta=layer1_delta2.dot(W)*tanhqiudao(layer1_now)
            #layer1_delta=(layer1_delta2.dot(W)+layer2_delta.dot(V.T))*sigmoidqiudao(layer1_now)
            # Get the gradients of weights U,V,W
            dV = layer1_values[-1].T.dot(layer2_delta)
            dU = x1[-i-1].T.dot(layer1_delta)
            dW = layer1_pred.T.dot(layer1_delta)
            # Get the gradients of thresholds b,c
            db = np.sum(layer1_delta,axis=0)
            dc = np.sum(layer2_delta,axis=0)
            layer1_delta2=layer1_delta
            # Update the weights of U,V,W (gradient *ascent* on y - probs,
            # which is descent on the cross-entropy loss).
            W = W+dW*lr
            U = U+dU*lr
            V = V+dV*lr
            # Update the thresholds of b,c
            b = b+db*lr
            c = c+dc*lr
        loss = -np.mean(np.sum(batch_ys*np.log(out)+(1-batch_ys)*np.log(1-out),axis=1)) # Calculate loss function
        #cost.append(loss[0])
        #cost1.append(cost[-1])
        #print(cost1)
        end_everytime = dt.datetime.now()
        all_everytime = end_everytime-start_everytime
        #print(all_everytime)
        y_predict = out
        y_predict[np.arange(y_predict.shape[0]), np.argmax(y_predict, axis=1)] = 1
        accuracy = np.sum(np.argmax(y_predict, axis=1) == np.argmax(batch_ys, axis=1)) * 1.0 / batch_ys.shape[0] # Calculate accuracy value
        print(accuracy)
#plt.plot(cost1)
#plt.show()
| true |
77b3a5382e1421bb769484d557af5f46395f0a39 | Python | edwardkw/seegridtest | /deck.py | UTF-8 | 1,813 | 3.890625 | 4 | [] | no_license | # Edward Kwiatkowski, 10/31/2020, for Seegrid Interview
import random
class Card:
    """A single playing card: a suit paired with a face value."""

    def __init__(self, suit, value):
        self.__suit = suit
        self.__value = value

    def __repr__(self):
        # e.g. "Ace of Spades"
        return ' of '.join((self.__value, self.__suit))
return self.__value + ' of ' + self.__suit
class Deck:
    """A deck of cards that deals from the front and can be reshuffled.

    Cards at indices below ``__place_in_deck`` have already been dealt;
    the remainder are still in the deck.
    """

    # defaults to standard deck, can be used for 'special' cards/decks
    # (defaults are tuples now: mutable default arguments are a Python
    # pitfall even when, as here, they were never mutated)
    def __init__(self,
                 suits=('Clubs', 'Diamonds', 'Hearts', 'Spades'),
                 values=('2', '3', '4', '5', '6', '7', '8', '9', '10',
                         'Jack', 'Queen', 'King', 'Ace')):
        self.__size = len(suits) * len(values)
        self.__deck = [Card(suit, value) for suit in suits for value in values]
        self.__place_in_deck = 0

    # added optional param 'full' to specify whether all dealt cards are
    # returned to the deck before shuffle
    def shuffle(self, full=False):
        """Randomly permute the undealt cards in place.

        With ``full=True`` the dealt cards are first returned to the
        deck, so the entire deck is shuffled.
        """
        if full:
            self.__place_in_deck = 0
        # Shuffle only the undealt tail; dealt cards keep their order.
        # (Replaces the original hand-rolled random-pop loop.)
        undealt = self.__deck[self.__place_in_deck:]
        random.shuffle(undealt)
        self.__deck[self.__place_in_deck:] = undealt

    def deal_card(self):
        """Return the next card, or None when the deck is exhausted."""
        if self.__place_in_deck == self.__size:
            return None
        card = self.__deck[self.__place_in_deck]
        self.__place_in_deck += 1
        return card

    def __repr__(self):
        # The original returned None (implicitly) once every card had
        # been dealt, which makes print()/repr() raise TypeError.
        # Always return a string instead.
        dealt = self.__deck[:self.__place_in_deck]
        remaining = self.__deck[self.__place_in_deck:]
        return f'Dealt cards: {dealt} \nCards in deck: {remaining}'
# some sample usage: deal three cards, shuffle the remaining cards,
# then do a full shuffle (dealt cards returned to the deck first).
sampleDeck = Deck()
print(sampleDeck)
sampleDeck.deal_card()
sampleDeck.deal_card()
sampleDeck.deal_card()
print(sampleDeck)
sampleDeck.shuffle()
print(sampleDeck)
sampleDeck.shuffle(True)
print(sampleDeck)
| true |
8fb386044c9a62f69d5226698a9ebc3baa4b2873 | Python | Ebuyuktas/8.hafta_odevler-Fonksiyonlar | /ebob.py | UTF-8 | 328 | 3.734375 | 4 | [] | no_license | #odev 4#
#ebob bulma#
# Prompt (Turkish): "Enter two numbers / we'll give you their EBOB (GCD)".
print("""İki sayi giriniz
Ebobunu size verelim\n""")
def ebob(sayi1=None, sayi2=None):
    """Return the greatest common divisor (Turkish: EBOB) of two integers.

    Parameters
    ----------
    sayi1, sayi2 : int, optional
        The two numbers.  For backward compatibility either value is
        read interactively from stdin when not supplied.

    Returns
    -------
    int
        The largest integer dividing both numbers.
    """
    if sayi1 is None:
        sayi1 = int(input("1.sayi: "))
    if sayi2 is None:
        sayi2 = int(input("2.sayi: "))
    # Collect every common divisor up to the second number (same scan as
    # the original, without shadowing the builtin `list`) and keep the
    # largest; 1 always divides both, so max() is safe for sayi2 >= 1.
    divisors = [i for i in range(1, sayi2 + 1)
                if sayi1 % i == 0 and sayi2 % i == 0]
    return max(divisors)
print(ebob())
| true |
51944f0a4e748c61cd23618c698254298ea4dc76 | Python | essamhamedgaafar/sudoku_with_python | /check_if_get_solution_and_solve.py | UTF-8 | 8,122 | 2.6875 | 3 | [] | no_license | import math
import sys
def is_solved(l):
    """Classify a 9x9 sudoku grid.

    Returns
    -------
    None  when any cell is still 0 (incomplete),
    False when a duplicate exists in a row, column or 3x3 box
          (printing the same diagnostic message the original printed),
    True  when the grid is completely and consistently filled.
    """
    # Rewritten without the original's comprehension trick, which
    # rebound the outer loop variables i/j inside the box check and
    # made the code fragile to read and modify.
    for x in range(9):
        for y in range(9):
            value = l[x][y]
            if value == 0:
                # Incomplete
                return None
            for p in range(9):
                # Diagnostic labels kept exactly as in the original
                # (column scan -> "horizontal", row scan -> "vertical").
                if p != x and value == l[p][y]:
                    print('horizontal issue detected!', (x, y))
                    return False
                if p != y and value == l[x][p]:
                    print('vertical issue detected!', (x, y))
                    return False
            # Top-left corner of the 3x3 box containing (x, y).
            i_n, j_n = 3 * (x // 3), 3 * (y // 3)
            for p in range(i_n, i_n + 3):
                for q in range(j_n, j_n + 3):
                    if (p, q) != (x, y) and value == l[p][q]:
                        print('box issue detected!', (x, y))
                        return False
    # Solved
    return True
def is_valid(l):
    """Return False (with a diagnostic print) if any *filled* cell of the
    9x9 grid duplicates a value in its row, column or 3x3 box; True
    otherwise.  Empty cells (0) are ignored.
    """
    # Rewritten without the original's comprehension trick, which
    # rebound the outer loop variables i/j inside the box check.
    for x in range(9):
        for y in range(9):
            value = l[x][y]
            if value == 0:
                continue
            for p in range(9):
                # Diagnostic labels kept exactly as in the original.
                if p != x and value == l[p][y]:
                    print('horizontal issue detected!', (x, y))
                    return False
                if p != y and value == l[x][p]:
                    print('vertical issue detected!', (x, y))
                    return False
            # Top-left corner of the 3x3 box containing (x, y).
            i_n, j_n = 3 * (x // 3), 3 * (y // 3)
            for p in range(i_n, i_n + 3):
                for q in range(j_n, j_n + 3):
                    if (p, q) != (x, y) and value == l[p][q]:
                        print('box issue detected!', (x, y))
                        return False
    # Solved
    return True
def get_box_start_coordinate(x, y):
    """Top-left (row, col) of the 3x3 box that contains cell (x, y)."""
    box_row = 3 * int(math.floor(x / 3))
    box_col = 3 * int(math.floor(y / 3))
    return box_row, box_col
def get_horizontal(x, y, l):
    """Non-zero values already placed in row x (y is unused, kept for
    signature symmetry with get_vertical/get_box)."""
    row = l[x]
    return [cell for cell in row[:9] if cell > 0]
def get_vertical(x, y, l):
    """Non-zero values already placed in column y (x is unused, kept for
    signature symmetry with get_horizontal/get_box)."""
    return [row[y] for row in l[:9] if row[y] > 0]
def get_box(x, y, l):
    """Non-zero values already placed in the 3x3 box containing (x, y),
    collected in row-major order."""
    # Inlined get_box_start_coordinate: top-left corner of the box.
    top, left = 3 * (x // 3), 3 * (y // 3)
    found = []
    for r in range(top, top + 3):
        for c in range(left, left + 3):
            if l[r][c] > 0:
                found.append(l[r][c])
    return found
def detect_and_simplify_double_pairs(l, pl):
    """Apply the "naked pair" sudoku elimination once.

    Scans every cell whose candidate list pl[i][j] has exactly two
    values.  If another cell in the same row, column or 3x3 box has the
    same two candidates, those two values can be removed from every
    *other* cell of that unit.  The first time an elimination leaves a
    cell with a single candidate, that value is written into the grid
    `l` and True is returned immediately (so the caller restarts its
    propagation); returns False when no placement was made.

    Both `l` and `pl` are mutated in place.
    """
    for (i, j) in [(i, j) for i in range(9) for j in range(9) if len(pl[i][j]) == 2]:
        temp_pair = pl[i][j]
        # --- same row: look for a matching pair to the right of column j ---
        for p in (p for p in range(j+1, 9) if len(pl[i][p]) == 2 and len(set(pl[i][p]) & set(temp_pair)) == 2):
            for q in (q for q in range(9) if q != j and q != p):
                pl[i][q] = list(set(pl[i][q]) - set(temp_pair))
                if len(pl[i][q]) == 1:
                    l[i][q] = pl[i][q].pop()
                    return True
        # --- same column: look for a matching pair below row i ---
        for p in (p for p in range(i+1, 9) if len(pl[p][j]) == 2 and len(set(pl[p][j]) & set(temp_pair)) == 2):
            for q in (q for q in range(9) if q != i and p != q):
                pl[q][j] = list(set(pl[q][j]) - set(temp_pair))
                if len(pl[q][j]) == 1:
                    l[q][j] = pl[q][j].pop()
                    return True
        # --- same 3x3 box: any other cell carrying the identical pair ---
        # NOTE(review): the comprehension targets (a, b)/(c, d) rebind
        # names in the enclosing scope; kept as-is to preserve behaviour.
        i_n, j_n = get_box_start_coordinate(i, j)
        for (a, b) in [(a, b) for a in range(i_n, i_n+3) for b in range(j_n, j_n+3)
                       if (a, b) != (i, j) and len(pl[a][b]) == 2 and len(set(pl[a][b]) & set(temp_pair)) == 2]:
            for (c, d) in [(c, d) for c in range(i_n, i_n+3) for d in range(j_n, j_n+3)
                           if (c, d) != (a, b) and (c, d) != (i, j)]:
                pl[c][d] = list(set(pl[c][d]) - set(temp_pair))
                if len(pl[c][d]) == 1:
                    l[c][d] = pl[c][d].pop()
                    return True
    return False
def update_unique_horizontal(x, y, l, pl):
    """If cell (x, y) holds a candidate that no other cell in row x can
    take, write it into the grid and return True; otherwise False."""
    candidates = set(pl[x][y])
    for col in range(9):
        if col != y:
            candidates -= set(pl[x][col])
    if len(candidates) == 1:
        l[x][y] = candidates.pop()
        return True
    return False
def update_unique_vertical(x, y, l, pl):
    """If cell (x, y) holds a candidate that no other cell in column y
    can take, write it into the grid and return True; otherwise False."""
    candidates = set(pl[x][y])
    for row in range(9):
        if row != x:
            candidates -= set(pl[row][y])
    if len(candidates) == 1:
        l[x][y] = candidates.pop()
        return True
    return False
def update_unique_box(x, y, l, pl):
    """If cell (x, y) holds a candidate that no other cell in its 3x3
    box can take, write it into the grid and return True; else False."""
    # Inlined get_box_start_coordinate: top-left corner of the box.
    top, left = 3 * (x // 3), 3 * (y // 3)
    candidates = set(pl[x][y])
    for r in range(top, top + 3):
        for c in range(left, left + 3):
            if (r, c) != (x, y):
                candidates -= set(pl[r][c])
    if len(candidates) == 1:
        l[x][y] = candidates.pop()
        return True
    return False
def find_and_place_possibles(l):
    """Run populate_possibles until it stops placing forced values
    (it returns False after each in-grid placement) and hand back the
    final candidate grid."""
    pl = populate_possibles(l)
    while pl is False:
        pl = populate_possibles(l)
    return pl
def populate_possibles(l):
    """Build the per-cell candidate grid for `l`.

    Whenever some empty cell is found to have exactly one candidate,
    that value is written into `l` immediately and False is returned so
    the caller recomputes everything; otherwise the full 9x9 grid of
    candidate lists is returned.
    """
    pl = [[[] for _ in row] for row in l]
    for x in range(9):
        for y in range(9):
            if l[x][y] != 0:
                continue
            used = get_horizontal(x, y, l) + get_vertical(x, y, l) + get_box(x, y, l)
            candidates = list(set(range(1, 10)) - set(used))
            if len(candidates) == 1:
                l[x][y] = candidates.pop()
                return False
            pl[x][y] = candidates
    return pl
def find_and_remove_uniques(l, pl):
    """Try the three "hidden single" eliminations on every empty cell.

    Returns True as soon as one cell gets filled (the caller restarts
    propagation afterwards), False when no placement was possible.
    """
    for x in range(9):
        for y in range(9):
            if l[x][y] != 0:
                continue
            if (update_unique_horizontal(x, y, l, pl)
                    or update_unique_vertical(x, y, l, pl)
                    or update_unique_box(x, y, l, pl)):
                return True
    return False
def try_with_possibilities(l):
    """Run constraint-propagation passes over the grid until neither the
    naked-pair rule nor the hidden-single rule makes progress, then
    return the final candidate grid."""
    while True:
        pl = find_and_place_possibles(l)
        if detect_and_simplify_double_pairs(l, pl):
            continue
        if find_and_remove_uniques(l, pl):
            continue
        # No rule fired this pass: propagation has converged.
        return pl
def get_first_conflict(pl):
    """Return (row, col) of the first cell that still has candidate
    values, or None (implicitly) when every candidate list is empty."""
    for row_index, row in enumerate(pl):
        for col_index, cell in enumerate(row):
            if len(cell) > 0:
                return (row_index, col_index)
def get_deep_copy(l):
    """Row-wise copy of a 2-D list: the rows are new lists, while the
    cell values themselves are shared (sufficient for int grids)."""
    return [list(row) for row in l]
def run_assumption(l, pl):
    """Backtracking search: guess a value for the first undecided cell.

    Picks the first cell that still has candidates, tries each candidate
    on a copy of the grid, re-runs constraint propagation, and recurses
    when the guess does not immediately solve the puzzle.  Returns the
    solved grid on success, or False when every guess fails (or when
    there is no undecided cell left to guess on).
    """
    try:
        c = get_first_conflict(pl)
        # NOTE(review): when no cell has candidates, c is None and the
        # subscript below raises TypeError, which the bare except turns
        # into the False return — intentional-looking but fragile.
        fl = pl[c[0]][c[1]]
        # print('Assumption Index : ', c)
        # print('Assumption List: ', fl)
    except:
        return False
    for i in fl:
        # Work on a copy so a failed guess does not corrupt the caller's grid.
        new_list = get_deep_copy(l)
        new_list[c[0]][c[1]] = i
        new_pl = try_with_possibilities(new_list)
        is_done = is_solved(new_list)
        if is_done == True:
            l = new_list
            return new_list
        else:
            # Recurse one level deeper with the propagated candidate grid.
            new_list = run_assumption(new_list, new_pl)
            if new_list != False and is_solved(new_list) == True:
                return new_list
    return False
if __name__ == "__main__":
    # 0 marks an empty cell.
    l = [
        [0, 0, 8, 2, 0, 0, 0, 5, 0],
        [0, 0, 0, 0, 0, 4, 3, 0, 8],
        [0, 6, 2, 9, 0, 0, 0, 0, 0],
        [7, 0, 1, 0, 0, 2, 0, 0, 5],
        [9, 0, 0, 7, 0, 1, 0, 0, 3],
        [0, 0, 0, 0, 0, 0, 0, 0, 4],
        [2, 0, 0, 4, 0, 0, 0, 9, 0],
        [0, 9, 0, 0, 6, 0, 0, 3, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0]
    ]
    # This puzzle copied from Hacked rank test case
    # Reject grids that already contain a duplicate.
    if is_valid(l) == False:
        print("Sorry! Invalid.")
        sys.exit()
    # First attempt: pure constraint propagation (no guessing).
    pl = try_with_possibilities(l)
    is_done = is_solved(l)
    if is_done == True:
        for i in l:
            print(i)
        print("Solved!!!")
        sys.exit()
    # Propagation alone was not enough; fall back to backtracking search.
    print("Unable to solve by traditional ways")
    print("Starting assumption based solving")
    new_list = run_assumption(l, pl)
    if new_list != False:
        is_done = is_solved(new_list)
        print('is solved ? - ', is_done)
        for i in new_list:
            print(i)
        if is_done == True:
            print("Solved!!! with assumptions.")
            sys.exit()
    # Reached only when the search failed: dump the partial grid.
    print(l)
    print("Sorry! No Solution. Need to fix the valid function :(")
    sys.exit()
e1a81b2e7ef31f0fba0c8b044ccf2a7e3d67dd69 | Python | luispedro/mahotas | /mahotas/internal.py | UTF-8 | 5,849 | 3.140625 | 3 | [
"MIT",
"BSL-1.0"
] | permissive | # Copyright (C) 2011-2019, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT (see COPYING file)
import numpy as np
def _get_output(array, out, fname, dtype=None, output=None):
    '''
    output = _get_output(array, out, fname, dtype=None, output=None)
    Implements the mahotas output convention:
    (1) if `out` is None, return np.empty(array.shape, array.dtype)
    (2) else verify that output is of right size, shape, and contiguous
    Parameters
    ----------
    array : ndarray
    out : ndarray or None
    fname : str
        Function name. Used in error messages
    dtype : dtype, optional
        Expected element type of `out`; defaults to array.dtype
    output : ndarray, optional
        Deprecated alias for `out`; using it triggers a
        DeprecationWarning and it is ignored whenever `out` is given
    Returns
    -------
    output : ndarray
    '''
    detail = '.\nWhen an output argument is used, the checking is very strict as this is a performance feature.'
    if dtype is None:
        dtype = array.dtype
    # Handle the deprecated `output` keyword before any validation.
    if output is not None: # pragma: no cover
        import warnings
        warnings.warn('Using deprecated `output` argument in function `%s`. Please use `out` in the future. It has exactly the same meaning and it matches what numpy uses.' % fname, DeprecationWarning)
        if out is not None:
            warnings.warn('Using both `out` and `output` in function `%s`.\nMahotas is going to ignore the `output` argument and use the `out` version exclusively.' % fname)
        else:
            out = output
    # No output supplied: allocate a fresh (uninitialised) array.
    if out is None:
        return np.empty(array.shape, dtype)
    # Strict validation of a caller-supplied output buffer.
    if out.dtype != dtype:
        raise ValueError(
            'mahotas.%s: `out` has wrong type (out.dtype is %s; expected %s)%s' %
            (fname, out.dtype, dtype, detail))
    if out.shape != array.shape:
        raise ValueError('mahotas.%s: `out` has wrong shape (got %s, while expecting %s)%s' % (fname, out.shape, array.shape, detail))
    if not out.flags.contiguous:
        raise ValueError('mahotas.%s: `out` is not c-array%s' % (fname,detail))
    return out
def _get_axis(array, axis, fname):
'''
axis = _get_axis(array, axis, fname)
Checks that ``axis`` is a valid axis of ``array`` and normalises it.
Parameters
----------
array : ndarray
axis : int
fname : str
Function name. Used in error messages
Returns
-------
axis : int
The positive index of the axis to use
'''
if axis < 0:
axis += len(array.shape)
if not (0 <= axis < len(array.shape)):
raise ValueError('mahotas.%s: `axis` is out of bounds (maximum was %s, got %s)' % (fname, array.ndim, axis))
return axis
def _normalize_sequence(array, value, fname):
'''
values = _normalize_sequence(array, value, fname)
If `value` is a sequence, checks that it has an element for each dimension
of `array`. Otherwise, returns a sequence that repeats `value` once for
each dimension of array.
Parameters
----------
array : ndarray
value : sequence or scalar
fname : str
Function name. Used in error messages
Returns
-------
values : sequence
'''
try:
value = list(value)
except TypeError:
return [value for s in array.shape]
if len(value) != array.ndim:
raise ValueError('mahotas.%s: argument is sequence, but has wrong size (%s for an array of %s dimensions)' % (fname, len(value), array.ndim))
return value
def _verify_is_floatingpoint_type(A, function_name):
'''
_verify_is_integer_type(array, "function")
Checks that ``A`` is a floating-point array. If it is not, it raises
``TypeError``.
Parameters
----------
A : ndarray
function_name : str
Used for error messages
'''
if not np.issubdtype(A.dtype, np.floating):
raise TypeError('mahotas.{}: This function only accepts floating-point types (passed array of type {})'.format(function_name, A.dtype))
def _verify_is_integer_type(A, function_name):
'''
_verify_is_integer_type(array, "function")
Checks that ``A`` is an integer array. If it is not, it raises
``TypeError``.
Parameters
----------
A : ndarray
function_name : str
Used for error messages
'''
k = A.dtype.kind
if k not in "iub": # integer, unsigned integer, boolean
raise TypeError('mahotas.%s: This function only accepts integer types (passed array of type %s)' % (function_name, A.dtype))
def _verify_is_nonnegative_integer_type(A, function_name):
    '''
    _verify_is_nonnegative_integer_type(array, "function")

    Raise TypeError unless `A` is an integer array, and ValueError when a
    signed-integer array contains negative values.

    Parameters
    ----------
    A : ndarray
    function_name : str
        Used for error messages
    '''
    _verify_is_integer_type(A, function_name)
    is_signed = (A.dtype.kind == 'i')
    if is_signed and not (A >= 0).all():
        raise ValueError('mahotas.{0}: This function only accepts positive integer types (passed array of type {1})'.format(function_name, A.dtype))
def _make_binary(array):
'''
bin = _make_binary(array)
Returns (possibly a copy) of array as a boolean array
'''
array = np.asanyarray(array)
if array.dtype != bool:
return (array != 0)
return array
def _as_floating_point_array(array):
'''
array = _as_floating_point_array(array)
Returns (possibly a copy) of array as a floating-point array
'''
array = np.asanyarray(array)
if not np.issubdtype(array.dtype, np.floating):
return array.astype(np.double)
return array
def _check_3(arr, funcname):
if arr.ndim != 3 or arr.shape[2] != 3:
raise ValueError('mahotas.%s: this function expects an array of shape (h, w, 3), received an array of shape %s.' % (funcname, arr.shape))
def _check_2(arr, funcname):
if arr.ndim != 2:
raise ValueError('mahotas.%s: this function can only handle 2D arrays (passed array with shape %s).' % (funcname, arr.shape))
| true |
9f3f1667188277930ab4f511525b40ea6430a30e | Python | Nahid320/Vehicle-Tracking | /method1.py | UTF-8 | 1,776 | 3.34375 | 3 | [] | no_license | # OpenCV Python program to detect cars in video frame
# import libraries of python OpenCV
import cv2
import time
import numpy as np
# capture frames from a video and count the number of frames
# Open the video and read the total frame count (used for averaging below).
cap = cv2.VideoCapture('object detection.mp4')
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# load XML car classifiers
car_cascade = cv2.CascadeClassifier('cars.xml')
# Running total of detections across all frames (shadows builtin sum()).
sum=0
# loop runs if capturing has been initialized.
while(cap.isOpened()):
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret == True:
        # resize original video to fit into the screen
        # (assumes the source is 1920x1080 — TODO confirm)
        scale=0.5
        resized_frames = cv2.resize(frame, (int(scale*1920), int(scale*1080)))
        # Detects cars of different sizes in the input image
        cars = car_cascade.detectMultiScale(resized_frames, 1.1, 1, 0, (60,60), (100,100))
        # count the number of detected cars in each frame
        # (detectMultiScale returns an (n, 4) array, hence size / 4)
        if type(cars) == np.ndarray:
            count=cars.size/4
            sum += count
        # To draw a rectangle around each car
        for (x,y,w,h) in cars:
            cv2.rectangle(resized_frames,(x,y),(x+w,y+h),(0,0,255),2)
        # Display frames in a window
        # NOTE(review): `count` is only assigned when cars were detected;
        # a first frame with no detections raises NameError here.
        cv2.putText(resized_frames,'COUNT IN EACH FRAME : %r' %int(count), (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
        cv2.imshow('video2', resized_frames)
        time.sleep(0.1)
        # Wait for Esc key to stop
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    else:
        break
# Average detections per frame, rounded up.
total = int(np.ceil(sum/frame_count))
print("total count = ", total)
time.sleep(2)
# De-allocate any associated memory usage
cv2.destroyAllWindows()
2d35cb49e0abaf6a6a1f230c5476c2ab0f5f3587 | Python | haizhiship/PythonLearn | /Lesson/LanguageLesson/Lesson7ClassorPPAttachment.py | UTF-8 | 1,847 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env python
# -*-coding:utf-8 -*-
'''
9. The PP attachment corpus records prepositional-phrase attachment
decisions; each instance in the corpus is encoded as a PPAttachment
object.  Using this sub-corpus, build a classifier that tries to
predict which preposition links a given pair of nouns.  For example,
given the nouns "team" and "researchers", the classifier should
predict the preposition "of".  For more information on the PP
attachment corpus, see the corpus HOWTO at http://www.nltk.org/howto.
'''
from nltk.corpus import ppattach
import nltk

# Peek at one training instance and its noun1/prep/noun2 fields.
ppattach.attachments('training')
inst = ppattach.attachments('training')[2]
print inst
print inst.noun1, inst.prep, inst.noun2
# Build a (features, label) pair directly from a PPAttachment record:
# its descriptive fields become the classifier features and the
# preposition becomes the target label.
def gender_features(NounAndPrep):
    """Map a PPAttachment instance to (feature_dict, prep_label)."""
    features = {
        'noun1': NounAndPrep.noun1,
        'noun2': NounAndPrep.noun2,
        'sent': NounAndPrep.sent,
        'verb': NounAndPrep.verb,
        'attachment': NounAndPrep.attachment,
    }
    return (features, NounAndPrep.prep)
print gender_features(inst)
# Turn every training instance into a (features, label) pair.
PPAttach_set = [gender_features(inst) for inst in ppattach.attachments('training')]
#print len(PPAttach_set)  # 20801 -- PPAttach has 20801 entries in total
# Split into training / dev-test / test sets.
PPAttach_train_set = PPAttach_set[1000:]
PPAttach_devtest_set = PPAttach_set[500:1000]
PPAttach_test_set = PPAttach_set[:500]
classifier = nltk.NaiveBayesClassifier.train(PPAttach_train_set)
# The two prints below report dev-test and test accuracy (messages in Chinese).
print '决策树的开发测试准确率为 %s' % nltk.classify.accuracy(classifier, PPAttach_devtest_set)
print '决策树的测试准确率为 %s' % nltk.classify.accuracy(classifier, PPAttach_test_set)
# Predict the linking preposition for two example noun pairs.
print 'director %s conglomerate' % classifier.classify({'noun1':'director','noun2':'conglomerate'})
print 'team %s rearchers' % classifier.classify({'noun1':'researchers','noun2':'team'})
| true |
6ebf86c49ccbdde81f018bff5ca5a37654769a24 | Python | dparret/strava-cli | /strava/commands/zones_power.py | UTF-8 | 1,135 | 2.734375 | 3 | [
"MIT"
] | permissive | import math
import click
from strava.decorators import format_result
# Column keys consumed by the @format_result decorator when rendering
# the zones table.
_ZONES_COLUMNS = (
    'zone',
    'power'
)
@click.command(name='power',
               help='Generate power zones and FTP according to the 20 minutes averaged value provided.')
@click.argument('power', required=True, type=int, nargs=1)
@format_result(table_columns=_ZONES_COLUMNS)
def get_zones_power(power):
    """Derive FTP (95% of the 20-minute average power) and the seven
    training zones expressed as FTP percentages."""
    ftp = math.ceil(power * 0.95)
    # Upper bounds of zones 1-6 as fractions of FTP.
    z1, z2, z3, z4, z5, z6 = (round(ftp * pct)
                              for pct in (0.54, 0.74, 0.89, 1.04, 1.2, 1.49))
    ftp_table = {
        'FTP': ftp,
        '---': '---',
        '1 (<54%)': f'<{z1}',
        '2 (55%-74%)': f'{z1 + 1}-{z2}',
        '3 (75%-89%)': f'{z2 + 1}-{z3}',
        '4 (90%-104%)': f'{z3 + 1}-{z4}',
        '5 (105%-120%)': f'{z4 + 1}-{z5}',
        '6 (121%-149%)': f'{z5 + 1}-{z6}',
        '7 (>150%)': f'>{z6 + 1}',
    }
    return _as_table(ftp_table)
def _as_table(table):
return [{'zone': k, 'power': v} for k, v in table.items()]
| true |
650671399dd262d2571241a576c4726b79ddd762 | Python | eltonfernando/trainUnet | /analise_mask.py | UTF-8 | 372 | 2.578125 | 3 | [] | no_license | import cv2
import numpy as np
import os
# Measure what fraction of mask pixels is set across all mask images.
local="./data/mask"
list_data=os.listdir(local)
soma_mask=0
for nome in list_data:
    img=cv2.imread((local+'/'+nome))
    # NOTE(review): imread returns a 3-channel image, so each set pixel
    # is counted up to 3 times here — confirm whether that is intended.
    soma_mask+=np.sum(img/255)
    cv2.imshow("mask",img)
    cv2.waitKey(1)
# Hard-coded 480x848 resolution — assumed to match every mask; TODO confirm.
total_px=len(list_data)*480*848
print("total px: ",total_px)
print("total mask: ",soma_mask)
print("porcent% ",soma_mask/total_px*100)