blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
839f46c31bd89b279cdd845c151db19c12159751 | Python | xishengcai/python_learn | /base/code.py | UTF-8 | 505 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
aa = '{"ab": "蔡锡生"}'
def josn_encode(dict, encoding):
    """Encode every string value of *dict* in place using *encoding*.

    Bug fix: the original looped ``for k, v in dict:`` — iterating a dict
    yields keys only, so unpacking raised on any non-empty dict.  Iterate
    over items() instead.  (Parameter name ``dict`` shadows the builtin but
    is kept for interface compatibility.)

    Returns the same (mutated) dict for convenience.
    """
    for k, v in dict.items():
        dict[k] = v.encode(encoding)
    return dict
def throw_error():
    """Demonstrate raising a plain Exception carrying a message."""
    message = "抛出一个异常"  # "raise an exception"
    raise Exception(message)
# NOTE(review): Python 2 only — the bare `print` statements below are syntax
# errors under Python 3.  In Py2, json.loads returns unicode strings.
if __name__ == "__main__":
    b = json.loads(aa)
    print b # dict with unicode values
    print b["ab"] # decoded automatically (unicode)
    b["ab"] = b["ab"].encode("utf-8") # re-encode the value back to UTF-8 bytes
    print b
    print b["ab"] == "蔡锡生"
| true |
ab70993962c68287c5f0f5d271b59b52eb4da542 | Python | christianburkard/SingleEmitter_GUI | /Library/PIDMultiX.py | UTF-8 | 2,621 | 3.375 | 3 | [] | no_license | """
Created on 18.12.2019
Author: Christian Burkard
Masterthesis: Closed-Loop Control of an Acoustic Levitation System
PID control algorithm.
Example class call:
pid = readConfigPID()
pid.SetPoint = float(set point value)
pid.setSampleTime(0.0001)
pid.setKp(3.1) #default: 3.1
pid.setKi(120) #default: 89.7
pid.setKd(0.025) #default: 0.025
pid.update(control variable)
pidOutputVal = float(pid.output)
"""
import time
class PIDX:
    """Discrete PID controller for the X axis.

    Gains default to Kp=3.1, Ki=89.7, Kd=0.026.  Feed measurements through
    updatex(); the latest control value is stored in ``outputx``.
    ``currentTime`` may be injected (both here and in updatex) so tests can
    run deterministically; otherwise time.time() is used.
    """
    def __init__(self, Px=3.1, Ix=89.7, Dx=0.026, currentTime=None):
        self.Kpx = Px
        self.Kix = Ix
        self.Kdx = Dx
        self.sampleTimex = 0.00  # minimum seconds between recomputations
        self.currentTime = currentTime if currentTime is not None else time.time()
        self.lastTimex = self.currentTime
        self.clearx()
    def clearx(self):
        """Reset set-point, P/I/D terms, windup guard and output."""
        self.SetPointx = 0.0
        self.PTermx = 0.0
        self.ITermx = 0.0
        self.DTermx = 0.0
        self.lastErrorx = 0.0
        # Windup Guard: the integral accumulator is clamped to +/- this value
        self.intErrorx = 0.0
        self.windupGuardx = 20.0
        self.outputx = 0.0
    #calculates PID value for given reference feedback
    def updatex(self, feedbackValueX, currentTime=None):
        """Recompute ``outputx`` for the given feedback value.

        Only recomputes when at least ``sampleTimex`` seconds have elapsed
        since the previous accepted update; otherwise the call is a no-op.
        """
        errorx = self.SetPointx - feedbackValueX
        self.currentTime = currentTime if currentTime is not None else time.time()
        deltaTimex = self.currentTime - self.lastTimex
        deltaErrorx = errorx - self.lastErrorx
        if deltaTimex >= self.sampleTimex:
            self.PTermx = self.Kpx * errorx
            self.ITermx += errorx * deltaTimex
            # Anti-windup: clamp the accumulated integral term.
            if self.ITermx < -self.windupGuardx:
                self.ITermx = -self.windupGuardx
            elif self.ITermx > self.windupGuardx:
                self.ITermx = self.windupGuardx
            self.DTermx = 0.0
            if deltaTimex > 0:
                self.DTermx = deltaErrorx / deltaTimex
            #Remember last time and last error for next calculation
            self.lastTimex = self.currentTime
            self.lastErrorx = errorx
            self.outputx = self.PTermx + (self.Kix * self.ITermx) + (self.Kdx * self.DTermx)
    def setKpx(self, proportionalGainx):
        """Set the proportional gain Kp."""
        self.Kpx = proportionalGainx
    def setKix(self, integralGainx):
        """Set the integral gain Ki."""
        self.Kix = integralGainx
    def setKdx(self, derivativeGainx):
        """Set the derivative gain Kd."""
        self.Kdx = derivativeGainx
    def setWindupx(self, windupx):
        """Set the integral anti-windup clamp magnitude."""
        self.windupGuardx = windupx
    def setSampleTime(self, sampleTime):
        """PID that should be updated at a regular interval.
        Based on a pre-determined sample time, the PID decides if it should compute or return immediately.
        """
        # Bug fix: previously assigned self.sampleTime, an attribute that
        # updatex() never reads (it compares against self.sampleTimex), so
        # the configured sample time was silently ignored.
        self.sampleTimex = sampleTime
| true |
963a2592d6593a9dc761f60773d08b8237815ce9 | Python | manishkamarapu/Facial-Expression-Recognition-2018 | /src/models.py | UTF-8 | 5,065 | 2.53125 | 3 | [] | no_license | from keras.layers import Convolution2D, Activation, BatchNormalization, MaxPooling2D, Dropout, Dense, Flatten
from keras.models import Sequential
'''
TensorFlow Model(s)
'''
# shallow model
def get_model1():
    """Four single-conv blocks (64 -> 128 -> 256 -> 512 filters, 3x3 kernels)
    followed by two dense blocks (512 -> 256) and a 7-way softmax classifier.
    Expects 48x48 single-channel (grayscale) input images.
    """
    model = Sequential()
    for block_index, n_filters in enumerate((64, 128, 256, 512)):
        if block_index == 0:
            # The first conv layer declares the network's input shape.
            model.add(Convolution2D(n_filters, (3, 3), padding='same', input_shape=(48, 48, 1)))
        else:
            model.add(Convolution2D(n_filters, (3, 3), padding='same'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same'))
        model.add(Dropout(0.25))
    model.add(Flatten())
    for n_units in (512, 256):
        model.add(Dense(n_units))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(0.25))
    model.add(Dense(7))
    model.add(Activation('softmax'))
    return model
# actual model
def get_model2():
    """The model actually used: conv blocks of (64, 3x3), (128, 5x5),
    (512, 3x3), (512, 3x3), then Dense(256) -> Dense(512) -> softmax over
    the 7 expression classes.  Expects 48x48 grayscale input.
    """
    model = Sequential()
    conv_specs = ((64, (3, 3)), (128, (5, 5)), (512, (3, 3)), (512, (3, 3)))
    for block_index, (n_filters, kernel) in enumerate(conv_specs):
        if block_index == 0:
            # The first conv layer declares the network's input shape.
            model.add(Convolution2D(n_filters, kernel, padding='same', input_shape=(48, 48, 1)))
        else:
            model.add(Convolution2D(n_filters, kernel, padding='same'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same'))
        model.add(Dropout(0.25))
    model.add(Flatten())
    for n_units in (256, 512):
        model.add(Dense(n_units))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(0.25))
    model.add(Dense(7))
    model.add(Activation('softmax'))
    return model
# deeper model
def get_model3():
    """Deeper VGG-style CNN: four double-conv blocks (64, 128, 256, 512
    filters) each followed by BN/ReLU/pool/dropout(0.4), then Dense(2048)
    -> Dense(512) -> softmax over the 7 classes.  Input: 48x48 grayscale.
    """
    model = Sequential()
    model.add(Convolution2D(64, (3, 3), padding='same', input_shape=(48, 48, 1)))
    model.add(Convolution2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same'))
    model.add(Dropout(0.4))
    model.add(Convolution2D(128, (3, 3), padding='same'))
    model.add(Convolution2D(128, (3, 3), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same'))
    model.add(Dropout(0.4))
    model.add(Convolution2D(256, (3, 3), padding='same'))
    # Bug fix: the second conv of this block used 265 filters — a typo for
    # 256 (every other block pairs identical filter counts).
    model.add(Convolution2D(256, (3, 3), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same'))
    model.add(Dropout(0.4))
    model.add(Convolution2D(512, (3, 3), padding='same'))
    model.add(Convolution2D(512, (3, 3), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same'))
    model.add(Dropout(0.4))
    model.add(Flatten())
    model.add(Dense(2048))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(512))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(Dense(7))
    model.add(Activation('softmax'))
    return model
# shallow model
def get_model4():
    """Smallest variant: two conv blocks ((64, 3x3) then (128, 5x5)), one
    Dense(256) block, and a 7-way softmax.  Input: 48x48 grayscale images.
    """
    model = Sequential()
    for block_index, (n_filters, kernel) in enumerate(((64, (3, 3)), (128, (5, 5)))):
        if block_index == 0:
            # The first conv layer declares the network's input shape.
            model.add(Convolution2D(n_filters, kernel, padding='same', input_shape=(48, 48, 1)))
        else:
            model.add(Convolution2D(n_filters, kernel, padding='same'))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same'))
        model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(Dense(7))
    model.add(Activation('softmax'))
    return model
| true |
f282c9e0d1037953aaed91a309921b20590ef637 | Python | nofaralfasi/PERT-CPM-graph | /Pert_CPM.py | UTF-8 | 27,179 | 3.703125 | 4 | [] | no_license | import logging
import unittest
import itertools
def log_with_msg(msg):
    """Decorator factory: log *msg* at INFO level before every call.

    The wrapped callable is assumed to be a method (the wrapper forwards an
    explicit ``self``), matching how it is used throughout this module.
    """
    def log(func):
        def wrapper(self, *args, **kwargs):
            logger = logging.getLogger(__name__ + ": ")
            logger.info(msg)
            return func(self, *args, **kwargs)
        return wrapper
    return log
class Activity:
    """
    The Activity class
    -------------------
    represents the edges between each node
    Each activity has a unique name and its duration in the project
    Activities will be equal if their name and their duration is the same
    """
    @log_with_msg("Initializing Activity")
    def __init__(self, name, duration):
        # Backing fields for the `name` / `duration` properties.
        self._name = name
        self._duration = duration
    @log_with_msg("Returning Activity repr")
    def __repr__(self) -> str:
        return f"<{self.name}, {self.duration} weeks>"
    @log_with_msg("Returning Activity str")
    def __str__(self) -> str:
        return f"<{self.name}, {self.duration} weeks>"
    @property
    def name(self) -> str:
        """The activity's display name (unique within a project)."""
        return self._name
    @name.setter
    def name(self, name):
        self._name = name
    @property
    def duration(self) -> float:
        """How long the activity takes, in weeks."""
        return self._duration
    @duration.setter
    def duration(self, duration):
        self._duration = duration
    @log_with_msg("Comparing Activities")
    def __eq__(self, other) -> bool:
        # Equal when both the name and the duration match.
        return (self.name, self.duration) == (other.name, other.duration)
    def __ne__(self, other) -> bool:
        return not self.__eq__(other)
class Node:
    """
    The Node class
    --------------
    represents the actual node in the graph.
    knows the early and late finish times and also knows the slack time
    Each node is unique and is recognized by its number.
    A node is equal to another node if their number is equal (regardless of the other properties)
    It is set that way to keep the nodes unique
    Each node has an optional parallel node. If a node has a parallel node,
    both activities leading to those nodes must be completed together
    """
    @log_with_msg("Initializing Node")
    def __init__(self, number: int, *parallel_nodes: "List of Nodes"):
        self._number = number
        self._early_finish = 0  # earliest completion time, filled in by Project
        self._late_finish = 0   # latest completion time without delaying the project
        self._slack = 0         # late_finish - early_finish
        self._parallel_nodes = parallel_nodes
    @log_with_msg("Returning Node repr")
    def __repr__(self) -> repr:
        return f"(Node {self.number})"
    @log_with_msg("Returning Node str")
    def __str__(self) -> str:
        string = f"(Node {self.number})"
        # Only show [early, late] once the schedule has been computed.
        if not (self.late_finish == self.early_finish == 0):
            string += f"{[self.early_finish,self.late_finish]}"
        if self.has_parallel_nodes():
            string += " <---> {"
            for node in list(self.parallel_nodes)[:-1]:
                string += f"{node}, "
            string += f"{self.parallel_nodes[-1]}" + "}"
        return string
    @property
    def early_finish(self) -> float:
        return self._early_finish
    @early_finish.setter
    def early_finish(self, early_finish):
        self._early_finish = early_finish
    @property
    def late_finish(self) -> float:
        return self._late_finish
    @late_finish.setter
    def late_finish(self, late_finish):
        self._late_finish = late_finish
    @property
    def slack(self) -> float:
        return self._slack
    @slack.setter
    def slack(self, slack: float):
        self._slack = slack
    @property
    def number(self) -> int:
        return self._number
    @number.setter
    def number(self, number: int):
        self._number = number
    @property
    def parallel_nodes(self) -> tuple:
        return self._parallel_nodes
    @parallel_nodes.setter
    def parallel_nodes(self, parallel_nodes):
        # Bug fix: this setter previously declared *parallel_nodes
        # (star-args), so `node.parallel_nodes = (a, b)` stored the
        # double-wrapped tuple ((a, b),).  Accept the iterable directly and
        # normalize it to a tuple, matching what the getter advertises.
        self._parallel_nodes = tuple(parallel_nodes)
    @log_with_msg("Checking if Node has parallel nodes")
    def has_parallel_nodes(self) -> bool:
        return list(self.parallel_nodes) != []
    @log_with_msg("Comparing Nodes")
    def __eq__(self, other) -> bool:
        return self.number == other.number
    def __ne__(self, other) -> bool:
        return not self == other
    @log_with_msg("Hashing Node")
    def __hash__(self) -> int:
        # Annotation fixed: hash() returns int, not float.
        return hash(self.number)
    # For sorting purposes
    @log_with_msg("Checking what node is bigger")
    def __lt__(self, other) -> bool:
        return self.number < other.number
class Transition:
    """
    Transition class
    ----------------
    represents the transitions from one node to another.
    keeps track of the activity, the start node and the target node
    """
    @log_with_msg("Initializing Transition")
    def __init__(self, from_node: Node, activity: Activity, to_node: Node):
        # A directed edge of the PERT graph: from_node --activity--> to_node
        self._from_node = from_node
        self._activity = activity
        self._to_node = to_node
    @log_with_msg("Returning Transition repr")
    def __repr__(self) -> repr:
        return f"({repr(self._from_node)}, {self._activity}, {repr(self._to_node)})"
    @log_with_msg("Returning Transition str")
    def __str__(self) -> str:
        return f" {self.from_node} -> {self._activity} -> {self._to_node}"
    @property
    def from_node(self) -> Node:
        """The node this transition leaves."""
        return self._from_node
    @from_node.setter
    def from_node(self, from_node: Node):
        self._from_node = from_node
    @property
    def to_node(self) -> Node:
        """The node this transition arrives at."""
        return self._to_node
    @to_node.setter
    def to_node(self, to_node: Node):
        self._to_node = to_node
    @property
    def activity(self) -> Activity:
        """The activity carried on this edge."""
        return self._activity
    @activity.setter
    def activity(self, activity: Activity):
        self._activity = activity
    @log_with_msg("Comparing Transitions")
    def __eq__(self, other) -> bool:
        # Transitions are identified solely by their activity.
        return self.activity == other.activity
    def __ne__(self, other) -> bool:
        return not self.__eq__(other)
class Project:
    """
    The Pert class
    --------------
    The class which represents the pert, using a graph.
    The graph is a dictionary of {Node : list(Transition)} - each node with the corresponding transitions from it.
    If no graph was passed to the constructor, an empty graph is initialized

    NOTE(review): identity comparisons with int literals (`is 0` / `is not 0`)
    have been replaced with value comparisons (`== 0` / `!= 0`) throughout —
    the old form relied on CPython's small-int caching and emits a
    SyntaxWarning on Python 3.8+.
    """
    @log_with_msg("Initializing new PERT")
    def __init__(self, graph: dict = None):
        self._graph = graph if graph is not None else {}
        self._all_nodes = []
        self._all_paths = []
        self._all_transition = []
        self._all_activities = []
        self._slack_list = []
        self._isolated_list = []
        self._critical_paths = []
        self._start = None
        self._end = None
        self.update()
    @log_with_msg("Printing PERT")
    def __str__(self) -> str:
        string = '!!WARNING: Invalid Graph!!' if not self.is_valid() else ''
        for path in self.all_paths:
            string += f"\nCRITICAL PATH: " if path in self.critical_paths else f"\n" + "\t" * 3 + " " * 3
            for count, n in enumerate(path[:-1]):
                if n == self.start:
                    string += f"{([trans for trans in self.graph[path[count]] if trans.to_node == path[count + 1]])[0]}"
                elif self.end is not None and n == self.end:
                    string += f" -> {self.graph[path[count-1]][0].activity} -> {n}"
                else:
                    for trans in self.graph[n]:
                        if trans.to_node == path[count + 1]:
                            string += f"-> {trans.activity} -> {trans.to_node}"
                            break
            string += '\n'
        return string
    @property
    def graph(self) -> dict:
        return self._graph
    @graph.setter
    def graph(self, graph: dict):
        self._graph = graph
        self.update() if graph else self.__nullify_graph__()
    @property
    def all_nodes(self) -> list:
        return self._all_nodes
    @all_nodes.setter
    def all_nodes(self, all_nodes: list):
        self._all_nodes = all_nodes
    @property
    def all_paths(self) -> list:
        return self._all_paths
    @all_paths.setter
    def all_paths(self, all_paths: list):
        self._all_paths = all_paths
    @property
    def all_transition(self) -> list:
        return self._all_transition
    @all_transition.setter
    def all_transition(self, all_transition: list):
        self._all_transition = all_transition
    @property
    def all_activities(self) -> list:
        return self._all_activities
    @all_activities.setter
    def all_activities(self, all_activities: list):
        self._all_activities = all_activities
    @property
    def slack_list(self) -> list:
        return self._slack_list
    @slack_list.setter
    def slack_list(self, slack_list: list):
        self._slack_list = slack_list
    @property
    def isolated_list(self) -> list:
        return self._isolated_list
    @isolated_list.setter
    def isolated_list(self, isolated_list: list):
        self._isolated_list = isolated_list
    @property
    def critical_paths(self) -> list:
        return self._critical_paths
    @critical_paths.setter
    def critical_paths(self, critical_paths: list):
        self._critical_paths = critical_paths
    @property
    def start(self) -> Node:
        return self._start
    @start.setter
    def start(self, start: Node):
        self._start = start
    @property
    def end(self) -> Node:
        return self._end
    @end.setter
    def end(self, end: Node):
        self._end = end
    # nullifies the graph's properties
    @log_with_msg("Nullifying PERT")
    def __nullify_graph__(self):
        self.all_nodes = []
        self.all_transition = []
        self.isolated_list = []
        self.all_paths = []
        self.all_activities = []
        self.slack_list = []
        self.critical_paths = []
        self.start = None
        self.end = None
    # calculates the early finished, the late finishes, the slack times and the duration of the project
    @log_with_msg("Updating PERT")
    def update(self):
        if self.graph is not None:
            self.all_nodes = self.__get_all_nodes__()
            self.all_transition = self.__get_transition_list__()
            self.isolated_list = self.__get_isolated_nodes__()
            self.start = self.__get_start_node__()
            self.end = self.__get_end_node__()
            self.all_paths = self.__find_all_paths__(self.start)
            self.all_activities = self.__get_activities_list__()
            self.__calc_early_finishes__()
            self.__calc_late_finishes__()
            self.__calc_slack_times__()
            self.critical_paths = self.__get_critical_paths__()
            self.slack_list = self.__get_all_slacks__()
    # Return the length of the project
    @log_with_msg("Returning length of PERT")
    def __len__(self) -> float:
        return self.end.late_finish if self.graph is not None else 0
    # Returns a node from the graph which his number is node_number
    # @:param node_number - the number of the node which we want to retrieve
    @log_with_msg("Retrieving Node")
    def get_node_number(self, node_number: int) -> Node or None:
        # Annotation fixed: this returns a Node (or None), not a list.
        for node in self.all_nodes:
            if node.number == node_number:
                return node
        return None
    # Adds a new activity to the project.
    # @:param
    # from_node - the node number from which the activity is going
    # activity - the activity itself
    # to_node - the node number to which the activity is going
    @log_with_msg("Adding Activity")
    def add_activity(self, from_node: int, activity: Activity, to_node: int):
        f_node = self.get_node_number(from_node)
        t_node = self.get_node_number(to_node)
        transition = Transition(f_node if f_node else Node(from_node), activity, t_node if t_node else Node(to_node))
        if transition not in self._all_transition:
            self.graph[transition.from_node] = self.graph[transition.from_node] + [
                transition] if transition.from_node in self.all_nodes else [transition]
            if transition.to_node not in self.all_nodes:
                self.graph[transition.to_node] = []
            self.update()
    # adds an arbitrary amount of transitions to the graph
    # @:param *args - list of transitions to be added to the graph
    def add_activities(self, *args: "List of Transitions"):
        for transition in args:
            self.add_activity(transition.from_node.number, transition.activity, transition.to_node.number)
    # Removes a transition from the graph which his activity is the argument passed, thus removing the activity too
    # @:param activity - the activity whom transition is deleted
    @log_with_msg("Deleting Activity")
    def del_activity(self, activity: Activity):
        for transitions in self.graph.values():
            for transition in transitions:
                if activity == transition.activity:
                    transitions.remove(transition)
        self.update()
    # Returns an activity list
    @log_with_msg("Getting Activity list")
    def __get_activities_list__(self) -> list:
        return [transition.activity for transition in self.all_transition]
    # Return a list of all nodes, including isolated nodes
    @log_with_msg("Getting all nodes")
    def __get_all_nodes__(self) -> list:
        return list(self.graph.keys()) if self.graph is not None else []
    # Returns the transition list
    @log_with_msg("Getting Transition list")
    def __get_transition_list__(self) -> list:
        return list(itertools.chain(*self.graph.values())) if self.graph is not None else []
    # Returns a list of isolated nodes =
    # nodes which none of the activities are going to, and none of the activities are going from
    @log_with_msg("Getting isolated nodes")
    def __get_isolated_nodes__(self) -> list:
        return [node for node in self.all_nodes if
                not self.graph[node] and node not in [tr.to_node for tr in self.all_transition]]
    # Returns the critical paths in the project
    # By definition - a critical path is a path which every node in it has 0 slack time
    @log_with_msg("Getting critical paths")
    def __get_critical_paths__(self) -> list:
        # Fixed: `node.slack is not 0` -> `node.slack != 0` (value comparison).
        return [path for path in self.all_paths if not [node.slack for node in path if node.slack != 0]]
    # Returns true if and only if this graph is valid, aka - has no cycles in it
    # NOTE : a cyclic path in the graph is for example :
    # Node1->Node2->Node3->Node4->Node2
    @log_with_msg("Checking if valid")
    def is_valid(self) -> bool:
        return True not in [len(set(path)) < len(path) for path in self.all_paths]
    # Returns a sorted list of slack
    @log_with_msg("Getting all slack times")
    def __get_all_slacks__(self) -> list:
        # Fixed: `node.slack is not 0` -> `node.slack != 0`.
        return sorted([node.slack for node in self.all_nodes if node.slack != 0], reverse=True)
    # Returns the starting node, not including isolated nodes
    @log_with_msg("Getting start nodes")
    def __get_start_node__(self) -> Node:
        for node in self.all_nodes:
            if node not in [tr.to_node for tr in self.all_transition] and node not in self.isolated_list:
                return node
    # Returns the ending node, not including isolated nodes
    # NOTICE: if the graph is cyclic, there might not be an end node, in this case, the returned value will be None
    @log_with_msg("Getting end node")
    def __get_end_node__(self) -> Node or None:
        for node in self.all_nodes:
            if not self.graph[node] and not node.has_parallel_nodes() and node not in self.isolated_list:
                return node
        return None
    # Calculates the early finishes possible
    @log_with_msg("Calculating early finishes")
    def __calc_early_finishes__(self):
        for node in list(itertools.chain(*self.all_paths)):
            for transition in self._graph[node]:
                # Fixed: `early_finish is 0` -> `early_finish == 0`.
                transition.to_node.early_finish = transition.activity.duration + transition.from_node.early_finish \
                    if transition.to_node.early_finish == 0 else max(transition.to_node.early_finish,
                                                                     transition.from_node.early_finish +
                                                                     transition.activity.duration)
                for par_node in transition.to_node.parallel_nodes:
                    self.get_node_number(par_node.number).early_finish = max(transition.to_node.early_finish,
                                                                             par_node.early_finish)
    # Calculates the latest finishes possible
    @log_with_msg("Calculating late finishes")
    def __calc_late_finishes__(self):
        if self.end is not None:
            self.end.late_finish = self.end.early_finish
        for node in reversed(list(itertools.chain(*self.all_paths))):
            for transition in reversed(self.graph[node]):
                if transition.to_node.has_parallel_nodes():
                    late = min(
                        [self.get_node_number(par.number).late_finish for par in transition.to_node.parallel_nodes])
                    # if we haven't calculated late finish yet or if the late is smaller than the current late finish
                    # Fixed: `late_finish is 0` -> `late_finish == 0`.
                    if transition.to_node.late_finish == 0 or transition.to_node.late_finish > late:
                        transition.to_node.late_finish = late
                # if to_node.late_finish still 0, we can't compute its from_node.late_finish yet...
                # Fixed: `is not 0` / `is 0` -> `!= 0` / `== 0`.
                if transition.to_node.late_finish != 0:
                    transition.from_node.late_finish = transition.to_node.late_finish - transition.activity.duration \
                        if transition.from_node.late_finish == 0 and transition.from_node != self.start \
                        else min(transition.from_node.late_finish,
                                 transition.to_node.late_finish - transition.activity.duration)
    # Calculates the slack times for each node
    @log_with_msg("Calculating slack times")
    def __calc_slack_times__(self):
        for node in self.all_nodes:
            node.slack = node.late_finish - node.early_finish
    # Finds all the paths in this project
    # The search will not include paths with isolated nodes.
    @log_with_msg("Finding all paths")
    def __find_all_paths__(self, start_node: Node, path: list = None) -> list:
        graph = self.graph
        path = path if path is not None else []
        if start_node in path or not graph[start_node]:
            return [path + [start_node]]
        path = path + [start_node]
        if start_node not in graph:
            return []
        paths = []
        for transition in graph[start_node]:
            paths += [path for path in self.__find_all_paths__(transition.to_node, path)]
        return paths
    # Implementation of the contains method.
    # Returns true if and only if the item is in this graph
    # An item can be of class Node, Activity or Transition
    @log_with_msg("Checking if item is in PERT")
    def __contains__(self, item) -> bool:
        if not (isinstance(item, Node) or isinstance(item, Activity) or isinstance(item, Transition)):
            raise PermissionError("this item doesnt belong to the pert!")
        return self.get_node_number(item.number) is not None if isinstance(item, Node) else \
            item in self.all_activities if isinstance(item, Activity) else item in self.all_transition
"""
Test Classes - for your convenience
"""
class TestPert(unittest.TestCase):
    """Tests for Project using a system-development sample graph.

    The expected numbers below encode the schedule computed by hand for this
    11-node / 13-activity fixture; do not change them without re-deriving
    the PERT chart.
    """
    # node_list[0] is an unused placeholder (Node(-1)) so that node_list[i] == Node(i).
    node_list = [Node(-1)] + [Node(i) for i in range(1, 12)]
    transition_list = [
        Transition(node_list[1], Activity('Formalize specs', 6), node_list[2]),
        Transition(node_list[2], Activity('Design system', 4), node_list[3]),
        Transition(node_list[2], Activity('Certification Requirements', 3), node_list[4]),
        Transition(node_list[2], Activity('Design Software', 6), node_list[6]),
        Transition(node_list[3], Activity('Prototype System', 2), node_list[5]),
        Transition(node_list[4], Activity('Certification Documentation', 4), node_list[9]),
        Transition(node_list[9], Activity('Certification Application', 1), node_list[10]),
        Transition(node_list[10], Activity('Complete Certification', 4), node_list[11]),
        Transition(node_list[6], Activity('Code Software', 4), node_list[8]),
        Transition(node_list[8], Activity('Complete Software', 1), node_list[11]),
        Transition(node_list[5], Activity('Test System', 3), node_list[6]),
        Transition(node_list[6], Activity('Release System', 2), node_list[7]),
        Transition(node_list[7], Activity('Manufacture System', 4), node_list[11]),
    ]
    @staticmethod
    def create_new_graph():
        """Build the adjacency dict {Node: [outgoing Transitions]} for the fixture."""
        graph = {}
        for transition in TestPert.transition_list:
            if transition.from_node in graph.keys():
                graph[transition.from_node].append(transition)
            else:
                graph[transition.from_node] = [transition]
        # The sink node (11) has no outgoing transitions.
        graph[TestPert.node_list[11]] = []
        return graph
    def setUp(self):
        self.pert = Project(TestPert.create_new_graph())
    def tearDown(self):
        self.pert = None
    # Tests for first graph
    def test_starts(self):
        # (early_finish, late_finish) per node, sorted by node number.
        self.assertEqual([(node.early_finish, node.late_finish) for node in sorted(self.pert.all_nodes)],
                         [(0, 0), (6, 6), (10, 10), (9, 12), (12, 12),
                          (15, 15), (17, 17), (19, 20), (13, 16), (14, 17),
                          (21, 21)])
    def test_project_duration(self):
        self.assertEqual(21, len(self.pert))
    def test_isolated_activities(self):
        # A node added with no in- or out-edges must be reported as isolated.
        self.pert.graph[Node(14)] = []
        self.pert.update()
        self.assertEqual([node.number for node in self.pert.isolated_list], [14])
    def test_add_activity(self):
        self.pert.add_activity(11, Activity("Test Activity", 2), 12)
        self.assertEqual(12, self.pert.__get_end_node__().number)
        self.assertEqual(len(self.pert), 23)
    def test_del_activity(self):
        self.pert.del_activity(Activity('Design Software', 6))
        self.assertNotIn([1, 2, 6, 8, 11], [[node.number for node in path] for path in self.pert.all_paths])
        self.pert.add_activity(2, Activity('Design Software', 6), 6)
        # Removing the only activity out of the start makes node 2 the new start
        # and node 1 isolated.
        self.pert.del_activity(Activity('Formalize specs', 6))
        self.assertEqual(2, self.pert.start.number)
        self.assertIn(1, [node.number for node in self.pert.isolated_list])
    def test_critical_path(self):
        self.assertEqual([[node.number for node in path] for path in self.pert.critical_paths],
                         [[1, 2, 3, 5, 6, 7, 11], [1, 2, 6, 7, 11]])
    def test_valid_graph(self):
        self.assertEqual(self.pert.is_valid(), True)
    def test_invalid_graph(self):
        # Cycle 2 -> 3 -> 4 -> 2 must be flagged as invalid.
        graph = {Node(1): [Transition(Node(1), Activity("Test1", 1), Node(2))],
                 Node(2): [Transition(Node(2), Activity("Test2", 2), Node(3))],
                 Node(3): [Transition(Node(3), Activity("Test3", 3), Node(4))],
                 Node(4): [Transition(Node(4), Activity("Test4", 4), Node(2))]}
        self.pert.graph = graph
        self.assertEqual(self.pert.is_valid(), False)
    def test_slack_list(self):
        self.assertEqual(self.pert.slack_list, [3, 3, 3, 1])
class TestPert2(unittest.TestCase):
    """Tests for Project on a graph that exercises parallel nodes.

    Nodes constructed as Node(n, other...) declare that the activities
    leading into n and into the listed nodes must finish together.
    """
    node_list_2 = [Node(0),
                   Node(1, Node(2)),
                   Node(2),
                   Node(3),
                   Node(4, Node(3), Node(6)),
                   Node(5, Node(6)),
                   Node(6),
                   Node(7, Node(8)),
                   Node(8)]
    transition_list_2 = [
        Transition(node_list_2[0], Activity("Task1", 4), node_list_2[1]),
        Transition(node_list_2[0], Activity("Task5", 6), node_list_2[2]),
        Transition(node_list_2[0], Activity("Task9", 5), node_list_2[3]),
        Transition(node_list_2[2], Activity("Task2", 2), node_list_2[4]),
        Transition(node_list_2[3], Activity("Task6", 4), node_list_2[6]),
        Transition(node_list_2[4], Activity("Task8", 5), node_list_2[5]),
        Transition(node_list_2[4], Activity("Task10", 8), node_list_2[8]),
        Transition(node_list_2[4], Activity("Task3", 2), node_list_2[7]),
        Transition(node_list_2[5], Activity("Task4", 5), node_list_2[8]),
        Transition(node_list_2[6], Activity("Task7", 6), node_list_2[8]),
    ]
    @staticmethod
    def create_graph_with_parallels():
        """Build the adjacency dict for the parallel-nodes fixture."""
        graph = {}
        for transition in TestPert2.transition_list_2:
            if transition.from_node in graph.keys():
                graph[transition.from_node].append(transition)
            else:
                graph[transition.from_node] = [transition]
        # Nodes 1, 7 and 8 have no outgoing transitions.
        graph[TestPert2.node_list_2[1]] = []
        graph[TestPert2.node_list_2[7]] = []
        graph[TestPert2.node_list_2[8]] = []
        return graph
    def setUp(self):
        self.pert = Project(TestPert2.create_graph_with_parallels())
    def tearDown(self):
        self.pert = None
    # Tests for second graph:
    def test_graph_two_starts(self):
        # (early_finish, late_finish) per node, sorted by node number.
        self.assertEqual([(node.early_finish, node.late_finish) for node in sorted(self.pert.all_nodes)],
                         [(0, 0), (4, 6), (6, 6), (8, 9), (8, 8), (13, 13), (13, 13), (10, 19), (19, 19)])
    def test_graph_two_length(self):
        self.assertEqual(len(self.pert), 19)
    def test_graph_two_critical_paths(self):
        self.assertEqual([[node.number for node in path] for path in self.pert.critical_paths],
                         [[0, 2, 4, 5, 8], [0, 2, 4, 8]])
    def test_graph_two_isolated_activities(self):
        self.assertEqual(self.pert.isolated_list, [])
    def test_graph_two_valid_graph(self):
        self.assertEqual(self.pert.is_valid(), True)
    def test_add_new_activity_to_graph_two(self):
        self.pert.add_activity(8, Activity("Task12", 5), 9)
        self.assertEqual(9, self.pert.end.number)
        self.assertNotEqual(8, self.pert.end.number)
        self.assertEqual(len(self.pert), 24)
    def test_graph_two_del_activity(self):
        self.pert.del_activity(Activity('Task3', 2))
        self.assertNotIn([0, 2, 4, 7, 8], [[node.number for node in path] for path in self.pert.all_paths])
    def test_graph_two_slack_list(self):
        self.assertEqual(self.pert.slack_list, [9, 2, 1])
    def test_empty_graph(self):
        # Assigning None nullifies all derived collections.
        self.pert.graph = None
        self.assertEqual(len(self.pert), 0)
        self.assertEqual(self.pert.all_nodes, [])
        self.assertEqual(self.pert.isolated_list, [])
        self.assertEqual(self.pert.slack_list, [])
def read():
    """Print the contents of the log file produced by this module's __main__ block.

    Bug fix: the filename previously read 'HW1_Nofar_Pert_CPM.log', which does
    not match the file configured in logging.basicConfig
    ('HW1_Nofar_Alfasi_Pert_CPM.log'), so this function always raised
    FileNotFoundError.
    """
    with open('HW1_Nofar_Alfasi_Pert_CPM.log') as f:
        print("log file content:")
        print(f.read())
if __name__ == "__main__":
    # Route all INFO-level records (emitted by the log_with_msg decorators)
    # to the log file, overwriting it on each run.
    logging.basicConfig(level=logging.INFO, filename='HW1_Nofar_Alfasi_Pert_CPM.log', filemode='w', format='%(name)s %(message)s')
    # Pretty-print both sample projects, then run the unit tests.
    print(Project(TestPert.create_new_graph()))
    print(Project(TestPert2.create_graph_with_parallels()))
    unittest.main()
| true |
8db12aadc19be75b03dd5900f589283010bb99c3 | Python | MaheyDS/irobot-submission | /FreeFood.py | UTF-8 | 7,350 | 2.75 | 3 | [] | no_license | import requests,pprint,os
from configparser import ConfigParser
class FreeFood(object):
    """Interactive CLI helper: given ingredients the user already has, look up
    matching recipes over HTTP, let the user pick one, and build a shopping
    list (name, aisle, quantity, price) for the recipe's missing ingredients.
    """

    # Variables to dynamically build api endpoint
    API_KEY = os.environ.get('API_KEY')
    API_KEY_NAME = ''
    BASE_END_POINT = ''
    RECIPE_SUMMARY_END_POINT = ''
    RECIPE_PRICE_END_POINT = ''
    RECIPE_BY_INGREDIENTS_END_POINT = ''

    def __init__(self):
        # Final user Shopping List. Kept per-instance: the original used a
        # mutable class attribute, so every FreeFood instance shared (and
        # appended to) one and the same list.
        self.user_shopping_dict = []

    def load_configurations(self):
        """Load config.properties
        :return: None
        """
        config = ConfigParser()
        config.read('./config/config.properties')
        self.API_KEY_NAME = config.get('PRD','API_KEY_NAME')
        self.BASE_END_POINT = config.get('PRD','BASE_END_POINT')
        self.RECIPE_SUMMARY_END_POINT = config.get('PRD','RECIPE_SUMMARY_END_POINT')
        self.RECIPE_BY_INGREDIENTS_END_POINT = config.get('PRD','RECIPE_BY_INGREDIENTS_END_POINT')
        self.RECIPE_PRICE_END_POINT = config.get('PRD','RECIPE_PRICE_END_POINT')
        return

    def get_response(self, payload, http_method):
        """Given http_method address returns the JSON response
        Args:
            param1: payload dict of query parameters
            param2: http_method (endpoint path) to invoke
        Returns:
            Returns JSON response for the GET method, or None when any
            network/HTTP error occurred (errors are printed, not raised).
        """
        try:
            final_endpoint = self.BASE_END_POINT+http_method+self.API_KEY_NAME+self.API_KEY
            response = requests.get(final_endpoint, params=payload, timeout=6.0)
            # Raise HTTPError in case HTTP request returned an unsuccessful status code
            response.raise_for_status()
        except requests.ConnectionError as e:
            print("ERROR: Connection Error, check your network connection (internet issue)\n")
            print(f"ERROR: ErrorTrace for debugging:\n {str(e)}")
        except requests.exceptions.HTTPError as err:
            print("ERROR: Invalid HTTP Response")
            print(f"ERROR: ErrorTrace for debugging:\n {str(err)}")
        except requests.Timeout as e:
            print("ERROR: Timeout Error")
            print(f"ERROR: ErrorTrace for debugging:\n {str(e)}")
        except requests.RequestException as e:
            print("ERROR: General Errors")
            print(f"ERROR: ErrorTrace for debugging:\n {str(e)}")
        else:
            return response.json()

    def get_recipe_by_ingredients(self, ingredients):
        """Given comma seperated string of ingredients, return recipes
        Args:
            param1: string of ingredients ex: apple,banana
        Returns:
            Returns list of JSON Recipes (or None on request failure)
        """
        payload = {
            'ingredients': ingredients,
            'number': 100,
            'limit_license': False,
            'ranking': 1,
            'ignore_pantry': True
        }
        return self.get_response(payload, self.RECIPE_BY_INGREDIENTS_END_POINT)

    def get_recipe_summary(self, recipe_id):
        """Given a recipe_id get's the summary of recipe
        Args:
            param1: int value of recipe_id
        Returns:
            Returns JSON containing Recipe Summary (or None on failure)
        """
        payload = {
            'id': recipe_id
        }
        return self.get_response(payload, self.RECIPE_SUMMARY_END_POINT.replace('{id}', str(recipe_id)))

    def get_ingredient_price(self, recipe_id):
        """Given a recipe_id get's the price of every ingredients in recipe
        Args:
            param1: int value of recipe_id
        Returns:
            Returns JSON containing Recipe Price Summary (or None on failure)
        """
        payload = {
            'id': recipe_id
        }
        return self.get_response(payload, self.RECIPE_PRICE_END_POINT.replace('{id}', str(recipe_id)))

    def process_price_response(self, price_response):
        """Given a price_response returns dictionary of ingredients name and price
        Args:
            param1: JSON price response
        Returns:
            Returns dictionary containing ingredient's name & price
        """
        return { price["name"]: price["price"] for price in price_response["ingredients"] }

    def process_recipe_response(self, ingredients_response):
        """Given a JSON ingredients_response process the response
        Args:
            param1: JSON value of ingredient response
        Returns:
            None; fills self.user_shopping_dict for the first recipe the
            user accepts, then stops.
        """
        # Loop through the API response until the user is satisfied with a recipe
        for recipe in ingredients_response:
            # Get a Recipe Summary for the given Recipe ID
            recipe_summary = self.get_recipe_summary(recipe["id"])
            print(f"\nHere is the summary of a new Recipe: {recipe['title']} \n")
            pprint.pprint(recipe_summary['summary'])
            # Ask User if he/she is satisfied with the recipe
            user_satisfaction = input('\nDo you like this recipe??, please enter yes or no: ')
            while user_satisfaction.lower() not in ['yes','no']:
                print('Invalid Input')
                user_satisfaction = input('Do you like this recipe??, please enter yes or no: ')
            if user_satisfaction.lower() == 'yes':
                # Price dictionary consists of pricing for every ingredient in the recipe
                price_dictionary = self.process_price_response(self.get_ingredient_price(recipe["id"]))
                # Add Meta prefixes, and check
                for missedIngrd in recipe['missedIngredients']:
                    # BUG FIX: ingredient_price used to leak from the previous
                    # iteration (or be unbound on the first one) when neither
                    # lookup matched; default it per ingredient instead.
                    ingredient_price = 0.0
                    # Check if ingredient is available in Price Dictionary
                    if missedIngrd['name'] in price_dictionary:
                        ingredient_price = price_dictionary[missedIngrd['name']]
                    else:
                        # Now check appending Meta Tags
                        for metaTag in missedIngrd["meta"]:
                            if metaTag+" "+missedIngrd['name'] in price_dictionary:
                                ingredient_price = price_dictionary[metaTag+" "+missedIngrd['name']]
                    self.user_shopping_dict.append(
                        {
                            'ingredientName': missedIngrd['name'],
                            'aisleName': missedIngrd['aisle'],
                            'quantity': missedIngrd['amount'],
                            'estimatedCost': ingredient_price
                        }
                    )
                break
if __name__ == "__main__":
    # Instantiate Ingredients class
    my_recipe = FreeFood()
    # Load configFiles (endpoints and API key name from config.properties)
    my_recipe.load_configurations()
    # Step1: Get UserInputs from CLI
    ingredients_str = input('Enter comma seperated list of ingredients ex. apple,banana: ')
    # Step2: Given Ingredients now get the Recipes
    ingredients_response = my_recipe.get_recipe_by_ingredients(ingredients_str)
    # Step3: Get Recipe Summary, and factor user choices.
    # get_recipe_by_ingredients returns None on any request failure, which
    # bool() also treats as "no recipes" here.
    if bool(ingredients_response):
        my_recipe.process_recipe_response(ingredients_response)
        print("\nPlease find below the Shopping List to buy this item: \n")
        pprint.pprint(my_recipe.user_shopping_dict)
    # If the User's list of ingredients did not yield any Recipes
    else:
        print("\n No Recipes found for the given list of ingredients \n")
3b2c1cfd1745631c938b081f52eac3d35ff75710 | Python | pamtrg/Python-Tkinter-Simple-Application | /main.py | UTF-8 | 12,782 | 2.875 | 3 | [] | no_license | '''
IMDB - Python
'''
import tkinter
import tkinter.messagebox
import tkinter.ttk as ttk
import tkinter.font as tkFont
import tkinter as tk
from tkinter import *
import os.path
import sqlite3
class ImdbApplication(tkinter.Frame):
    """Tkinter GUI that stores films in a local SQLite database (imdb.db):
    add, search (by title or minimum IMDB score), edit in a detail window,
    and delete films shown in a Treeview."""

    # Scratch buffer used by bFill (4 first-name, 4 last-name, 4 phone slots).
    results = ["","","","","","","","","","","","", 0, 0, 0, 0]
    selectID = 0

    def __init__(self, parent):
        tkinter.Frame.__init__(self, parent)
        self.conn = sqlite3.connect('imdb.db') # create a connection
        self.curse = self.conn.cursor()
        self.CreateImdbApplicationTable()
        self.parent = parent
        self.parent.title('IMDB')
        self.initUI()
        self.pack(expand=True, fill=tkinter.BOTH)

    def initUI(self):
        """Build all widgets: search bar, entry fields, buttons and the film table."""
        titles = ['Film Adı:','Yap. Yılı:','Yönetmen:']
        self.searchlabel = tkinter.Label(self, text = "Film Ara:")
        self.searchlabel.pack()
        self.searchlabel.place(x = 20, y = 30, height=25)
        # One label per entry field, stacked vertically.
        for i in range(3):
            l = tkinter.Label(self, text=titles[i], fg='black')
            l.place(x = 20, y = 30 + (i+2)*30, height=25)
        # NOTE(review): the Entry/Spinbox/Button widgets below are created
        # without a parent (they attach to the default root, not this Frame).
        self.eFName = tkinter.Entry()
        self.eFName.place(x = 160, y = 90, width=140, height=25)
        self.eFName.configure(background="#fff", highlightbackground="#333", highlightcolor="#fff",font=("Arial", 10, "bold"))
        self.eLName = tkinter.Entry()
        self.eLName.place(x = 160, y = 120, width=140, height=25)
        self.eLName.configure(background="#fff", highlightbackground="#333", highlightcolor="#fff",font=("Arial", 10, "bold"))
        self.ePhone = tkinter.Entry()
        self.ePhone.place(x = 160, y = 150, width=140, height=25)
        self.ePhone.configure(background="#fff", highlightbackground="#333", highlightcolor="#fff",font=("Arial", 10, "bold"))
        self.SearchEntry = tkinter.Entry()
        self.SearchEntry.place(x = 160, y = 30, width=140, height=25)
        self.SearchEntry.configure(background="#fff", highlightbackground="#333", highlightcolor="#fff",font=("Arial", 10, "bold"))
        # Spinbox doubles as the "minimum IMDB score" filter; first value is
        # the placeholder meaning "no score filter".
        self.imdb_puan = tkinter.Spinbox(values=("IMDB PUANI","9+", "8+", "7+", "6+"))
        self.imdb_puan.place(x = 460, y = 30, width=140, height=25)
        self.imdb_puan.configure(background="#fff", highlightbackground="#333", highlightcolor="#fff",font=("Arial", 10, "bold"))
        self.searchButton = tkinter.Button(text = "Ara", fg='#fff', command=self.bSearch)
        self.searchButton.configure(background="#333",highlightbackground="#0CD9E8", highlightcolor="#0DFFCC",font=("Arial", 10, "bold"))
        self.searchButton.pack()
        self.searchButton.place(x = 330, y = 30, width=80, height=25)
        self.clearButton = tkinter.Button(self, text = "Temizle", fg='#fff', command=self.clearInputs)
        self.clearButton.configure(background="#333",highlightbackground="#0CD9E8", highlightcolor="#0DFFCC",font=("Arial", 10, "bold"))
        self.clearButton.pack()
        self.clearButton.place(x = 330, y = 190, width=80, height=25)
        self.saveButton = tkinter.Button(self, text = "Kaydet", fg='#fff', command=self.addB)
        self.saveButton.configure(background="#333", highlightbackground="#0CD9E8", highlightcolor="#0DFFCC",font=("Arial", 10, "bold"))
        self.saveButton.pack()
        self.saveButton.place(x = 160, y = 190, width=70, height=25)
        self.removeButton = tkinter.Button(self, text = "Kaldır", fg='#fff', command=self.deleteB)
        self.removeButton.configure(background="#333",highlightbackground="#0CD9E8", highlightcolor="#0DFFCC",font=("Arial", 10, "bold"))
        self.removeButton.pack()
        self.removeButton.place(x = 240, y = 190, width=60, height=25)
        # Film table: three display columns fed from the database.
        self.tree_header = ['Film Adı', 'Yapım Yılı', 'Yönetmen']
        self.tree_list = self.getFilms()
        self.tree = ttk.Treeview(columns=self.tree_header, show="headings")
        vsb = ttk.Scrollbar(orient="vertical",command=self.tree.yview)
        hsb = ttk.Scrollbar(orient="horizontal",command=self.tree.xview)
        self.tree.configure(yscrollcommand=vsb.set,xscrollcommand=hsb.set)
        ttk.Style().configure("Treeview", background="#fff",foreground="#333", fieldbackground="#eee",font=("Arial", 12, "bold"))
        self.tree.pack()
        self.tree.place(x = 20, y = 250, width=700, height=210)
        self._build_tree()

    def CreateImdbApplicationTable(self):
        """Create the films table (with autoincrement ID) if it does not exist."""
        sql = '''CREATE TABLE IF NOT EXISTS films (film_adi text, yapim_yili text, yonetmen text,ozet text, imdb_puani text, ID integer PRIMARY KEY AUTOINCREMENT)'''
        self.curse.execute(sql)
        self.conn.commit()

    def getFilms(self):
        """Return all films as a list of (title, year, director, summary, score) rows."""
        connx = sqlite3.connect('imdb.db')
        c = connx.cursor()
        films = c.execute('SELECT film_adi,yapim_yili,yonetmen,ozet,imdb_puani FROM films').fetchall()
        return films

    def bSearch(self):
        """Search by exactly one criterion: film title OR minimum IMDB score."""
        if (len(self.SearchEntry.get()) == 0) and (self.imdb_puan.get() == 'IMDB PUANI'):
            tkinter.messagebox.showinfo('Mesaj', 'Bir arama kriteri belirleyin.')
        else:
            if (len(self.SearchEntry.get()) != 0) and (self.imdb_puan.get() != 'IMDB PUANI'): # i.e. both criteria are filled
                tkinter.messagebox.showinfo('Mesaj', 'Tek kriter belirleyin..')
            else:
                if self.imdb_puan.get() == 'IMDB PUANI': # i.e. only the title criterion is filled
                    film_adi = (self.SearchEntry.get(),)
                    sql = '''SELECT * FROM films where film_adi = ?'''
                    self.curse.execute(sql, film_adi)
                    rows = self.curse.fetchall()
                    if rows:
                        # Replace the table contents with the matching rows.
                        for i in self.tree.get_children():
                            self.tree.delete(i)
                        for item in rows:
                            self.tree.insert('', 'end', values=item)
                        self.tree.bind("<ButtonRelease-1>", self.clickItem)
                        self.tree.bind("<Double-1>", self.selectItem)
                    else:
                        self.initUI()
                        tkinter.messagebox.showinfo('Mesaj', 'Sonuç bulunamadı..')
                else: # i.e. only the IMDB score criterion is filled
                    imdb_pu = self.imdb_puan.get()[:-1] # [:-1] strips the trailing '+' character
                    sql = '''SELECT * FROM films where imdb_puani >= ?'''
                    # NOTE(review): imdb_pu is passed as the parameter
                    # *sequence* itself; this only works while the value is a
                    # single character (e.g. '9') -- confirm against the
                    # sqlite3 parameter-substitution rules.
                    self.curse.execute(sql, imdb_pu)
                    rows = self.curse.fetchall()
                    if rows:
                        for i in self.tree.get_children():
                            self.tree.delete(i)
                        for item in rows:
                            self.tree.insert('', 'end', values=item)
                        self.tree.bind("<ButtonRelease-1>", self.clickItem)
                        self.tree.bind("<Double-1>", self.selectItem)

    def bFill(self, n):
        """Copy slot n of the results buffer into the three entry fields."""
        self.SetEntryText(self.eFName, self.results[n])
        self.SetEntryText(self.eLName, self.results[n+4])
        self.SetEntryText(self.ePhone, self.results[n+8])

    def addB(self):
        """Insert a new film from the entry fields, rejecting duplicate titles."""
        if self.eFName.get() != '':
            new_record = [(self.eFName.get(),self.eLName.get(),self.ePhone.get())]
            # Check whether a film with this title already exists.
            connx = sqlite3.connect('imdb.db')
            c = connx.cursor()
            c.execute('SELECT COUNT(*) FROM films WHERE film_adi = ?',(self.eFName.get(),))
            films = c.fetchone()[0]
            if films==0:
                for item in new_record:
                    self.tree.insert('', 'end', values=item)
                db = sqlite3.connect('imdb.db')
                cursor = db.cursor()
                cursor.execute('''INSERT INTO films (film_adi, yapim_yili, yonetmen, ozet, imdb_puani) VALUES(?,?,?,?,?)''', (self.eFName.get(), self.eLName.get(), self.ePhone.get(), "","0"))
                db.commit()
                tkinter.messagebox.showinfo('Mesaj', 'Başarıla Eklendi..')
            else:
                tkinter.messagebox.showinfo('Mesaj', '""'+self.eFName.get()+'""'+' filmi zaten bulunuyor..')
        else:
            tkinter.messagebox.showinfo('Mesaj', '"Film Adı" boş olamaz.')

    def deleteB(self):
        """Delete the selected film from the database and the table view."""
        try:
            selected_item = self.tree.selection()[0]
            film_adi = self.tree.item(self.tree.selection())['values'][0]
            tkinter.messagebox.showinfo('Mesaj', '"'+film_adi+'"'+ ' kaldırıldı.. ')
            sql = '''DELETE FROM films WHERE film_adi = ? '''
            self.curse.execute(sql, (film_adi,))
            self.conn.commit()
            self.tree.delete(self.tree.selection()[0])
        except IndexError:
            # Nothing was selected in the tree.
            tkinter.messagebox.showinfo('Mesaj', 'Seçilmedi !')

    def CreateIMDBTable(self):
        """Create a films table without the ID column.

        NOTE(review): no caller is visible in this file (``__init__`` uses
        CreateImdbApplicationTable instead) -- looks like a legacy duplicate.
        """
        sql = '''CREATE TABLE IF NOT EXISTS films (film_adi text, yapim_yili text, yonetmen text, ozet text, imdb_puani text)'''
        self.curse.execute(sql)
        self.conn.commit()

    def SetEntryText(self, txtObject, value):
        """Replace the full contents of an Entry widget with ``value``."""
        txtObject.delete(0, tkinter.END)
        txtObject.insert(0, value)

    def __del__(self):
        self.conn.close() # close the connection when the Window is closed

    def _build_tree(self):
        """Configure table headings and populate rows from self.tree_list."""
        for col in self.tree_header:
            self.tree.heading(col, text=col.title(),command=lambda c=col: self.tree,anchor=tk.W)
            self.tree.column(col,width=140,anchor=tk.W)
        for item in self.tree_list:
            self.tree.insert('', 'end', values=item)
        self.tree.bind("<ButtonRelease-1>", self.clickItem)
        self.tree.bind("<Double-1>", self.selectItem)

    def selectItem(self, event):
        """Double-click handler: open a detail window for the focused film."""
        curItem = self.tree.focus()
        t_new = tk.Toplevel(takefocus = True)
        t_new.geometry('400x500')
        t_new.resizable(width='FALSE', height='FALSE')
        t_new.wm_title("%s - %s" % (self.tree.item(curItem)['values'][0], self.tree.item(curItem)['values'][1]))
        # Load the full record (including summary and score) for this title.
        connx = sqlite3.connect('imdb.db')
        c = connx.cursor()
        films = c.execute('SELECT * FROM films WHERE film_adi = ?',(self.tree.item(curItem)['values'][0],)).fetchall()
        l1 = tkinter.Label(t_new, text="Film Adı", fg='black')
        l1.place(x = 7, y = 10, height=25)
        self.e1 = tkinter.Entry(t_new)
        self.e1.place(x = 10, y = 30, width=190, height=25)
        self.e1.configure(background="#fff", highlightbackground="#333", highlightcolor="#fff",font=("Arial", 10, "bold"))
        self.SetEntryText(self.e1,self.tree.item(curItem)['values'][0])
        l2 = tkinter.Label(t_new, text="Film Yapımcısı", fg='black')
        l2.place(x = 7, y = 60, height=25)
        self.e2 = tkinter.Entry(t_new)
        self.e2.place(x = 10, y = 80, width=190, height=25)
        self.e2.configure(background="#fff", highlightbackground="#333", highlightcolor="#fff",font=("Arial", 10, "bold"))
        self.SetEntryText(self.e2,self.tree.item(curItem)['values'][2])
        l3 = tkinter.Label(t_new, text="Film Tarihi", fg='black')
        l3.place(x = 7, y = 110, height=25)
        self.e3 = tkinter.Entry(t_new)
        self.e3.place(x = 10, y = 130, width=190, height=25)
        self.e3.configure(background="#fff", highlightbackground="#333", highlightcolor="#fff",font=("Arial", 10, "bold"))
        self.SetEntryText(self.e3,self.tree.item(curItem)['values'][1])
        # NOTE(review): ``l3`` is rebound here, orphaning the previous label
        # variable (the widget itself stays visible).
        l3 = tkinter.Label(t_new, text="Film Özet,", fg='black')
        l3.place(x = 7, y = 160, height=25)
        self.e4 = tkinter.Text(t_new)
        self.e4.place(x = 10, y = 180, width=290, height=85)
        self.e4.configure(background="#fff", highlightbackground="#333", highlightcolor="#fff",font=("Arial", 10, "bold"))
        self.e4.insert(INSERT, films[0][3])
        l4 = tkinter.Label(t_new, text="IMDB Puanı", fg='black')
        l4.place(x = 7, y = 270, height=25)
        self.e5 = tkinter.Entry(t_new)
        self.e5.place(x = 10, y = 290, width=190, height=25)
        self.e5.configure(background="#fff", highlightbackground="#333", highlightcolor="#fff",font=("Arial", 10, "bold"))
        imdb_p = films[0][4]
        # Older rows may carry NULL as the score; display a neutral default.
        if imdb_p is None:
            imdb_p = '0.0'
        self.SetEntryText(self.e5,imdb_p)
        self.saveDetailButton = tkinter.Button(t_new, text = "Kaydet", fg='#fff', command=self.saveDetail)
        self.saveDetailButton.configure(background="#333", highlightbackground="#0CD9E8", highlightcolor="#0DFFCC",font=("Arial", 10, "bold"))
        self.saveDetailButton.pack()
        self.saveDetailButton.place(x = 10, y = 320, width=108, height=25)

    def saveDetail(self):
        """Persist the detail-window fields back to the database and refresh the table."""
        film_adi = self.e1.get()
        film_tarihi = self.e3.get()
        film_yapimcisi = self.e2.get()
        film_ozeti = self.e4.get("1.0",END)
        film_imdb_puani = self.e5.get()
        db = sqlite3.connect('imdb.db')
        cursor = db.cursor()
        # NOTE(review): WHERE matches the *new* title, so a renamed film will
        # not update its original row -- confirm intended behaviour.
        cursor.execute('UPDATE films SET film_adi=?, yapim_yili=?, yonetmen=?, ozet=?, imdb_puani=? WHERE film_adi=?',(film_adi,film_tarihi,film_yapimcisi,film_ozeti,film_imdb_puani,film_adi,))
        db.commit()
        # Refresh the main table from the database.
        for i in self.tree.get_children():
            self.tree.delete(i)
        for item in self.getFilms():
            self.tree.insert('', 'end', values=item)
        self.tree.bind("<ButtonRelease-1>", self.clickItem)
        self.tree.bind("<Double-1>", self.selectItem)
        tkinter.messagebox.showinfo('Mesaj', 'Değişiklikler gerçekleştirildi..')

    def clickItem(self, event):
        """Single-click handler: mirror the focused row into the entry fields."""
        curItem = self.tree.focus()
        film_adi = str(self.tree.item(curItem)['values'][0])
        yapim_yili = str(self.tree.item(curItem)['values'][1])
        yonetmen = str(self.tree.item(curItem)['values'][2])
        self.SetEntryText(self.eFName, film_adi)
        self.SetEntryText(self.eLName, yapim_yili)
        self.SetEntryText(self.ePhone, yonetmen)

    def clearInputs(self):
        """Clear all entry fields and reload the full film list into the table."""
        self.SetEntryText(self.eFName, "")
        self.SetEntryText(self.eLName, "")
        self.SetEntryText(self.ePhone, "")
        self.SetEntryText(self.SearchEntry, "")
        for i in self.tree.get_children():
            self.tree.delete(i)
        for item in self.getFilms():
            self.tree.insert('', 'end', values=item)
        self.tree.bind("<ButtonRelease-1>", self.clickItem)
        self.tree.bind("<Double-1>", self.selectItem)
def main():
    """Build the fixed-size Tk root window and run the application event loop."""
    root = tkinter.Tk()
    root.geometry('750x500')
    root.resizable(width='FALSE', height='FALSE')
    # Default background colour for all widgets.
    root.option_add("*background", "#fff373")
    app = ImdbApplication(root)
    root.mainloop()


if __name__ == "__main__":
    main()
| true |
3a08964365753778bb6288bba14ac3884d09203e | Python | Vitor-Aguiar/1-Mar.-Prog. | /4.py | UTF-8 | 103 | 3.59375 | 4 | [] | no_license | numero = int(input())
# Print "i^2 = i*i" for every even i up to and including numero, walking the
# even numbers directly instead of filtering inside the loop.
for even in range(2, numero + 1, 2):
    print("%d^2 = %d" % (even, even ** 2))
| true |
f06364a58b82ab979267a189f7c45c59a26d0d76 | Python | mingming733/LCGroup | /Sen/Min Stack.py | UTF-8 | 646 | 3.625 | 4 | [] | no_license | class MinStack:
# initialize your data structure here.
def __init__(self):
self.S = []
self.minS = []
# @param x, an integer
# @return nothing
def push(self, x):
self.S.append(x)
if self.minS == []:
self.minS.append(x)
else:
if self.minS[-1] >= x:
self.minS.append(x)
# @return nothing
def pop(self):
top = self.S.pop()
if self.minS[-1] == top:
self.minS.pop()
    # @return an integer
    def top(self):
        """Return (without removing) the most recently pushed value."""
        return self.S[-1]
    # @return an integer
    def getMin(self):
        """Return the current minimum of the stack in O(1)."""
        return self.minS[-1]
d34aaf43c87bed967e7e3515a72a9b87ed910a03 | Python | Aasthaengg/IBMdataset | /Python_codes/p00007/s693720304.py | UTF-8 | 112 | 2.953125 | 3 | [] | no_license | a=100000
n = int(input())
for _ in range(n):
    # Apply 5% weekly interest in exact integer arithmetic. ``a`` starts as
    # (and stays) a multiple of 1000, so a * 105 is always divisible by 100.
    # BUG FIX: the float version (a *= 1.05) was exposed to binary rounding
    # error and could print a trailing '.0' when no round-up occurred.
    a = a * 105 // 100
    if a % 1000:
        # Round the debt up to the next full 1000.
        a = a // 1000 * 1000 + 1000
print(a)
| true |
cfbdcd47e4d0a5aa57dbccb338084af780a89db9 | Python | nathancy/stackoverflow | /58133383-blur-section-using-mask/blur_section_of_image.py | UTF-8 | 442 | 2.625 | 3 | [
"MIT"
] | permissive | import cv2
# Load the input image and the mask whose white blobs mark the regions to blur.
image = cv2.imread('1.png')
mask = cv2.imread('mask.png')
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
# Find the outer contours of the mask blobs; the length check picks the
# contours element from either the 2-tuple or 3-tuple return form of
# findContours (differs between OpenCV versions).
cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
# Gaussian-blur each contour's bounding rectangle in place.
for c in cnts:
    x,y,w,h = cv2.boundingRect(c)
    ROI = image[y:y+h, x:x+w]
    image[y:y+h, x:x+w] = cv2.GaussianBlur(ROI, (41,41), 0)
cv2.imshow('image', image)
# NOTE(review): only the last ROI is shown; raises NameError if no contours were found.
cv2.imshow('ROI', ROI)
cv2.waitKey()
850898e4d0ccde43246580b466024d1d82342bae | Python | hohh0115/Data-Structures-Practices-with-Python | /Linear List/環狀雙向鏈結串列 Circular Doubly Linked List/build_insert_traverse_delete_01.py | UTF-8 | 5,681 | 3.90625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
import sys
class Node():
    """A single element of the circular doubly linked list."""

    def __init__(self, d=None, n=None, p=None):
        """Create a node.

        :param d: payload stored in the node
        :param n: successor node
        :param p: predecessor node
        """
        self.data, self.next, self.prior = d, n, p

    def print_data(self):
        """Print this node's payload to stdout."""
        print("Node value: " + str(self.data))
class DoublyLinkedList():
    """Circular doubly linked list with dummy ``head`` and ``tail`` sentinels.

    Data nodes live between the two sentinels; ``size`` counts only data
    nodes. All positions used by the public API are 1-based.
    """

    def __init__(self):
        """Create an empty list: the two sentinels point at each other."""
        self.head = Node(d='head')
        self.tail = Node(d='tail')
        self.size = 0
        self.head.next = self.head.prior = self.tail
        self.tail.next = self.tail.prior = self.head

    def insert_at_front(self, user_data):
        """Insert ``user_data`` right after the head sentinel."""
        new_node = Node(user_data, self.head.next, self.head)
        self.head.next.prior = new_node
        self.head.next = new_node
        self.size += 1

    def insert_at_end(self, user_data):
        """Insert ``user_data`` right before the tail sentinel."""
        new_node = Node(user_data, self.tail, self.tail.prior)
        self.tail.prior.next = new_node
        self.tail.prior = new_node
        self.size += 1

    def insert(self, user_data, position):
        """Insert ``user_data`` so that it becomes the 1-based ``position``-th node.

        Prints 'Position not exist' for an out-of-range position.
        """
        curr_node = self.head
        curr_position = 1
        if 0 < position <= self.size:
            # Walk to the node just before ``position``.
            while curr_position < position:
                curr_node = curr_node.next
                curr_position += 1
            new_node = Node(user_data, curr_node.next, curr_node)
            curr_node.next.prior = new_node
            curr_node.next = new_node
            self.size += 1
        else:
            print('Position not exist')

    def fetch_position_data(self, position):
        """Print the payload stored at the 1-based ``position``."""
        if 0 < position <= self.size:
            curr_node = self.head.next
            curr_position = 1
            # Walk to the node at ``position``.
            while curr_position < position:
                curr_node = curr_node.next
                curr_position += 1
            print('Position', position, 'data:', curr_node.data)
        else:
            print('Position not exist')

    def remove_first(self):
        """Unlink the first data node (the one right after the head sentinel)."""
        delete_node = self.head.next
        self.head.next = self.head.next.next
        delete_node.next.prior = self.head
        delete_node.next = delete_node.prior = None
        # BUG FIX: this used to do ``self.size += 1``; removing a node must
        # shrink the list, matching remove_last/remove.
        self.size -= 1

    def remove_last(self):
        """Unlink the last data node (the one right before the tail sentinel)."""
        delete_node = self.tail.prior
        self.tail.prior = delete_node.prior
        delete_node.prior.next = self.tail
        delete_node.next = delete_node.prior = None
        self.size -= 1

    def remove(self, position):
        """Unlink the node at the 1-based ``position``.

        Prints 'Position not exist' for an out-of-range position.
        """
        delete_node = self.head.next
        curr_position = 1
        if 0 < position <= self.size:
            # Walk to the node at ``position``.
            while curr_position < position:
                delete_node = delete_node.next
                curr_position += 1
            delete_node.prior.next = delete_node.next
            delete_node.next.prior = delete_node.prior
            delete_node.next = delete_node.prior = None
            self.size -= 1
        else:
            print('Position not exist')

    def print_list_size(self):
        """Print the current number of data nodes."""
        print('List Size:', self.size)

    def print_list(self):
        """Print every data node from front to back."""
        curr_node = self.head.next
        # The tail sentinel is the only node whose ``next`` is the head, so
        # the loop stops just before printing it.
        while curr_node.next != self.head:
            curr_node.print_data()
            curr_node = curr_node.next
def main():
    """Exercise the list: build it with all three insert flavours, inspect it,
    then delete from the front, back, and middle."""
    myList = DoublyLinkedList()
    myList.insert_at_end('1')
    myList.insert_at_end('2')
    myList.insert_at_front('0')
    myList.insert_at_end('3')
    myList.insert('4', 2)
    myList.insert_at_end('5')
    myList.insert_at_front('6')
    myList.insert('7', 5)
    myList.print_list_size()
    myList.print_list()
    myList.fetch_position_data(7)
    myList.fetch_position_data(2)
    print('==================')
    print('Begin to remove...')
    myList.remove_first()
    myList.remove_last()
    myList.remove(4)
    myList.remove(1)
    myList.fetch_position_data(2)
    myList.print_list()


if __name__ == '__main__':
    main()
9dbfe5db0d68f4cbc0a20228c2f5374d00b49af4 | Python | AdeyinkaAdegbenro/Feature_Request_App | /tests.py | UTF-8 | 8,260 | 2.53125 | 3 | [] | no_license | import os
import unittest
from config import basedir
from app import app, db
from app.models import FeatureRequest, ProductArea, Client
from datetime import datetime
from flask import url_for
import json
class TestCase(unittest.TestCase):
    """Flask integration tests for the feature-request endpoint: creation,
    and how client_priority values shift (or not) between requests.

    NOTE(review): two of the POST helpers below omit
    content_type='application/json' while the others set it -- confirm the
    endpoint accepts both.
    """

    def setUp(self):
        # Point SQLAlchemy at a throwaway SQLite file and create the schema.
        app.config['SQLALCHEMY_DATABASE_URI'] = \
            'sqlite:///' + os.path.join(basedir, 'test.db')
        with app.app_context():
            self.app = app.test_client()
            db.create_all()

    def tearDown(self):
        # Drop everything so each test starts from an empty database.
        db.session.remove()
        db.drop_all()

    def test_landing_page(self):
        """The landing page must be reachable."""
        response = self.app.get('/')
        self.assertEqual(response.status_code, 200)

    def test_create_feature_request(self):
        """A POST must create the request plus its ProductArea and Client rows."""
        # check that post request for creating feature request
        # is successful
        form_values = [{'name': 'title',
                        'value': 'social media share button'},
                       {'name': 'description',
                        'value': 'a button that allows'
                                 ' sharing on social media'},
                       {'name': 'client', 'value': 'Client A'},
                       {'name': 'client_priority', 'value': 1},
                       {'name': 'target_date', 'value': '2018-01-30'},
                       {'name': 'product_area', 'value': 'Policies'}]
        response = self.app.post('/',
                                 data=json.dumps(form_values),
                                 content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # check ProductArea was created
        product_area = ProductArea.query.first()
        self.assertNotEqual(product_area, None)
        # check Client was created
        client = Client.query.first()
        self.assertNotEqual(client, None)
        # check all data are correct
        feature_request = FeatureRequest.query.first()
        self.assertNotEqual(feature_request, None)
        self.assertEqual(feature_request.title, 'social media share button')
        self.assertEqual(feature_request.description,
                         'a button that allows sharing on social media')
        self.assertEqual(feature_request.client.name, 'Client A')
        self.assertEqual(feature_request.client_priority, 1)
        self.assertEqual(feature_request.target_date,
                         datetime.strptime('2018-01-30', '%Y-%m-%d'))
        self.assertEqual(feature_request.product_area.name, 'Policies')

    def test_client_priority_shifts_with_same_client_same_priority_num(self):
        """Same client reusing priority 1 must bump the earlier request to 2."""
        form_values = [{'name': 'title',
                        'value': 'social media share button'},
                       {'name': 'description',
                        'value': 'a button that allows '
                                 'sharing on social media'},
                       {'name': 'client', 'value': 'Client A'},
                       {'name': 'client_priority', 'value': 1},
                       {'name': 'target_date', 'value': '2018-01-30'},
                       {'name': 'product_area', 'value': 'Policies'}]
        # create feature request by client A with client priority of 1
        response = self.app.post('/', data=json.dumps(form_values))
        self.assertEqual(response.status_code, 200)
        form_values2 = [{'name': 'title', 'value': 'Analytics Page'},
                        {'name': 'description',
                         'value': 'a page that show an analysis of users'},
                        {'name': 'client', 'value': 'Client A'},
                        {'name': 'client_priority', 'value': 1},
                        {'name': 'target_date', 'value': '2018-01-31'},
                        {'name': 'product_area', 'value': 'Policies'}]
        # create feature request by client A with client priority of 1
        response = self.app.post('/', data=json.dumps(form_values2))
        self.assertEqual(response.status_code, 200)
        # check that FeatureRequests created are two
        self.assertEqual(FeatureRequest.query.count(), 2)
        request1 = FeatureRequest.query.filter_by(id=1).one()
        request2 = FeatureRequest.query.filter_by(id=2).one()
        # check feature request 1 has client priority of 2
        self.assertEqual(request1.client_priority, 2)
        # check feature request 2 has client priority of 1
        self.assertEqual(request2.client_priority, 1)

    def test_same_client_priority_doesnt_shift_with_different_priority_num(self):
        """Same client with distinct priorities must keep both unchanged."""
        form_values = [{'name': 'title', 'value': 'social media share button'},
                       {'name': 'description',
                        'value': 'a button that allows'
                                 ' sharing on social media'},
                       {'name': 'client', 'value': 'Client A'},
                       {'name': 'client_priority', 'value': 1},
                       {'name': 'target_date', 'value': '2018-01-30'},
                       {'name': 'product_area', 'value': 'Policies'}]
        # create feature request by client A with client priority of 1
        response = self.app.post('/', data=json.dumps(form_values))
        self.assertEqual(response.status_code, 200)
        form_values2 = [{'name': 'title', 'value': 'Settings Page'},
                        {'name': 'description',
                         'value': 'a page that show settings'},
                        {'name': 'client', 'value': 'Client A'},
                        {'name': 'client_priority', 'value': 2},
                        {'name': 'target_date', 'value': '2018-01-31'},
                        {'name': 'product_area', 'value': 'Policies'}]
        # create feature request by client A with client priority of 2
        response = self.app.post('/', data=json.dumps(form_values2))
        self.assertEqual(response.status_code, 200)
        # check that FeatureRequests created are two
        self.assertEqual(FeatureRequest.query.count(), 2)
        # check feature request 1 has client priority of 1
        request1 = FeatureRequest.query.filter_by(id=1).one()
        self.assertEqual(request1.client_priority, 1)
        # check feature request 2 has client priority of 2
        request2 = FeatureRequest.query.filter_by(id=2).one()
        self.assertEqual(request2.client_priority, 2)

    def test_different_client_doesnt_shift_when_same_priority_num(self):
        """Different clients may both hold priority 1 without shifting."""
        # create feature request by client A with client priority of 1
        form_values = [{'name': 'title', 'value': 'social media share button'},
                       {'name': 'description',
                        'value': 'a button that allows '
                                 'sharing on social media'},
                       {'name': 'client', 'value': 'Client A'},
                       {'name': 'client_priority', 'value': 1},
                       {'name': 'target_date', 'value': '2018-01-30'},
                       {'name': 'product_area', 'value': 'Policies'}]
        response = self.app.post('/', data=json.dumps(form_values),
                                 content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # create feature request by client B with client priority of 1
        form_values2 = [{'name': 'title', 'value': 'Settings Page'},
                        {'name': 'description',
                         'value': 'a page that show settings'},
                        {'name': 'client', 'value': 'Client B'},
                        {'name': 'client_priority', 'value': 1},
                        {'name': 'target_date', 'value': '2018-01-31'},
                        {'name': 'product_area', 'value': 'Policies'}]
        response = self.app.post('/', data=json.dumps(form_values2),
                                 content_type='application/json')
        self.assertEqual(response.status_code, 200)
        # check feature request 1 by client A has client priority of 1
        request1 = FeatureRequest.query.filter_by(id=1).one()
        self.assertEqual(request1.client_priority, 1)
        # check feature request 2 by client B has client priority of 1
        request2 = FeatureRequest.query.filter_by(id=2).one()
        self.assertEqual(request2.client_priority, 1)
if __name__ == "__main__":
    # Allow running this module directly: ``python tests.py``.
    unittest.main()
| true |
4b14e17dbc877e12f36e0d56d770b904999a7d9d | Python | dubugun/big_data_web | /python/0201/0201_실습_5.py | UTF-8 | 169 | 3.265625 | 3 | [] | no_license | i, hap = 0,0
num2 = 0
num2 = int(input('끝값 입력 : '))
# For 1..num2 print the number itself, except for multiples of 3,
# where '짝 ' is printed instead.
for i in range(1, num2 + 1):
    if i % 3 == 0:
        print('짝 ')
    else:
        print(i, " ")
| true |
0972f85434f818dc8123aff653d15282c27ce16c | Python | arrti/myF2E | /lib/loader.py | UTF-8 | 1,515 | 2.78125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# coding=utf-8
#
# Copyright 2012 F2E.im
# Do have a faith in what you're doing.
# Make your life a story worth telling.
class Loader(object):
    """Lazily imports and caches model/handler objects by dotted name,
    e.g. ``use("user.model")`` imports ``model/user.py`` and instantiates
    ``UserModel`` exactly once."""

    def __init__(self, db):
        self.db = db
        # Cache of already-instantiated objects, keyed by kind then name.
        self.loaded = {
            "model": {},
            "handler": {},
        }

    def use(self, name):
        """Resolve a '<name>.<kind>' string to a cached model/handler instance.

        Returns None for any kind other than 'model' or 'handler'.
        """
        name = name.split(".")
        _name = name[0]
        _type = name[1]
        if(_type == "model"):
            return self.load_model(_name)
        if(_type == "handler"):
            return self.load_handler(_name)

    def load_model(self, name):
        """Import model.<name>, instantiate <Name>Model(db) once and cache it."""
        if(name in self.loaded["model"]):
            return self.loaded["model"][name]
        instance_name = "%s%s" % (name.capitalize(), "Model")
        self.loaded["model"][name] = __import__("model.%s" % name)
        # NOTE(review): class lookup via eval() on a derived path -- safe only
        # while ``name`` comes from trusted code, never from user input.
        self.loaded["model"][name] = eval('self.loaded["model"][name].%s.%s' % (name, instance_name))
        self.loaded["model"][name] = self.loaded["model"][name](self.db)
        return self.loaded["model"][name]

    def load_handler(self, name):
        """Import handler.<name>, instantiate <Name>Handle(...) once and cache it."""
        if(name in self.loaded["handler"]):
            return self.loaded["handler"][name]
        instance_name = "%s%s" % (name.capitalize(), "Handle")
        self.loaded["handler"][name] = __import__("handler.%s" % name)
        self.loaded["handler"][name] = eval('self.loaded["handler"][name].%s.%s' % (name, instance_name))
        # NOTE(review): ``self.loader`` is never assigned anywhere in this
        # class, so this line raises AttributeError at runtime. Probably meant
        # ``self`` (or ``self.db`` to mirror load_model) -- confirm intent.
        self.loaded["handler"][name] = self.loaded["handler"][name](self.loader)
        return self.loaded["handler"][name]
| true |
120a8c9d0d9729bb3ae4951a2ec0a8cb839cac6a | Python | pombredanne/Deltas | /deltas/tokenizers/tests/test_wikitext_split.py | UTF-8 | 3,185 | 3.25 | 3 | [
"MIT"
] | permissive | from nose.tools import eq_
from ..wikitext_split import wikitext_split
def test_wikitext_split():
input = "As a sentence, this 34 includes punctuation. \n" + \
"\n" + \
"==Header!==\n" + \
"克·科伊尔 し〤。foobar!" + \
"And then we have another sentence here!\n" + \
"[//google.com foo] " + \
"https://website.gov?param=value\n" + \
"peoples' ain't d’encyclopédie\n" + \
"[[foo|bar]]" + \
"mailto:email@email.mail"
expected = [('As', 'word'),
(' ', 'whitespace'),
('a', 'word'),
(' ', 'whitespace'),
('sentence', 'word'),
(',', 'comma'),
(' ', 'whitespace'),
('this', 'word'),
(' ', 'whitespace'),
('34', 'number'),
(' ', 'whitespace'),
('includes', 'word'),
(' ', 'whitespace'),
('punctuation', 'word'),
('.', 'period'),
(' ', 'whitespace'),
('\n\n', 'break'),
('==', 'equals'),
('Header', 'word'),
('!', 'epoint'),
('==', 'equals'),
('\n', 'whitespace'),
('克', 'cjk'),
('·', 'etc'),
('科', 'cjk'),
('伊', 'cjk'),
('尔', 'cjk'),
(' ', 'whitespace'),
('し', 'cjk'),
('〤', 'japan_punct'),
('。', 'japan_punct'),
('foobar', 'word'),
('!', 'epoint'),
('And', 'word'),
(' ', 'whitespace'),
('then', 'word'),
(' ', 'whitespace'),
('we', 'word'),
(' ', 'whitespace'),
('have', 'word'),
(' ', 'whitespace'),
('another', 'word'),
(' ', 'whitespace'),
('sentence', 'word'),
(' ', 'whitespace'),
('here', 'word'),
('!', 'epoint'),
('\n', 'whitespace'),
('[', 'brack_open'),
('//google.com', 'url'),
(' ', 'whitespace'),
('foo', 'word'),
(']', 'brack_close'),
(' ', 'whitespace'),
('https://website.gov?param=value', 'url'),
('\n', 'whitespace'),
('peoples\'', 'word'),
(' ', 'whitespace'),
('ain\'t', 'word'),
(' ', 'whitespace'),
('d’encyclopédie', 'word'),
('\n', 'whitespace'),
('[[', 'dbrack_open'),
('foo', 'word'),
('|', 'bar'),
('bar', 'word'),
(']]', 'dbrack_close'),
('mailto:email@email.mail', 'url')]
tokens = list(wikitext_split.tokenize(input))
for token, (s, t) in zip(tokens, expected):
print(repr(token), (s, t))
eq_(token, s)
eq_(token.type, t)
| true |
deaa6cf7aa16ff03e71a73d64f1203848adfe6a1 | Python | emzee831/sinethesizer | /sinethesizer/synth/effects.py | UTF-8 | 11,576 | 3.0625 | 3 | [
"MIT"
] | permissive | """
Modify sound with effects.
Author: Nikolay Lysenko
"""
from typing import Callable, Dict, List, Optional, Tuple
import numpy as np
from scipy.signal import butter, sosfilt
from sinethesizer.synth.waves import generate_wave
EFFECT_FN_TYPE = Callable[[np.ndarray, int], np.ndarray]
def frequency_filter(
sound: np.ndarray, frame_rate: int,
min_frequency: Optional[float] = None,
max_frequency: Optional[float] = None,
invert: bool = False, order: int = 10
) -> np.ndarray:
"""
Filter some frequencies from original sound.
:param sound:
sound to be modified
:param frame_rate:
number of frames per second
:param min_frequency:
cutoff frequency for high-pass filtering (in Hz);
there is no high-pass filtering by default
:param max_frequency:
cutoff frequency for low-pass filtering (in Hz);
there is no low-pass filtering by default
:param invert:
if it is `True` and both `min_frequency` and `max_frequency`
are passed, band-stop filter is applied instead of band-pass filter
:param order:
order of the filter; the higher it is, the steeper cutoff is
:return:
sound with some frequencies muted
"""
invert = invert and min_frequency is not None and max_frequency is not None
filter_type = 'bandstop' if invert else 'bandpass'
nyquist_frequency = 0.5 * frame_rate
min_frequency = min_frequency or 1e-2 # Arbitrary small positive number.
max_frequency = max_frequency or nyquist_frequency - 1e-2
min_threshold = min_frequency / nyquist_frequency
max_threshold = max_frequency / nyquist_frequency
second_order_sections = butter(
order, [min_threshold, max_threshold], btype=filter_type, output='sos'
) # 'ba' is not used, because sometimes it lacks numerical stability.
sound = sosfilt(second_order_sections, sound)
return sound
def oscillate_between_sounds(
sounds: np.ndarray, frame_rate: int, frequency: float,
waveform: str = 'sine'
) -> np.ndarray:
"""
Combine multiple sounds into one sound by oscillating between them.
:param sounds:
array of shape (n_sounds, n_channels, n_frames)
:param frame_rate:
number of frames per second
:param frequency:
frequency of oscillations between sound sources
:param waveform:
form of oscillations wave
:return:
sound composed from input sounds
"""
step = 2 / (sounds.shape[0] - 1)
thresholds = np.arange(-1, 1 + 1e-7, step)
weights = np.tile(thresholds.reshape((-1, 1)), (1, sounds.shape[2]))
wave = generate_wave(
waveform,
frequency,
np.ones(sounds.shape[2]),
frame_rate
)
wave = wave[0, :]
weights = (
(1 - np.abs(weights - wave) / step) * (np.abs(weights - wave) < step)
)
weights = weights.reshape((weights.shape[0], 1, weights.shape[1]))
sound = np.sum(sounds * weights, axis=0)
return sound
def filter_sweep(
sound: np.ndarray, frame_rate: int,
bands: List[Tuple[Optional[float], Optional[float]]] = None,
invert: bool = False, order: int = 10,
frequency: float = 6, waveform: str = 'sine'
) -> np.ndarray:
"""
Filter some frequencies from sound with oscillating cutoffs.
:param sound:
sound to be modified
:param frame_rate:
number of frames per second
:param bands:
list of pairs of minimum and maximum cutoff frequencies (in Hz);
oscillations are between sounds obtained from input sound after
applying filters with such cutoff frequencies
:param invert:
if it is `True`, for bands with both cutoff frequencies set not to
`None`, band-stop filters are applied instead of band-pass filters
:param order:
order of filters; the higher it is, the steeper cutoffs are
:param frequency:
frequency of oscillations between filtered sounds (in Hz)
:param waveform:
form of wave that specifies oscillations between filtered sounds
:return:
sound filtered with varying cutoff frequencies
"""
bands = bands or [(None, None)]
if len(bands) == 1:
sound = frequency_filter(
sound, frame_rate, bands[0][0], bands[0][1], invert, order
)
return sound
filtered_sounds = [
frequency_filter(
sound, frame_rate, min_cutoff_frequency, max_cutoff_frequency,
invert, order
)
for min_cutoff_frequency, max_cutoff_frequency in bands
]
filtered_sounds = [
x.reshape((1, x.shape[0], x.shape[1])) for x in filtered_sounds
]
filtered_sounds = np.concatenate(filtered_sounds)
sound = oscillate_between_sounds(
filtered_sounds, frame_rate, frequency, waveform
)
return sound
def overdrive(
sound: np.ndarray, frame_rate: int,
fraction_to_clip: float = 0.1, strength: float = 0.3
) -> np.ndarray:
"""
Overdrive the sound.
:param sound:
sound to be modified
:param frame_rate:
number of frames per second
:param fraction_to_clip:
fraction of the most outlying frames to be hard clipped
:param strength:
relative strength of distortion, must be between 0 and 1
:return:
overdriven sound
"""
if not (0 < fraction_to_clip < 1):
raise ValueError("Fraction to clip must be between 0 and 1.")
if not (0 <= strength < 1):
raise ValueError("Overdrive strength must be between 0 and 1.")
_ = frame_rate # All effects must have `frame_rate` argument.
abs_sound = np.abs(sound)
clipping_threshold = np.quantile(abs_sound, 1 - fraction_to_clip, axis=1)
clipping_threshold = clipping_threshold.reshape((-1, 1))
clipping_cond = abs_sound >= clipping_threshold
distorted_sound = sound - strength * sound**3 / clipping_threshold**2
clipped_sound = np.sign(sound) * (1 - strength) * clipping_threshold
sound = (
~clipping_cond * distorted_sound
+ clipping_cond * clipped_sound
)
sound /= (1 - strength)
return sound
def phaser(
sound: np.ndarray, frame_rate: int,
min_center: float = 220, max_center: float = 880,
band_width: float = 20, n_bands: int = 10, order: int = 10,
frequency: float = 5, waveform: str = 'sine',
original_share: float = 0.75, wahwah: bool = False
) -> np.ndarray:
"""
Apply phaser effect to sound.
Here, phaser is defined as weighted sum of:
1) original sound;
2) original sound modified by sweeping band-stop filter of narrow band.
Note that playing with arguments can significantly change resulting sound
and some settings produce awkward non-musical sounds. Also note that this
effect should be applied only to sounds with rich spectrum.
:param sound:
sound to be modified
:param frame_rate:
number of frames per second
:param min_center:
central frequency of the lowest band (in Hz)
:param max_center:
central frequency of the highest band (in Hz)
:param band_width:
width of sweeping band (in Hz)
:param n_bands:
number of band positions to consider; the higher it is, the more close
to classical phaser result is, but also the longer computations are
and the higher RAM consumption is during track creation
:param order:
order of filters; the higher it is, the steeper cutoffs are
:param frequency:
frequency of sweeping band oscillations;
the higher it is, the more input sound is distorted
:param waveform:
form of wave of sweeping band oscillations
:param original_share:
share of original sound in resulting sound
:param wahwah:
if it is `True`, band-pass filters are used instead of band-stop
filters and so the effect to be applied is called wah-wah, not phaser
:return:
phased sound
"""
step = (max_center - min_center) / n_bands
bands = [
(center - band_width / 2, center + band_width / 2)
for center in np.arange(min_center, max_center + 1e-7, step)
]
invert = not wahwah
filtered_sound = filter_sweep(
sound, frame_rate, bands, invert, order, frequency, waveform
)
sound = original_share * sound + (1 - original_share) * filtered_sound
return sound
def tremolo(
sound: np.ndarray, frame_rate: int,
frequency: float = 6, amplitude: float = 0.5, waveform: str = 'sine'
) -> np.ndarray:
"""
Make sound volume vibrating.
:param sound:
sound to be modified
:param frame_rate:
number of frames per second
:param frequency:
frequency of volume oscillations (in Hz)
:param amplitude:
relative amplitude of volume oscillations, must be between 0 and 1
:param waveform:
form of volume oscillations wave
:return:
sound with vibrating volume
"""
if not (0 < amplitude <= 1):
raise ValueError("Amplitude for tremolo must be between 0 and 1.")
amplitudes = amplitude * np.ones(sound.shape[1])
volume_wave = generate_wave(waveform, frequency, amplitudes, frame_rate)
volume_wave += 1
sound *= volume_wave
return sound
def vibrato(
sound: np.ndarray, frame_rate: int,
frequency: float = 4, width: float = 0.2, waveform: str = 'sine'
) -> np.ndarray:
"""
Make sound frequency vibrating.
:param sound:
sound to be modified
:param frame_rate:
number of frames per second
:param frequency:
frequency of sound's frequency oscillations (in Hz)
:param width:
difference between the highest frequency of oscillating sound
and the lowest frequency of oscillating sound (in semitones)
:param waveform:
form of frequency oscillations wave
:return:
sound with vibrating frequency
"""
semitone = 2 ** (1 / 12)
highest_to_lowest_ratio = semitone ** width
# If x = 0, d(x + m * sin(2 * \pi * f * x))/dx = 1 + 2 * \pi * f * m.
# If x = \pi, d(x + m * sin(2 * \pi * f * x))/dx = 1 - 2 * \pi * f * m.
# Ratio of above right sides is `highest_to_lowest_ratio`.
# Let us solve it for `m` (`max_delay`).
max_delay = (
(highest_to_lowest_ratio - 1)
/ ((highest_to_lowest_ratio + 1) * 2 * np.pi * frequency)
)
amplitudes = max_delay * frame_rate * np.ones(sound.shape[1])
frequency_wave = generate_wave(waveform, frequency, amplitudes, frame_rate)
time_indices = np.ones(sound.shape[1]).cumsum() - 1 + frequency_wave[0, :]
upper_indices = np.ceil(time_indices).astype(int)
upper_indices = np.clip(upper_indices, 0, sound.shape[1] - 1)
upper_sound = sound[:, upper_indices]
lower_indices = np.floor(time_indices).astype(int)
lower_indices = np.clip(lower_indices, 0, sound.shape[1] - 1)
lower_sound = sound[:, lower_indices]
weights = time_indices - lower_indices
sound = weights * upper_sound + (1 - weights) * lower_sound
return sound
def get_effects_registry() -> Dict[str, EFFECT_FN_TYPE]:
"""
Get mapping from effect names to functions that apply effects.
:return:
registry of effects
"""
registry = {
'filter': frequency_filter,
'filter_sweep': filter_sweep,
'overdrive': overdrive,
'phaser': phaser,
'tremolo': tremolo,
'vibrato': vibrato
}
return registry
| true |
adb7edd20c74ba94a73e7de7507138771ff24383 | Python | slavkoBV/solved-tasks-SoftGroup-course | /OOP_and_OOD/Todo/mvc/view.py | UTF-8 | 1,862 | 3.546875 | 4 | [] | no_license | import os
from collections import OrderedDict
class View:
menu = OrderedDict([
('a', 'Add new task'),
('r', 'Remove task'),
('m', 'Modify task'),
('w', 'Save to file'),
('l', 'Load list from file'),
])
sub_menu = OrderedDict([
('u', 'Update task (up to 18 symbols)'),
('d', 'Mark task as done'),
('s', 'Set task deadline (ex. 2017-04-08 18:00)'),
('c', 'Comment task (up to 21 symbols)'),
('p', 'Set task priority (ex. 1 - 10)')
])
@staticmethod
def clear():
"""Clear the display"""
os.system('cls' if os.name == 'nt' else 'clear')
@staticmethod
def show_main_menu():
"""Show main menu of program"""
for key, value in View().menu.items():
print('{} - {}'.format(key, value))
print('q - Quit')
@staticmethod
def show_sub_menu():
"""Show sub menu to modify tasks"""
for key, value in View().sub_menu.items():
print('{} - {}'.format(key, value))
print(' q - Back to Main')
@staticmethod
def input_value(parameter):
"""Interface to user input of different parameters"""
return input('Input {}: '.format(parameter))
@staticmethod
def show_list(data):
"""Show TODO list"""
print('{0:=^34}{1:^11}{2:=^35}'.format('=', 'TODO LIST', '='))
print('{0:=^80}'.format('='))
print('{0:^2}{1:^20}{2:^18}{3:^23}{4:^11}{5:^6}'.format('#', 'Task', 'Deadline', 'Comment', 'Priority',
'Done'))
print('{0:=^80}'.format('='))
if len(data) != 0:
for record in data:
print('{0!s:^2}{1!s:^20}{2!s:^18}{3!s:^23}{4!s:^10}{5!s:^7}'.format(*(i for i in record)))
print('\n' + '=' * 80 + '\n')
| true |
e07f802099ede08b122708e759983e6d05c1d623 | Python | bpenning/jupyter_repo | /old/dd2lhc.py | UTF-8 | 3,270 | 2.625 | 3 | [] | no_license | import pandas as pd
import math
import os
import numpy as np
from glob import glob
# whole bunch of defintions
gDM = 1.
gu = gd = gs = 0.25 # set to DM LHC WG recommendations
mn, conv_units = 0.938, 2.568 * pow(10., 27.)
Delta_d_p, Delta_u_p, Delta_s_p = -0.42, 0.85, -0.08
DATA_LOCATION = 'data/'
DATA_FILE_EXT = ''
def dataset_names():
datasets = glob('data/*.dat')
for dataset in datasets:
dataset = dataset.replace(DATA_LOCATION, '')
dataset = dataset.replace(DATA_FILE_EXT, '')
yield dataset
def get_datasets():
return list(dataset_names())
def dd2lhc(df, target, interaction):
print df
print dataset_names
gu = gd = gs = 0.25
if target == 'p' and (interaction is not 'scalar'):
f = abs(gDM * (gu * Delta_u_p + gd * Delta_d_p + gs * Delta_s_p)) #for p
if target == 'n' and (interaction is not 'scalar'):
f = abs(gDM * (gu * Delta_u_n + gd * Delta_d_n + gs * Delta_s_n)) #for n
# calculate mu
df['mu_nDM'] = mn * df['m_DM'] / (mn + df['m_DM'])
# apply conversion units to sigma
df['sigma'] = df['sigma'] * conv_units
df['sigma_in_GeV'] = df['sigma']
# calculate m_mediator
df['m_med'] = np.power(f * df['mu_nDM'], 0.5) / np.power(math.pi * df['sigma'] / 3., 0.25)
''' def lhc2dd(df):
gu = gd = gs = 0.25
f = abs(gDM * (gu * Delta_u_p + gd * Delta_d_p + gs * Delta_s_p)) #for p
# calculate mu
df['mu_nDM'] = mn * df['m_DM'] / (mn + df['m_DM'])
# apply conversion units to sigma
df['sigma_in_GeV'] = 3 * np.power(f * df['mu_nDM'], 2.) / (math.pi * np.power(df['m_med'], 4.))
df['sigma'] = df['sigma_in_GeV']/conv_units
'''
def get_data(dataset='PICOSD_p'):
dataset_type = 'DD'
if dataset in ['DD_2_LHC_n', 'DD_2_LHC_p', 'mMedmDM1', 'mMedmDM2']:
dataset_type = 'LHC'
input_file = os.path.join(DATA_LOCATION, dataset + DATA_FILE_EXT)
names = ['m_DM', 'sigma']
if dataset_type == 'LHC':
names = ['m_med', 'm_DM']
df = pd.read_csv(input_file, delim_whitespace=True,
names=names)
label = os.path.basename(input_file).split('.')[0]
df.insert(0, 'label', label)
interaction='axial'
nucleus='p'
if dataset_type == 'DD':
dd2lhc(df, nucleus, interaction)
elif dataset_type == 'LHC':
lhc2dd(df)
df.name=dataset.replace('.dat','_')+interaction+'_'+nucleus
# # apply conversion units to sigma
# df['sigma'] = df['sigma'] * conv_units
# # calculate mu
# df['mu_nDM'] = mn * df['m_DM'] / (mn + df['m_DM'])
# # calculate m_mediator
# df['m_med'] = pow(f * df['mu_nDM'], 0.5) / \
# pow(math.pi * df['sigma'] / 3., 0.25)
return df
def get_figure(df):
import matplotlib.pyplot as plt
plt.style.use('ggplot')
fig = plt.figure(figsize=(6.5875, 6.2125))
ax = fig.add_subplot(111)
ax.set_title("DD2LHC Pico (p, axial)")
ax.set_xlabel(r'$m_{Med}$')
ax.set_ylabel(r'$m_{DM}$')
ax.semilogy(df['m_med'], df['m_DM'], color='red')
# plt.show()
return fig
if __name__ == '__main__':
datafile='LHC_2_DD_p.dat'
df = get_data(datafile)
print df.name
fig=get_figure(df)
#fig.savefig('pico2plane2.png')
fig.show()
fig.savefig('')
| true |
8f7b19bc322990f99dbce2dc7a7e2ed83c14d010 | Python | venkatsvpr/Problems_Solved | /LC_Implement_Strstr_KMP.py | UTF-8 | 1,685 | 4 | 4 | [] | no_license | """
28. Implement strStr()
Implement strStr().
Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.
Example 1:
Input: haystack = "hello", needle = "ll"
Output: 2
Example 2:
Input: haystack = "aaaaa", needle = "bba"
Output: -1
Clarification:
What should we return when needle is an empty string? This is a great question to ask during an interview.
For the purpose of this problem, we will return 0 when needle is an empty string. This is consistent to C's strstr() and Java's indexOf().
"""
class Solution(object):
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
def buildkmp (needle, kmp):
i = 0
j = 1
while (j < len(needle)):
if (needle[j] == needle[i]):
kmp[j] = i+1
j += 1
i += 1
else:
if (i == 0):
j+= 1
else:
i = kmp[i-1]
return
kmp = [0 for i in range(len(needle))]
buildkmp(needle,kmp)
idx = 0
nid = hid = 0
while (hid < len(haystack)):
if (nid == len(needle)):
return hid-len(needle)
if (needle[nid] == haystack[hid]):
nid += 1
hid += 1
else:
if (nid == 0):
hid += 1
else:
nid = kmp[nid-1]
if (nid == len(needle)):
return hid-len(needle)
return -1
| true |
ef5d6ae5e22020a37651cae84f30ecde6329e7ad | Python | huaweicloud/huaweicloud-sdk-python-frs | /frsclient/result/live_detect_result.py | UTF-8 | 1,371 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
class LiveDetectResult(object):
"""Result of live detect."""
def __init__(self, content):
self.content_origin = content
self.content_eval = eval(content.replace("true", "True").replace("false", "False"))
def get_original_result(self):
"""Get original http content.
:return: http content
:rtype: str
"""
return self.content_origin
def get_eval_result(self):
"""Get Eval http content.
:return: formatted http content, which is easy to use.
:rtype: dict
"""
return self.content_eval
def get_video_result(self):
"""Get video result
:rtype: dict
"""
return self.content_eval.get("video-result")
def get_warning_list(self):
"""Get warning list
:rtype: list
"""
return self.content_eval.get("warning-list")
def get_alive(self):
"""Get alive
:rtype: bool
"""
return self.content_eval.get("video-result").get("alive")
def get_picture(self):
"""Get face picture
:rtype: str
"""
return self.content_eval.get("video-result").get("picture")
def get_actions(self):
"""Get actions
:rtype: list
"""
return self.content_eval.get("video-result").get("actions") | true |
19dfe0d6403e185bf5be9f84948fec2671f4ad09 | Python | lesteve/clusterlib | /clusterlib/scheduler.py | UTF-8 | 5,593 | 2.765625 | 3 | [
"BSD-3-Clause"
] | permissive | """
Module to help working with scheduler such as sun grid engine (SGE) or
Simple Linux Utility for Resource Management (SLURM).
Main functions covered are :
- get the list of names of all running jobs;
- generate easily a submission query for a job.
"""
# Authors: Arnaud Joly
#
# License: BSD 3 clause
import subprocess
from xml.etree import ElementTree
__all__ = [
"queued_or_running_jobs",
"submit"
]
def _sge_queued_or_running_jobs():
try:
xml = subprocess.check_output("qstat -xml", shell=True,
stderr=subprocess.PIPE)
tree = ElementTree.fromstring(xml)
return [leaf.text for leaf in tree.iter("JB_name")]
except subprocess.CalledProcessError:
# qstat is not available
return []
def _slurm_queued_or_running_jobs():
try:
out = subprocess.check_output("squeue --noheader -o %j", shell=True,
stderr=subprocess.PIPE)
out = out.split("\n")[:-1]
return out
except subprocess.CalledProcessError:
# squeue is not available
return []
def queued_or_running_jobs():
"""Return the names of the queued or running jobs under SGE and SLURM
The list of jobs could be either the list of all jobs on the scheduler
or only the jobs associated to the user calling this function.
The default behavior is dependant upon scheduler configuration.
Try ``qstat`` in SGE or ``squeue`` in SLURM to know which behavior it
follows.
Returns
-------
out : list of string,
Returned a list containing all the names of the jobs that are running
or queued under the SGE or SLURM scheduler.
"""
out = []
for queued_or_running in (_sge_queued_or_running_jobs,
_slurm_queued_or_running_jobs):
out.extend(queued_or_running())
return out
_SGE_TEMPLATE = {
"job_name": '-N "%s"',
"memory": "-l h_vmem=%sM",
"time": "-l h_rt=%s",
"email": "-M %s",
"email_options": "-m %s",
"log_directory": "-o %s/$JOB_NAME.$JOB_ID",
}
_SLURM_TEMPLATE = {
"job_name": '--job-name=%s',
"memory": "--mem=%s",
"time": "--time=%s",
"email": "--mail-user=%s",
"email_options": "--mail-type=%s",
"log_directory": "-o %s/%s.txt",
}
_TEMPLATE = {
"sge": _SGE_TEMPLATE,
"slurm": _SLURM_TEMPLATE
}
_LAUNCHER = {
"sge": "qsub",
"slurm": "sbatch",
}
def submit(job_command, job_name="job", time="24:00:00", memory=4000,
email=None, email_options=None, log_directory=None, backend="slurm",
shell_script="#!/bin/bash"):
"""Write the submission query (without script)
Parameters
----------
job_command : str,
Command associated to the job, e.g. 'python main.py'.
job_name : str, optional (default="job")
Name of the job.
time : str, optional (default="24:00:00")
Maximum time format "HH:MM:SS".
memory : str, optional (default=4000)
Maximum virtual memory in mega-bytes
email : str, optional (default=None)
Email where job information is sent. If None, no email is asked
to be sent.
email_options : str, optional (default=None)
Specify email options:
- SGE : Format char from beas (begin,end,abort,stop) for SGE.
- SLURM : either BEGIN, END, FAIL, REQUEUE or ALL.
See the documenation for more information
log_directory : str, optional (default=None)
Specify the log directory. If None, no log directory is specified.
backend : {'sge', 'slurm'}, optional (default="slurm")
Backend where the job will be submitted
shell_script : str, optional (default="#!/bin/bash")
Specify shell that is used by the script.
Returns
-------
submission_query : str,
Return the submission query in the appropriate format.
The obtained query could be directly launch using os.subprocess.
Further options could be appended at the end of the string.
Examples
--------
First, let's generate a command for SLURM to launch the program
``main.py``.
>>> from clusterlib.scheduler import submit
>>> script = submit("python main.py --args 1")
>>> print(script)
echo '#!/bin/bash
python main.py --args 1' | sbatch --job-name=job --time=24:00:00 --mem=4000
The job can be latter launched using for instance ``os.system(script)``.
"""
if backend in _TEMPLATE:
launcher = _LAUNCHER[backend]
template = _TEMPLATE[backend]
else:
raise ValueError("Unknown backend %s expected any of %s"
% (backend, "{%s}" % ",".join(_TEMPLATE)))
job_options = [
template["job_name"] % job_name,
template["time"] % time,
template["memory"] % memory,
]
if email:
job_options.append(template["email"] % email)
if email_options:
job_options.append(template["email_options"] % email_options)
if log_directory:
if backend == "sge":
job_options.append(template["log_directory"] % log_directory)
elif backend == "slurm":
job_options.append(template["log_directory"]
% (log_directory, job_name))
# Using echo job_commands | launcher job_options allows to avoid creating
# a script file. The script is indeed created on the flight.
command = ("echo '%s\n%s' | %s %s"
% (shell_script, job_command, launcher, " ".join(job_options)))
return command
| true |
ef653e2bcaf48c1b45e7b27200a5eadd638a49ff | Python | CrystallineCat/vdom | /mdndata/spider.py | UTF-8 | 3,020 | 2.609375 | 3 | [
"BSD-3-Clause"
] | permissive | from .layout import *
from keyword import iskeyword
import textwrap
from yapf.yapflib.yapf_api import FormatCode
def render_html(element):
return lxml.html.tostring(element).decode('utf-8')
class Text(str):
@classmethod
def parse(cls, context, request=None):
yield cls(''.join(context.xpath('.//text()').extract()).strip())
def wrap(self, n):
return type(self)(textwrap.fill(self, n))
def map_lines(self, pattern):
return type(self)('\n'.join(pattern % line for line in self.splitlines()))
def __str__(self):
return self.wrap(80)
def __repr__(self):
if '\n' not in self:
return str.__repr__(self)
text = self
if text.startswith('* '):
text = ' ' + text
return '"""\n %s\n """' % text.replace('\n', '\n ')
def component(tag, **kwds):
name = tag.replace('-', '_') + ('_' if iskeyword(tag) else '')
kwds = ', '.join(f'{k} = {v!r}' for k, v in kwds.items() if v is not None)
code = f'{name} = create_component({tag!r}, {kwds},)'
try:
return Text(FormatCode(code, style_config='pep8')[0])
except SyntaxError as e:
return Text(f'FIXME: {e}\n{code}').map_lines('# %s')
class Element(Layout):
url = ...
class Docs(Layout):
overview: Text = XPath(XPath.until('table', 'h2'))
notes: Text = XPath('id("Notes")')
docs: Docs = XPath('id("wikiArticle")/*[1]')
class HTMLIndex(Layout):
url = 'https://developer.mozilla.org/en-US/docs/Web/HTML/Element'
class Category(Layout):
class Item(Layout):
tag: Text = XPath('.')
content: Element = XPath('.')
name: Text = XPath('.')
docs: Text = XPath('set:intersection(following-sibling::*, %s)' % XPath.until('table[1]'))
items: [Item] = XPath('following-sibling::table[1]/tbody/tr/td[1]/a')
categories: [Category] = XPath('id("wikiArticle")/h2')
#svg_index_layout = (
# Every('article#wikiArticle > *') | After('h2#SVG_elements_by_category') | Until('h2') | Subdivisions('h3') |
# Collect(
# category=First('h3') | Collect(render_text),
# overview=(),
# items=Every('p a') | Collect(
# tag=Collect(render_text),
# content=Collect(svg_index.navigate) | element_page_layout,
# ),
# )
#)
# Assigning __str__ methods below to keep parsing and emitting separated in code:
Layout.__str__ = lambda self: '\n\n'.join(map(str, dict.values(self)))
HTMLIndex.__str__ = lambda self: '\n\n'.join(
(
'# -*- coding: utf-8 -*-',
'from .core import create_component',
f'# From {self.url}',
) + tuple(map(str, self.categories))
)
HTMLIndex.Category.__str__ = lambda self: '\n'.join(
(
self.name.wrap(72).map_lines('# == %s =='),
self.docs.wrap(78).map_lines('# %s'),
) + tuple(map(str, self.items))
)
HTMLIndex.Category.Item.__str__ = lambda self: component(self.tag[1:-1], docs=Text(self.content))
| true |
6f1dc96815191bfcb04150f15454df7022ffe72c | Python | y0ge5h/olympic_project_new | /q01_rename_columns/build.py | UTF-8 | 1,027 | 3.015625 | 3 | [] | no_license | # %load q01_rename_columns/build.py
# default imports
import pandas as pd
def q01_rename_columns(path):
df = pd.read_csv(path , skiprows = 1)
df.columns.values[0] = 'Country'
for x in range(0 , len(df.columns.values)):
if df.columns.values[x] == '# Summer':
df.columns.values[x+1] = 'Gold_Summer'
df.columns.values[x+2] = 'Silver_Summer'
df.columns.values[x+3] = 'Bronze_Summer'
df.columns.values[x+4] = 'Total_Summer'
elif df.columns.values[x] == '# Winter':
df.columns.values[x+1] = 'Gold_Winter'
df.columns.values[x+2] = 'Silver_Winter'
df.columns.values[x+3] = 'Bronze_Winter'
df.columns.values[x+4] = 'Total_Winter'
elif df.columns.values[x] == '# Games':
df.columns.values[x+1] = 'Gold_Total'
df.columns.values[x+2] = 'Silver_Total'
df.columns.values[x+3] = 'Bronze_Total'
df.columns.values[x+4] = 'Total'
return df
| true |
1db0c52cea13c8096ddcaa296a7c193e853c8b12 | Python | IcemanGambit/GSC | /Tester/GSC/Vehicle.py | UTF-8 | 4,149 | 2.78125 | 3 | [] | no_license | import sys, os, math
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import traci
import TrafficLight, Route
"""
getRecommentedSpeed(string, int, int) -> int
Returns the recommented speed to reach a green light for the next connection within mindistance and maxtime simulation steps
"""
def getRecommentedSpeed(vhId,minDistance, maxtime):
distance = _getDistanceNextTrafficLight(vhId)
spans = _getGreenSpans(vhId, maxtime)
t = traci.simulation.getCurrentTime()
maxSpeed = traci.lane.getMaxSpeed(traci.vehicle.getLaneID(vhId))
#Vehicle is on a connection or there is an induction loop ahead.
if traci.vehicle.getRoadID(vhId).find("Ju_") >= 0 or TrafficLight.inductionLoopAhead(vhId):
return maxSpeed
#If there are no more traffic lights on route or the traffic light is too far away
# -> drive at max speed
if distance == None or distance >= minDistance:
return maxSpeed
#Calculate recommended speed
smax = 0 #Reaching just as light changes to green
smin = 0 #Reaching just before light changes to red
for span in spans:
deltaTbegin = (span[0] - t)
deltaTend = (span[1] - t)
#If slowest speed is larger than max speed
# -> look at next span
smin = distance/(deltaTend/1000)
if smin > maxSpeed:
continue
#If light is green
if(deltaTbegin <= 0):
smax = maxSpeed #Drive as fast possible
else:
smax = distance/(deltaTbegin/1000) #Set speed to reach connection when it changes
#Only drive at max speed
if smax > maxSpeed:
smax = maxSpeed
#Always recommend at least 15 km/h ~ 4.1 m/s
if smax < 4.1:
return 4.1
return smax
#No traffic light ahead
return maxSpeed
"""
_getGreenSpans(string, int) -> [[int,int],[int,int],]
Gets the next connection (traffic light) on route, and returns the time spans when it is green.
Returned result is a list of pairs og start and end times represended as seconds since the simulation started.
"""
def _getGreenSpans(vhId, maxtime):
TL = _getNextTrafficLight(vhId)
if TL:
return TrafficLight.getNextGreen(TL[0],TL[1], TL[2], maxtime)
return []
"""
getNextTrafficLight(string) > [string, string, string]
Returns the next traffic light id, the incomming and outgoing edges on the route
The traffic light is visisted to reduce the complexity of getTrafficLightsOnRoute(vhId)
"""
def _getNextTrafficLight(vhId):
edge = traci.vehicle.getRoadID(vhId)
nextTL = Route.getTrafficLightsOnRoute(vhId)
if len(nextTL)== 0:
return None
if edge == nextTL[0][2]:
Route.visitTrafficLight(vhId)
if len(nextTL)== 0:
return None
return nextTL[0]
"""
getDistanceNextTraficLight(string) -> int
Returns the euclidean distance to the next traffic light, None if no light ahead.
"""
def _getDistanceNextTrafficLight(vhId):
TL = _getNextTrafficLight(vhId)
if not TL:
return None
TL_cord = traci.junction.getPosition(TL[0]) #Note that junction and traffic light is not the same but have identical id's
Vh_cord = traci.vehicle.getPosition(vhId)
return math.sqrt(((TL_cord[0]-Vh_cord[0])**2) + ((TL_cord[1]-Vh_cord[1])**2)) - TrafficLight.getRadius(TL[0]) #-20
"""
getTotalDistanceDriven(string) -> int
Note: This is not avaiable through TraCI, and we therefore made a hack.
This function must be called at each simulation step in order to function.
The total distance driven is recoded in _previousDistance for each vehicle where first element is the distance, the second is the previous edge and the third is the previous lane.
The route contain road ids, and only lanes have a length.
getLanePosition(vhId) returns how far the vehicle has driven on this lane.
"""
_previousDistance = {}
def getTotalDistanceDriven(vhId):
laneID = traci.vehicle.getLaneID(vhId)
roadID = traci.vehicle.getRoadID(vhId)
if not vhId in _previousDistance:
_previousDistance[vhId] = [0,roadID, laneID]
if _previousDistance[vhId][1] != roadID:
_previousDistance[vhId][0] += traci.lane.getLength(_previousDistance[vhId][2])
_previousDistance[vhId][1] = roadID
_previousDistance[vhId][2] = laneID
return _previousDistance[vhId][0] + traci.vehicle.getLanePosition(vhId)
| true |
78f9843460ef1287b22dcb8a41dc14e5ba87594d | Python | alexkinnear/fractalVisualizer | /src/ImagePainter.py | UTF-8 | 1,642 | 3.421875 | 3 | [] | no_license | from tkinter import Tk, Canvas, PhotoImage, mainloop
import GradientFactory
def paint(fractal, imagename='branches.png'):
    """Paint a Fractal image into the TKinter PhotoImage canvas.
    This code creates an image which is 512x512 pixels in size."""
    SIZE = fractal.config['pixels']
    SIZE = int(SIZE)
    GradScheme = GradientFactory.makeGradient(fractal)
    # Figure out how the boundaries of the PhotoImage relate to coordinates on
    # the imaginary plane.
    minx = fractal.config['centerx'] - (fractal.config['axislength'] / 2.0)
    maxx = fractal.config['centerx'] + (fractal.config['axislength'] / 2.0)
    miny = fractal.config['centery'] - (fractal.config['axislength'] / 2.0)
    # Display the image on the screen
    window = Tk()
    img = PhotoImage(width=SIZE, height=SIZE)
    canvas = Canvas(window, width=SIZE, height=SIZE, bg=GradScheme.getColor(0))
    canvas.pack()
    canvas.create_image((SIZE/2, SIZE/2), image=img, state="normal")
    # At this scale, how much length and height on the imaginary plane does one
    # pixel take?
    pixelsize = abs(maxx - minx) / SIZE
    # Paint bottom-to-top: screen rows grow downward, imaginary y grows upward.
    for row in range(SIZE, 0, -1):
        for col in range(SIZE):
            x = minx + col * pixelsize
            y = miny + row * pixelsize
            i = fractal.count(complex(x, y))
            color = GradScheme.getColor(i)
            img.put(color, (col, SIZE - row))
        window.update() # display a row of pixels
    # Output the Fractal into a .png image
    # NOTE(review): the default imagename already ends in ".png", so the file
    # below would be saved as "branches.png.png" — confirm callers pass
    # extension-less names.
    img.write(imagename + ".png")
    print("Wrote picture " + imagename + ".png")
    # Call tkinter.mainloop so the GUI remains open
    mainloop()
| true |
9a084a86a19728b18044acd885f93cdf108c458f | Python | Inpurple/Leetcode | /125. Valid Palindrome/Solution_python生成器.py | UTF-8 | 303 | 3.078125 | 3 | [] | no_license | class Solution(object):
def isPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
res=[x.lower() for x in s if x.isalnum()]#python生成器
return res==res[::-1]#列表反转,res[-1::-1]==res[::-1]
| true |
5fc5927d066996ac21003d64bc2ac59055e8aaea | Python | plynth/anticipate | /tests/test_adapt.py | UTF-8 | 9,281 | 3.15625 | 3 | [
"BSD-3-Clause"
] | permissive | from builtins import object
import pytest
from anticipate import adapt, adapter, anticipate
from anticipate.adapt import clear_adapters
from anticipate.exceptions import AnticipateParamError, AnticipateErrors
def setup_function(function):
    """
    Pytest per-test hook: reset the global adapter registry before each test
    so adapters registered by one test cannot leak into another.
    """
    clear_adapters()
# Setup a basic int adapter for all tests
@adapter((str, float, int), (int, str))
def to_int(obj, to_cls):
    """Module-level adapter: convert a str/float/int source to int or str."""
    return to_cls(obj)
def test_mro():
    """An adapter registered for a base class is found for its subclasses."""
    class Foo(object):
        def __init__(self, data):
            self.data = data
    class Bar(Foo):
        pass
    @adapter(Foo, str)
    def to_string(obj, to_cls):
        return 'Adapted to string'
    bar = Bar('My string')
    # Bar has no adapter of its own; the Foo->str one must be used via the MRO.
    s = adapt.adapt(bar, str)
    assert s == 'Adapted to string'
    assert s == 'Adapted to string'
    class Zip(object):
        def __init__(self, data):
            self.data = data
    class Zam(Zip):
        pass
    @adapter(Foo, Zip)
    def from_foo_to_zip(obj, to_cls):
        return 'Adapted to zip'
    # The target's MRO is walked too: adapting to Zam finds the Foo->Zip adapter.
    s = adapt.adapt(bar, Zam)
    assert s == 'Adapted to zip'
def test_adapt_params():
    """Anticipated parameters are adapted positionally and by keyword."""
    @anticipate(foo=str, bar=int)
    def test(foo, bar, zing):
        return foo, bar, zing
    class FizzBuzz(object):
        def __str__(self):
            return 'fizzbuzz'
        def __int__(self):
            return 22
    @adapter(FizzBuzz, (str, int, float))
    def from_fizz(obj, to_cls):
        return to_cls(obj)
    # `zing` has no anticipation, so it must pass through untouched.
    assert test(1, 2.3, 'fizz') == ('1', 2, 'fizz')
    assert test(1, 2.3, zing='fizz') == ('1', 2, 'fizz')
    assert test(1, bar=2.3, zing='fizz') == ('1', 2, 'fizz')
    assert test(foo=1, bar=2.3, zing='fizz') == ('1', 2, 'fizz')
    assert test('hi', '1', str) == ('hi', 1, str)
    a = FizzBuzz()
    assert test(a, a, a) == ('fizzbuzz', 22, a)
def test_unadapted():
    """
    Ensure we can call a method without it being Adapted
    """
    @anticipate(foo=str, bar=int)
    def test(foo, bar):
        return foo, bar
    assert test(1, '0') == ('1', 0)
    # __unadapted__ exposes the original, un-wrapped function.
    assert test.__unadapted__(1, '0') == (1, '0')
def test_bound_to():
    """
    Ensure that an anticipated method gets bound to the subclass,
    not its base class.
    """
    class BaseClass(object):
        @anticipate()
        def get_wrapped_self(self):
            return self
    class SubClass(BaseClass):
        def get_self(self):
            return self
    a = BaseClass()
    assert a.get_wrapped_self() == a
    # Called on the subclass, `self` must be the SubClass instance.
    b = SubClass()
    assert b.get_wrapped_self() == b
    assert b.get_wrapped_self() == b.get_self()
def test_instance_bound_to():
    """
    Ensure that an anticipated method gets bound to each instance
    instead of its first instance.
    """
    class BaseClass(object):
        pass
    class SubClass(BaseClass):
        def __init__(self, *args, **kwargs):
            super(SubClass, self).__init__(*args, **kwargs)
            self.thing = {}
        @anticipate()
        def get_wrapped_self(self):
            return self
        def get_self(self):
            return self
    b = SubClass()
    assert b.get_wrapped_self() is b
    assert b.get_wrapped_self() is b.get_self()
    # The instance state seen through the wrapper must be the same object.
    assert id(b.get_wrapped_self().thing) == id(b.get_self().thing)
    # A second instance must not reuse the binding of the first.
    c = SubClass()
    assert c.get_wrapped_self() is c
    assert c.get_wrapped_self() is c.get_self()
    assert id(c.get_wrapped_self().thing) == id(c.get_self().thing)
def test_args():
    """
    Verify that `self` is the correct instance when
    additional positional arguments are passed in.
    """
    class Test(object):
        @anticipate()
        def get_args(self, arg, *args):
            return self, arg, args
        @anticipate()
        def get_arg(self, arg):
            return self, arg
        @anticipate()
        def get_self(self, foo=None):
            return self
        @anticipate(arg=int)
        def get_arg_int(self, arg):
            return self, arg
        @anticipate(arg=int)
        def get_args_int(self, arg, *args):
            return self, arg, args
    # Plain (unbound) anticipated function for comparison with the methods.
    @anticipate(arg=int)
    def get_arg_int(arg):
        return arg
    obj1 = object()
    obj2 = object()
    obj3 = object()
    b = Test()
    assert b.get_self() is b
    # Verify that if there are no adapters, *args pass through
    r = b.get_args(obj1, obj2, obj3)
    assert r[0] is b
    assert r[1] is obj1
    assert r[2][0] is obj2
    assert r[2][1] is obj3
    # Verify that if there are adapters, positional args get adapted
    r = b.get_args_int('1', obj2, obj3)
    assert r[0] is b
    assert r[1] == 1
    assert r[2][0] is obj2
    assert r[2][1] is obj3
    # Verify that if there are no adapters, positional args pass through
    r = b.get_arg(obj1)
    assert r[0] is b
    assert r[1] is obj1
    # Verify that if there are no adapters, keyword args pass through
    r = b.get_arg(arg=obj1)
    assert r[0] is b
    assert r[1] is obj1
    # Verify that keyword args are adapted
    r = b.get_arg_int(arg='1')
    assert r[0] is b
    assert r[1] == 1
    assert get_arg_int(arg='1') == 1
def test_kwargs():
    """
    Verify that kwargs can be adapted
    """
    class Test(object):
        @anticipate(foo=int, bar=str)
        def get_args(self, arg, **kwargs):
            return arg, kwargs
    # Same signature as the method but as a plain function.
    @anticipate(foo=int, bar=str)
    def get_args(arg, **kwargs):
        return arg, kwargs
    t = Test()
    obj = object()
    # `arg` is not anticipated so it passes through; the kwargs are adapted.
    r = t.get_args(obj, foo='2', bar=3)
    assert r[0] is obj
    assert r[1]['foo'] == 2
    assert r[1]['bar'] == '3'
    r = get_args(obj, foo='2', bar=3)
    assert r[0] is obj
    assert r[1]['foo'] == 2
    assert r[1]['bar'] == '3'
    r = t.get_args(arg=obj, foo='2', bar=3)
    assert r[0] is obj
    assert r[1]['foo'] == 2
    assert r[1]['bar'] == '3'
    r = get_args(arg=obj, foo='2', bar=3)
    assert r[0] is obj
    assert r[1]['foo'] == 2
    assert r[1]['bar'] == '3'
def test_adapt_all_list():
    """
    Verify adapt_all returns a list
    """
    int_like = ['1', 2.0]
    r = adapt.adapt_all(int_like, int)
    assert r[0] == 1
    assert r[1] == 2
    assert adapt.adapt_all(int_like, int) == [1, 2]
def test_adapt_all_with_none():
    """
    Verify passing None to adapt_all returns an empty list
    """
    r = adapt.adapt_all(None, int)
    assert r == []
    # Must be an actual list, not merely an empty iterable.
    assert type(r) == list
def test_anticipate_list():
    """
    Verify using a list for a parameter adapts expects an iterable
    and adapts each value in the input.
    """
    @anticipate(items=[int])
    def get_list(items):
        return items
    int_like = ['1', 2.0]
    r = get_list(int_like)
    assert r[0] == 1
    assert r[1] == 2.0
    # Works on list input
    assert get_list(int_like) == [1, 2]
    # Works on tuple input
    assert get_list((4.0, 5.0, 6.0)) == [4, 5, 6]
    # Works on generator input
    assert get_list(iter((4.0, 5.0, 6.0))) == [4, 5, 6]
def test_anticipate_list_with_none():
    """
    Verify passing None for a parameter that expects a list returns an
    empty list.
    """
    @anticipate(items=[int])
    def get_list(items):
        return items
    r = get_list(None)
    assert r == []
    # Must be an actual list, not just falsy.
    assert type(r) == list
def test_anticipate_input():
    """
    Verify that input can be checked without calling inner function
    """
    @anticipate(items=[int], foo=str)
    def get_list(items, foo=None):
        return items, foo
    # An invalid value is reported as an AnticipateParamError for that name.
    with pytest.raises(AnticipateErrors) as exc_info:
        get_list.input(items='a')
    assert len(exc_info.value.errors) == 1
    e = exc_info.value.errors[0]
    assert isinstance(e, AnticipateParamError)
    assert e.name == 'items'
    # Valid input is returned adapted, split into (args, kwargs).
    args,kwargs = get_list.input(items=[1], foo=1)
    assert args == ()
    assert kwargs == {'items':[1],'foo':'1'}
    args, kwargs = get_list.input(['1', 2], foo='abc')
    assert args == ([1, 2],)
    assert kwargs == {'foo': 'abc'}
def test_anticipate_wrong_params():
    """
    Verify that anticipate complains if you anticipate invalid parameters
    """
    # `foobar` is not a parameter of noop, so decoration itself must raise.
    with pytest.raises(KeyError):
        @anticipate(foobar=int)
        def noop(items):
            pass
    # Sanity check
    @anticipate(items=int)
    def noop(items):
        pass
def test_anticipate_custom_fields():
    """
    Verify that anticipate can use any object that implements `adapt`
    as an anticipated type. This is good for things like SpringField fields.
    """
    class IntField(object):
        def adapt(self, value):
            return int(value)
    @anticipate(num=IntField())
    def get_num(num):
        return num
    assert get_num('1') == 1
    assert get_num(2.33) == 2
def test_anticipate_custom_fields_list():
    """
    Verify that anticipate can use any object that implements ``adapt``
    as an anticipated list of type. This is good for things like
    SpringField fields.
    """
    class IntField(object):
        def adapt(self, value):
            return int(value)
    # The first positional argument anticipates the return value.
    @anticipate(IntField(), nums=[IntField()])
    def get_sum(nums):
        return sum(nums)
    @anticipate([IntField()], strings=[str])
    def get_as_int(strings):
        return strings
    assert get_sum(['1', '2']) == 3
    assert get_sum([2.33, 1.33]) == 3
    assert get_as_int(['2', '1']) == [2, 1]
| true |
b91591f58697c1d98655b624347f09ae67be4ab9 | Python | dongbo-BUAA-VR/vtkplotter | /examples/other/icon.py | UTF-8 | 616 | 2.8125 | 3 | [
"MIT"
] | permissive | # Make a icon actor to indicate orientation or for comparison
# and place it in one of the 4 corners within the same renderer
#
from vtkplotter import Plotter
# Build the plotter and load the main mesh to display.
vp = Plotter(axes=5) # type 5 builds an annotated orientation cube
act = vp.load('data/270.vtk', c='blue', bc='v', legend=False)
vp.render()
# Icons are small actors pinned to a renderer corner (pos 1..4).
vlg = vp.load('data/images/vtk_logo.png', alpha=.5)
vp.addIcon(vlg, pos=1)
elg = vp.load('data/images/embl_logo.jpg')
vp.addIcon(elg, pos=2, size=0.06)
# Two meshes grouped into one Assembly used as a comparison icon.
a1 = vp.load('data/250.vtk', c=2)
a2 = vp.load('data/290.vtk', alpha=.4)
icon = vp.Assembly([a1, a2])
vp.addIcon(icon, pos=4) # 4=bottom-right
vp.show(act)
| true |
d0f87235646f472a47e12a9ca10a73a310e08823 | Python | vpilo/arduino-textscroller | /scripts/play_gif.py | UTF-8 | 2,002 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python3
from scroller import scroller, log
import argparse
from os import path
import signal
import sys
from time import sleep
import numpy as np
from PIL import Image, ImageSequence
# Display size of the scroller in pixels (kept for reference).
WIDTH = 15
HEIGHT = 8

parser = argparse.ArgumentParser(description='Show a GIF on the Arduino text scroller.')
parser.add_argument('-a', '--address', dest='address', required=True,
                    help='IP address/hostname of the scroller')
parser.add_argument('-d', '--delay', dest='delay', default=0.0, type=float,
                    help='''Override gif speed with your own frame time (in msec)''')
parser.add_argument('-l', '--loop', dest='loop', action='store_true', default=False,
                    help='Play the GIF in a loop until interrupted (with Ctrl+C)')
parser.add_argument('file', help='File to display', type=argparse.FileType('r'))
args = parser.parse_args()

if args.delay < 0.0 or args.delay > 1000.0:
    log.e("The delay must be in the 0-1000ms range")
if args.file is sys.stdin:
    log.e("You must specify an actual file")


def signalHandler(sig, frame):
    # Close the scroller connection cleanly on Ctrl+C.
    s.close()
    exit(0)


signal.signal(signal.SIGINT, signalHandler)

# argparse only validated that the file exists; PIL reopens it by name.
# Renamed from `path`, which shadowed the `os.path` import above.
gif_path = args.file.name
args.file.close()

image = Image.open(gif_path)
# Bug fix: the original used `args.delay is 0.0` and `... is 0` — identity
# tests on numbers are unreliable (a parsed "-d 0" is a distinct float object).
if args.delay == 0.0:
    if 'duration' not in image.info or image.info['duration'] == 0:
        log.e("This GIF doesn't have a speed. Specify one.")
    args.delay = image.info['duration']

log.l("Showing {:s} with speed {:.0f}".format(gif_path, args.delay))

# Decode every frame up front into an (height, width, 3) uint8 RGB array.
frames = np.array([np.array(frame.copy().convert('RGB').getdata(), dtype=np.uint8)
                   .reshape(frame.size[1], frame.size[0], 3)
                   for frame in ImageSequence.Iterator(image)])

s = scroller.scroller(args.address)
while True:
    for frame in frames:
        # One frame is sent as a flat hex string, RRGGBB per pixel, row-major.
        # Renamed from `str`, which shadowed the builtin.
        frame_hex = ''.join(format(channel, '02x')
                            for row in frame
                            for pixel in row
                            for channel in pixel)
        s.setFrame(frame_hex)
        sleep(args.delay / 1000.0)
    if not args.loop:
        break
s.close()
| true |
8f407c8af80eebabf7a63094e141a6ac7724414e | Python | n28div/itAIRQ | /regions/abruzzo.py | UTF-8 | 3,392 | 2.8125 | 3 | [
"MIT"
] | permissive | from .region import BaseRegion
from .province import BaseProvince
from datetime import datetime
import requests
import json
import re
from statistics import mean
class Abruzzo(BaseRegion):
    """
    Implementation of Abruzzo
    """
    # Region display name used by the BaseRegion machinery.
    name = "Abruzzo"
    # Maps our indicator keys to the field names used by the ARTA Abruzzo API.
    indicator_map = {
        'pm10': 'PM10#MEDIA_GIORNO',
        'pm25': 'PM2_5#MEDIA_GIORNO',
        'c6h6': 'BEN#MEDIA_GIORNO',
        'no2': 'NO2#MAX_MEDIA_ORARIA_IN_GIORNO',
        'so2': 'SO2#MAX_MEDIA_ORARIA_IN_GIORNO',
        'co': 'CO#MAX_MEDIA_8ORE_IN_GIORNO',
        'o3': 'O3#MAX_MEDIA_8ORE_IN_GIORNO'
    }
    def __init__(self):
        super().__init__()
        # adding provinces
        self.add_province(BaseProvince(name='Chieti', short_name='CH'))
        self.add_province(BaseProvince(name="L'Aquila", short_name='AQ'))
        self.add_province(BaseProvince(name='Pescara', short_name='PE'))
        self.add_province(BaseProvince(name='Teramo', short_name='TE'))
    def extract_float(self, s: str) -> float:
        """
        Extract the first float from a string
        :param s: The string where the float will be extracted
        :return: The float, if any found, or None
        """
        # NOTE(review): `[.]*` allows zero or several dots, so e.g. "1..5"
        # partially matches — confirm the API never emits such values.
        f = re.findall(r'([0-9]*[.]*[0-9]+)', s)
        return float(f[0]) if len(f) > 0 else None
    def set_province_indicator(self, province: BaseProvince, values: list):
        """
        Populate air quality of a province
        :param province: The province of interest
        :param values: The values from the stations of that province
        """
        for indicator, mapped in self.indicator_map.items():
            indicator_values = list()
            for v in values:
                # Only stations that report a history for this indicator count.
                if mapped in v['storico'] and len(v['storico'][mapped]) > 0:
                    indicator_val = self.extract_float(v['storico'][mapped][0]['valore'])
                    if indicator_val is not None:
                        indicator_values.append(indicator_val)
            # Average over the province's reporting stations, 2-decimal rounded.
            if len(indicator_values) > 0:
                setattr(province.quality, indicator, round(mean(indicator_values), 2))
    def _fetch_air_quality_routine(self, day: datetime):
        """
        Populate the air quality of the provinces.
        Data is fetched from 'https://sira.artaabruzzo.it/server/{date}.json' where {date}
        is the date of interest in the format YYYYMMDD
        Data about the sensors position is fetched from `https://sira.artaabruzzo.it/server/arta.json`
        :param day: The day of which the air quality wants to be known (instance of `~datetime`)
        """
        super()._fetch_air_quality_routine(day)
        # Station metadata: maps station codes to their province.
        res = requests.get('https://sira.artaabruzzo.it/server/arta.json')
        sensors_location = json.loads(res.text)['stazioni']
        date_fmt = day.strftime("%Y%m%d")
        res = requests.get(f'https://sira.artaabruzzo.it/server/{date_fmt}.json')
        if res.status_code == 200:
            sensors_values = json.loads(res.text)['stazioni']
            for p in self.provinces:
                # Pick the measurements of the stations located in this province.
                province_sensors = [x['codice'] for x in sensors_location if x['prov'] == p.short_name]
                province_values = [x for x in sensors_values if x['stazione'] in province_sensors]
                self.set_province_indicator(p, province_values)
        if self.on_quality_fetched is not None: self.on_quality_fetched(self)
fdb77f324e76c958274fb42627ae43df2fce4804 | Python | yinxx2019/python | /lab03/guess_advanced.py | UTF-8 | 1,942 | 4.59375 | 5 | [] | no_license | import random as rnd
def main():
    """Play one round of a hot/cold number-guessing game in [1, 50]."""
    # print an intro message
    print("Welcome to the Guessing Game!")
    # draw a random integer in a range of 1 to 50
    num = rnd.randint(1, 50)
    # print(num)  # debug only: uncomment to reveal the secret while testing
    # ask user to input a number and store it in the variable
    user = int(input("I picked a number between 1 and 50. Try and guess! "))
    # measure times of guessing
    i = 1
    while user != num:
        # distance between the guess and the secret decides the hint.
        difference = abs(user - num)
        i += 1
        if difference == 1:
            user = int(input("Scalding hot! Guess one more time! "))
        elif difference == 2:
            user = int(input("Extremely warm! Guess one more time! "))
        elif difference == 3:
            user = int(input("Very warm! Guess one more time! "))
        elif 3 < difference <= 5:
            user = int(input("Warm. Guess one more time! "))
        elif 5 < difference <= 8:
            user = int(input("Cold. Guess one more time! "))
        elif 8 < difference <= 13:
            user = int(input("Very cold. Guess one more time! "))
        elif 13 < difference <= 20:
            user = int(input("Extremely cold. Guess one more time! "))
        else:  # difference > 20
            user = int(input("Icy freezing miserably cold. Guess again! "))
    # Bug fix: in the original the congratulation was the (unreachable) else
    # branch of the exhaustive hint chain, and the rating chain below ran on
    # every loop iteration.  Both belong here, after the loop ends.
    print("Congratulations. You figured it out in", i, "tries.")
    if i == 1:
        print("That was lucky!")
    elif 2 <= i <= 4:
        print("That was amazing!")
    elif 5 <= i <= 6:
        print("That was okay.")
    elif i == 7:
        print("Meh.")
    elif 8 <= i <= 9:
        print("This is not your game.")
    else:  # i >= 10
        print("You are the worst guesser I've ever seen.")


main()
| true |
b42fcfa2dc0176d68856bbedda906e3951ff10f7 | Python | UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018 | /students/johnpharmd/lesson01/generators.py | UTF-8 | 840 | 3.84375 | 4 | [] | no_license | #!/usr/bin/env python
def intsum():
    """
    Yield the cumulative sums 0, 1, 3, 6, 10, ... of 0 + 1 + 2 + ...
    (the triangular numbers, starting at 0).
    """
    total, nxt = 0, 1
    yield total
    while True:
        total += nxt
        nxt += 1
        yield total
"""
Doubles the value of x
"""
x = 1
while True:
yield x
x *= 2
def fib():
"""
Fibonacci seq generator
"""
i = 1
x = 1
flist = [1, 1]
while True:
yield x
i += 1
if i == 2:
x = 1
else:
x = flist[i-2] + flist[i-1]
flist.append(x)
def prime():
"""
Correctly generates sequence of only prime numbers through 113.
"""
i = 2
while True:
if i in (2, 3, 5, 7):
yield i
elif i % 2 == 1 and i % 3 != 0 and i % 5 != 0 and i % 7 != 0:
yield i
i += 1
| true |
e563c03612a2eeded6f35431ba09d77306356bfd | Python | device999/Machine_Learning | /HelloWorld/DataTypes_I/device999/arrays.py | UTF-8 | 926 | 4.6875 | 5 | [] | no_license | # Arrays
if __name__ == '__main__':
# Write a Python program to create an array of 5 integers and display the array items. Access individual element through indexes.
# Write a Python program to get the number of occurrences of a specified element in an array. (Done in lists)
sizeArray = int(input("Input the size of arrays = "))
my_array = []
for i in range(sizeArray):
my_array.append(int(input("Input the "+str(i+1)+" element = ")))
print("Second element of the array is = "+str(my_array[1]))
# Write a Python program to reverse the order of the items in the array.
print(my_array)
my_array.reverse()
print(my_array)
# Write a Python program to remove a specified item using the index from an array.
my_array.remove(12)
print(my_array)
# Write a Python program to insert a new item before the second element in an existing array.
my_array.insert(1, 12)
print(my_array) | true |
ea6345fe38012551ddaa5a4dd489bfe36c6356a3 | Python | rpodgorney/FORGE | /local2global.py | UTF-8 | 2,051 | 3.125 | 3 | [] | no_license | #!/usr/bin/env python
#This script was writtem to read in pointsin global (earth model) along Utah FORGE wells and
#translate and rotate them to local model coordinates
#RKP June 2019: original creation
#RKP July 2020: convert to python3, add some formatting statements, and change the rotation points
import pandas as pd
import numpy as np
import math
#define the point of rotation, and set local coordinates to start at 0,0
#this is from phase 2
#rot_x = 333325.0
#rot_y = 4262825.0
#for Phase 3_10 cell mesh
rot_x = 332978.088237227
rot_y = 4261736.07638301
#for Phase 3_25 cell mesh
#rot_x = 332650.3170419511
#rot_y = 4262375.377581012
#for Phase 3_40 cell mesh
#rot_x = 332492.8032821122
#rot_y = 4263079.260273337
#for kestrel 3 stim test
#rot_x = 334746.5299
#rot_y = 4263130.831
#define the rotation angle, clockwise is negative
theta = 10
# Open the local file with pandas to begin the processing
data = pd.read_csv('node_mesh_coords.csv')
#first the rotation
#first make new dataframe
# must deep copy to make a new entry in memory as need old values for conversion
rot_data = data.copy(deep=True)
# 2-D rotation by theta degrees about the origin of the local system
rot_data['X'] = ((data['X'] * math.cos(math.radians(theta))) + (data['Y'] * math.sin(math.radians(theta))))
rot_data['Y'] = (((-data['X'] * math.sin(math.radians(theta))) + (data['Y'] * math.cos(math.radians(theta)))))
#Now translate them to the rotation point of the local system
#add in the rotation point from the x and y data, and replace them in the dataframe
rot_data['X'] += rot_x
rot_data['Y'] += rot_y
#set precision to keep things clean
# NOTE(review): set_option only affects console display, not to_csv output;
# the round(2) calls below are what actually limit the written precision.
pd.set_option('display.float_format', '{:.4E}'.format)
rot_data['X'] = rot_data['X'].round(2)
rot_data['Y'] = rot_data['Y'].round(2)
rot_data['Z'] = rot_data['Z'].round(2)
l = len(data)
print('Translation and rotation complete, there were', l , 'rows of data')
#and write the file back to a csv
rot_data.to_csv("global_" + str(theta) + '.csv', encoding='utf-8', index=False)
| true |
b8f7f78cc4cb71fae50bd278f4bd6e60e0e852b0 | Python | verstal4ik/lections | /lection7.py | UTF-8 | 1,597 | 4.40625 | 4 | [] | no_license | def f1(n:int):
"""
Рассчет факториала, на входе целые числа больше 0
Если меньше нуля, вернет ошибку.
"""
assert n >= 0, "Факториал отр не определен"
if n == 0:
return 1
return f1(n-1)*n
def gcd(a: int, b: int):
    """
    Greatest common divisor of a and b (Euclidean algorithm).

    The original subtraction-based recursion needed O(max/min) calls — it hit
    Python's recursion limit for inputs like gcd(1, 10**6) and looped forever
    when one argument was 0.  The modulo form below is iterative and runs in
    O(log min(a, b)) steps.
    """
    while b:
        a, b = b, a % b
    return a
def power(a, n):
    """
    Return a raised to the integer power n (n >= 0).

    The original recursion's only base case was n == 1, so power(a, 0)
    recursed forever and large n used O(n) stack frames.  This iterative
    version additionally handles n == 0 (returning 1) and uses no stack.
    """
    result = 1
    for _ in range(n):
        result *= a
    return result
def even_power(a: float, n: int):
    """
    Return a**n (n >= 0) via exponentiation by squaring.

    The exponent is halved each round, so only O(log n) multiplications are
    needed instead of the n-1 of the naive method.
    """
    result = 1
    base = a
    while n > 0:
        if n % 2 == 1:
            result *= base  # current bit of n is set: fold the base in
        base *= base
        n //= 2
    return result
if __name__ == '__main__':
    # Quick demo of the four functions when the module is run as a script.
    print (f1(n=4))
    print (gcd(10, 40))
    print (power(2,9))
    print (even_power(2,9))
| true |
7999563bcd10c65b331023f92abca704eb63d432 | Python | PrashantManaguli/python_tutorial | /demo.py | UTF-8 | 215 | 3.5625 | 4 | [] | no_license |
import math as m

# math.pow returns a float even for integer inputs; cast back for display.
x = m.pow(3, 4)
print(int(x))
print("Hello World")
print("This first program")
# Bug fix: the original rebound `f` to a new handle without closing the
# append handle, so the written text was only flushed whenever the garbage
# collector happened to close it.  Context managers close (and flush)
# deterministically before the file is read back.
with open("demofile.txt", "a") as f:
    f.write("Now the file has more content!")
with open("demofile.txt", "r") as f:
    print(f.read())
| true |
ef4fcaf75b520b03c689cd9316b5d8e054c806ee | Python | fanghuicocacola/Fanghui-Lang | /python/Code/Python Learner/image_down.py | UTF-8 | 1,024 | 2.546875 | 3 | [] | no_license | import requests
import time
from lxml import etree
import urllib
# Browser-like User-Agent so the site does not reject the request.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4039.400'
}
# Base id; geturl() is called with offsets from this value below.
num = 1000
def geturl(num):
    # Fetch one listing page and download its image, named after the title.
    url = f'http://www.uuhuaku.com/uua/buyrent?id={num}'
    res = requests.get(url, headers=headers, params=None)
    if res.status_code ==200 :
        html = res.content.decode('utf-8')
        html = etree.HTML(html)
        # extract the image url
        image =html.xpath('//*[@id="buyrent-Left-Img-Box"]/img/@src')
        # extract the title
        title = html.xpath('//*[@id="buyrent-Right-PaintingName"]/text()')
        # download only when both were found
        if len(image) > 0 and len(title) > 0 :
            print(num,'-',title[0],'-',image[0])
            path = title[0]+'.jpg'
            urllib.request.urlretrieve(image[0],path)
# Crawl every second id in [num+1, num+99].
for i in range(1,100,2):
    geturl(num+i)
1bef05abcb184ab3359113381af031fe1e35c270 | Python | diabeticwizard10/programming-in-python | /Programming-in-Python-II/example_project/architectures.py | UTF-8 | 2,009 | 3.15625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""example_project/architectures.py
Author -- Michael Widrich
Contact -- widrich@ml.jku.at
Date -- 01.02.2020
###############################################################################
The following copyright statement applies to all code within this file.
Copyright statement:
This material, no matter whether in printed or electronic form, may be used for
personal and non-commercial educational use only. Any reproduction of this
manuscript, no matter whether as a whole or in parts, no matter whether in
printed or in electronic form, requires explicit prior acceptance of the
authors.
###############################################################################
Architectures file of example project.
"""
import torch
class SimpleCNN(torch.nn.Module):
    """Simple CNN whose depth, width and receptive field are hyperparameters."""

    def __init__(self, n_in_channels: int = 1, n_hidden_layers: int = 3, n_kernels: int = 32, kernel_size: int = 7):
        """Build `n_hidden_layers` Conv2d+ReLU blocks with `n_kernels` filters
        of size `kernel_size`, followed by a single-channel output convolution.
        Padding of kernel_size//2 keeps the spatial dimensions unchanged."""
        super(SimpleCNN, self).__init__()
        layers = []
        in_channels = n_in_channels
        for _ in range(n_hidden_layers):
            layers += [
                torch.nn.Conv2d(in_channels=in_channels, out_channels=n_kernels,
                                kernel_size=kernel_size, bias=True,
                                padding=int(kernel_size / 2)),
                torch.nn.ReLU(),
            ]
            in_channels = n_kernels
        self.hidden_layers = torch.nn.Sequential(*layers)
        self.output_layer = torch.nn.Conv2d(in_channels=in_channels, out_channels=1,
                                            kernel_size=kernel_size, bias=True,
                                            padding=int(kernel_size / 2))

    def forward(self, x):
        """Map (N, n_channels, X, Y) input to a (N, 1, X, Y) prediction."""
        features = self.hidden_layers(x)
        return self.output_layer(features)
| true |
06c407ee0aa1f4eb6979f6dd33b0ab3882671593 | Python | SquaredPotato/jfa-go | /scripts/generate_ini.py | UTF-8 | 1,590 | 2.828125 | 3 | [
"MIT"
] | permissive | # Generates config file
import configparser
import json
import argparse
from pathlib import Path
def fix_description(desc):
    """Prefix every line of `desc` with '; ' so it reads as an INI comment."""
    return "\n".join("; " + line for line in desc.split("\n"))
def generate_ini(base_file, ini_file):
    """
    Generate an .ini file from a JSON config-base file.

    Each section's "meta" description and each setting's description are
    written as ';'-prefixed comment lines (via fix_description); boolean
    values are lower-cased.  Returns True on success.
    """
    with open(Path(base_file), "r") as handle:
        config_base = json.load(handle)

    parser = configparser.RawConfigParser(allow_no_value=True)
    for name, section in config_base["sections"].items():
        parser.add_section(name)
        if "meta" in section:
            parser.set(name, fix_description(section["meta"]["description"]))
        for key, setting in section["settings"].items():
            if "description" in setting:
                parser.set(name, fix_description(setting["description"]))
            raw = setting["value"]
            # configparser stores strings; booleans become "true"/"false".
            text = str(raw).lower() if isinstance(raw, bool) else str(raw)
            parser.set(name, key, text)

    with open(Path(ini_file), "w") as handle:
        parser.write(handle)
    return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="input config base from jf-accounts")
parser.add_argument("-o", "--output", help="output ini")
args = parser.parse_args()
print(generate_ini(base_file=args.input, ini_file=args.output))
| true |
8994d693682d10c27fe5e4a30eaf935a1217e834 | Python | yonedahayato/machine_learning_for_trading | /simple_RL_withTensorflow/helper/timer.py | UTF-8 | 2,617 | 3.109375 | 3 | [] | no_license | import copy
import csv
from datetime import datetime as dt
import os
import time
class Timer():
    """Collects named elapsed-time measurements and can dump them to CSV."""

    def __init__(self, file_name=""):
        self.start_time = ""
        # Template for one measurement record; deep-copied per measurement.
        self.result_dict_tmp = {"name": "", "start_time": "", "elapsed_time": ""}
        # Parallel structures: names in registration order, and their records.
        self.result_name_list = []
        self.results_list = []
        self.result_path = ""
        self.setting_result_path()
        # Prefix for the CSV file name written by result_write_csv().
        self.file_name = file_name

    def setting_result_path(self):
        """Choose (and create if missing) the directory CSV results go to."""
        if os.path.exists("./helper"):
            self.result_path = "./helper/time_measure_result"
        else:
            self.result_path = "./time_measure_result"
        if not os.path.exists(self.result_path):
            os.mkdir(self.result_path)

    def start(self, name=None):
        """Begin a measurement recorded under `name`.

        Raises Exception when name is None.  Bug fix: the original guard was
        `if "name" == None` — comparing the literal string "name" to None is
        always False, so a missing name was silently accepted.
        """
        if name is None:
            raise Exception("[Timer, start]: name is invalid.")
        result_dict = copy.deepcopy(self.result_dict_tmp)
        name = str(name)
        result_dict["name"] = name
        self.result_name_list.append(name)
        result_dict["start_time"] = time.time()
        self.results_list.append(result_dict)

    def stop(self, name=None):
        """Stop the measurement started under `name`, storing elapsed time.

        Raises Exception for an unknown (or missing) name.
        """
        if name is None or str(name) not in self.result_name_list:
            print("name list")
            print(self.result_name_list)
            raise Exception("[Timer, stop]: name is invalid.")
        name = str(name)
        Index = self.result_name_list.index(name)
        result_dict = self.results_list[Index]
        result_dict["elapsed_time"] = time.time() - result_dict["start_time"]

    def result_print(self):
        """Print name and elapsed time of every recorded measurement."""
        if self.results_list == []:
            print("there are no results")
            return
        for result_dict in self.results_list:
            name = result_dict["name"]
            elapsed_time = result_dict["elapsed_time"]
            print("name: {}, elapsed_time: {}".format(name, elapsed_time))

    def result_write_csv(self):
        """Write all measurements to a timestamped CSV in the result directory."""
        now = dt.now()
        now_str = now.strftime("%Y-%m-%d-%H-%M-%S")
        file_name = self.result_path + "/" + self.file_name + "_" + now_str + ".csv"
        with open(file_name, "w") as f:
            writer = csv.DictWriter(f, self.result_dict_tmp.keys())
            writer.writeheader()
            for result_dict in self.results_list:
                writer.writerow(result_dict)
def time_measure():
    """Demo: record five one-second measurements, print them, save to CSV."""
    timer = Timer()
    for cnt in range(5):
        timer.start(name=cnt)
        time.sleep(1)
        timer.stop(name=cnt)
    timer.result_print()
    timer.result_write_csv()
if __name__ == "__main__":
    time_measure()
| true |
eeb5b4605c0c0964e0468d7a69c27b74b550312c | Python | chinnuz99/luminarpython | /constructor/personcalss(constructor).py | UTF-8 | 617 | 4.03125 | 4 | [] | no_license | #create person class using constructor,use inheritance in constructor
class Person:
    """A person described by name, age and gender."""

    def __init__(self, name, age, gender):
        self.name = name
        self.age = age
        self.gender = gender

    def printval(self):
        """Print each attribute on its own line as 'label value'."""
        for label, value in (("name", self.name),
                             ("age", self.age),
                             ("gender", self.gender)):
            print(label, value)
class Student(Person):
    # Extends Person with a roll number and a mark.
    def __init__(self,rollno,mark,name,age,gender):
        # Delegate the common person fields to the base constructor.
        super().__init__(name,age,gender)
        self.rollno=rollno
        self.mark=mark
    def print(self):
        # Method named `print`; inside its body the builtin is still reachable.
        print(self.rollno)
        print(self.mark)
# Demo: build a student, print inherited fields, then the student-only ones.
cr=Student(2,34,"anu",22,"female")
cr.printval()
cr.print()
| true |
71f4862b76283158e6ac21871f621f7811b7cee3 | Python | kisisjrlly/Trump-twitter-analysis | /Code_final/Ncut.py | UTF-8 | 4,850 | 2.8125 | 3 | [] | no_license | # The follow function only calculates the value of after clusering
'''
word_net_file:the net file of word or tweet
K:the number of clusters
'''
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
import math
def cal_gauss(b):
    """Return the RBF (Gaussian-kernel) affinity matrix of `b` with the
    self-similarity diagonal zeroed out."""
    affinity = rbf_kernel(b)
    np.fill_diagonal(affinity, 0)
    return affinity
def ncut(matrix, K, ss):
    """
    Evaluate the normalized-cut objective for the current node ordering.

    `matrix` carries the node index in column 0 and the affinity matrix in the
    remaining columns; `ss` holds cumulative cluster boundaries.  For each of
    the first K-1 clusters the ratio (within-cluster weight / weight crossing
    out of the cluster) is accumulated.
    NOTE(review): the last cluster contributes no term — confirm intent.
    Returns (ncut_value, matrix).
    """
    weights = matrix[..., 1:]
    total = 0
    for i in range(K - 1):
        within = weights[ss[i]:ss[i + 1], ss[i]:ss[i + 1]].sum()
        crossing = weights[ss[i]:ss[i + 1]].sum() - within
        total += within / crossing
    return (total, matrix)
# simulated annealing technology
def sa(source,K):
    '''
    Simulated-annealing search for a K-way partition minimizing ncut().

    Annealing parameters used below:
      initial temperature B=100, final temperature b=1,
      cooling factor t=0.9, inner iterations loop=10.
    (The original docstring advertised t=0.99 and loop=10000, which does
    not match the constants actually set in the code.)
    '''
    res=np.loadtxt(source)
    l=len(res)
    # Draw random cluster sizes until they sum exactly to l.
    # NOTE(review): this rejection loop can spin for a long time since a
    # random K-vector from [1, l//5) rarely sums to exactly l.
    while 1:
        s=np.random.randint(1,l//5,K)
        if s.sum()==l:
            print(s)
            break
    # Convert sizes to cumulative boundaries, shifted so ss[0] == 0.
    temp=0
    ss=s
    for i in range(len(s)):
        temp+=s[i]
        ss[i]=temp
    ss=ss-s[0]
    print(ss)
    # Prepend a 1-based node-index column so rows stay identifiable after swaps.
    word_index=np.array(range(1,l+1))
    matrix=np.c_[word_index,res]
    # initial random shuffle of the network matrix:
    #np.random.shuffle(matrix)
    # Randomly swap 1000 row/column pairs, keeping the matrix symmetric layout.
    for i in range(1000):
        a=np.random.randint(1,l)
        b=np.random.randint(1,l)
        matrix[[a-1,b-1],:]=matrix[[b-1,a-1],:]
        matrix[:,[a-1,b-1]]=matrix[:,[b-1,a-1]]
    # Computing weights would need row/column positions kept in sync.
    # A Gaussian kernel is not used here: the 0-1 matrix is already a
    # distance matrix, so this step is optional:
    #matrix=np.c_[word_index,cal_gauss(matrix[...,1:])]
    B=100
    b=1
    t=0.9
    loop=10
    p=10
    (Best_cut_value,Best_cut_com)=ncut(matrix,K,ss)
    # Anneal: at each temperature, propose int(B*p) boundary moves per sweep.
    while B>b:
        for m in range(loop):
            for j in range(int(B*p)):
                #print(j)
                choice=np.random.randint(1,l)
                #print("choice:%d" %choice)
                #print(ss)
                # Which cluster the chosen node falls in (1-based via boundary count).
                choice_index=len(np.where(ss<choice)[0])
                #print("choice_index:%d"%choice_index)
                if choice_index==1:
                    # First cluster: shrink it by moving its upper boundary down.
                    matrix[[choice-1, ss[1]-1], :] = matrix[[ss[1]-1, choice-1], :]
                    matrix[:,[choice, ss[1]]] = matrix[:,[ss[1], choice]]
                    ss[1]=ss[1]-1
                elif choice_index==K:
                    # Last cluster: grow the previous boundary upward.
                    matrix[[choice-1, ss[K-1]], :] = matrix[[ss[K-1], choice-1], :]
                    matrix[:,[choice, ss[K-1]]] = matrix[:,[ss[K-1], choice]]
                    ss[K-1]=ss[K-1]+1
                else:
                    # Interior cluster: randomly move either adjacent boundary.
                    if np.random.rand()>0.5:
                        matrix[[choice-1,ss[choice_index-1]-1],:]=matrix[[ss[choice_index-1]-1,choice-1],:]
                        matrix[:,[choice,ss[choice_index-1]]]=matrix[:,[ss[choice_index-1],choice]]
                        ss[choice_index]-=1
                    else:
                        matrix[[choice-1,ss[choice_index-1]-1],:]=matrix[[ss[choice_index-1]-1,choice-1],:]
                        matrix[:,[choice,ss[choice_index-1]]]=matrix[:,[ss[choice_index-1],choice]]
                        ss[choice_index-1]+=1
                (temp_cut_value,temp_cut_com)=ncut(matrix,K,ss)
                if temp_cut_value<Best_cut_value:
                    # Strictly better: always accept.
                    (Best_cut_value,Best_cut_com)=(temp_cut_value,temp_cut_com)
                else:
                    # Worse: accept with Metropolis probability exp(-delta/B).
                    if np.random.rand() < math.exp(-( temp_cut_value- Best_cut_value) / B):
                        (Best_cut_value,Best_cut_com)=(temp_cut_value,temp_cut_com)
        B=t*B
    return (Best_cut_com,ss)
if __name__=='__main__':
    # Run simulated-annealing ncut on the word network and write both the
    # permuted matrix and the word groups back out.
    source='C:/Users/Administrator/Desktop/net_word_file_matrix.txt'
    word_file="C:/Users/Administrator/Desktop/NoRepatefile.txt"
    word_file_after_cut='C:/Users/Administrator/Desktop/word_file_after_cut.txt'
    K=10
    (result,ss)=sa(source,K)
    np.savetxt('C:/Users/Administrator/Desktop/net_word_after_cut.txt',result)
    # Column 0 of the result holds the (1-based) original word indices.
    word_index_after_cut=[int(x) for x in list(result[...,0])]
    f=open(word_file)
    g=open(word_file_after_cut,'w')
    Table=f.read().split()
    L=len(Table)
    # Walk the permuted word order, emitting one line per group boundary.
    # NOTE(review): the loop runs over len(Table) while the index list has
    # only as many entries as matrix rows -- confirm the two always match.
    i=j=1
    temp=[]
    for i in range(L):
        temp.append(Table[word_index_after_cut[i]-1])
        if i==ss[j]:
            g.write(' '.join(temp)+'\n')
            if j<K-1:
                j+=1
            temp=[]
    g.write(' '.join(temp)+'\n')
    g.close()
    f.close()
'''
#用于标记有没有被选择过
flag=[0 for x in range(l)]
word_index=[x for x in range(l)]
matrix=np.zeros(l*l).reshape(l,l)
for j in range(l):
temp_index=np.random.randint(1,l)
while 1:
temp_index=np.random.mm1,l)
print(temp_index)
if (flag[temp_index]==0):
matrix[j]=res[temp_index]
flag[temp_index]=1
word_index[j]=temp_index
break
'''
| true |
82ec8a5712d07a76b9bb8d2a3247b64f4eb64f6c | Python | iiepobka/3_lesson_pythons_algorithms | /task_3.py | UTF-8 | 688 | 3.75 | 4 | [] | no_license | # В массиве случайных целых чисел поменять местами минимальный и максимальный
# элементы.
from random import randint
COUNT_ITEMS = 10
START_ITEMS = -100
STOP_ITEMS = 100

# Random test data in [START_ITEMS, STOP_ITEMS].
my_list = [randint(START_ITEMS, STOP_ITEMS) for x in range(COUNT_ITEMS)]
print(my_list)

# Track the *indices* of the extremes, seeded from element 0.
# Bug fix: the original seeded both running values with 0, so in an
# all-positive list the minimum was never found (index 0 got swapped
# instead), and symmetrically for the maximum in an all-negative list.
max_item_index = 0
min_item_index = 0
for n, i in enumerate(my_list):
    if i > my_list[max_item_index]:
        max_item_index = n
    if i < my_list[min_item_index]:
        min_item_index = n

# Swap the minimal and maximal elements in place.
my_list[max_item_index], my_list[min_item_index] = my_list[min_item_index], my_list[max_item_index]
print(my_list)
| true |
2018ee148b163fef85f15b1ce2ff10dbb728eb4c | Python | rheiland/PhysiCell-EMEWS-2 | /cancer-immune/EMEWS-scripts/python/test/py_tests.py | UTF-8 | 1,148 | 2.796875 | 3 | [
"BSD-3-Clause"
] | permissive | import unittest
import xml.etree.ElementTree as ET
import params2xml
class TestParamsToXML(unittest.TestCase):
    """Checks that params_to_xml writes dotted parameter paths into the
    PhysiCell XML tree, decoding "type:units:value" triples into element
    attributes plus text."""

    def test_params_to_xml(self):
        params = {'user_parameters.tumor_radius' : 'foo:bar:100.32',
                  'user_parameters.number_of_immune_cells' : '10',
                  'overall.max_time' : '2'}
        src = './test/test_data/PhysiCell.xml'
        dst = './test/test_data/xml_out.xml'
        params2xml.params_to_xml(params, src, dst)

        tree = ET.parse(dst)
        # Triple-encoded value: type/units become attributes, value the text.
        radius = tree.findall("./user_parameters/tumor_radius")[0]
        self.assertEqual("foo", radius.get('type'))
        self.assertEqual("bar", radius.get('units'))
        self.assertEqual("100.32", radius.text)
        # Plain value: defaults are applied for type and units.
        cells = tree.findall("./user_parameters/number_of_immune_cells")[0]
        self.assertEqual("int", cells.get('type'))
        self.assertEqual("dimensionless", cells.get('units'))
        self.assertEqual("10", cells.text)
        max_time = tree.findall("./overall/max_time")[0]
        self.assertEqual('2', max_time.text)
# Allow running this test module directly (python py_tests.py).
if __name__ == '__main__':
    unittest.main()
# AtCoder p03488 -- appears to be the "FT Robot" reachability problem:
# the instruction string is split on 'T' (turns); run lengths alternate
# between x-axis and y-axis moves.  TODO confirm against the problem page.
S = input().split('T')
S = [len(s) for s in S]
X = S[::2]
Y = S[1::2]
from collections import defaultdict,deque
dpx = defaultdict(int)  # reachable x sums (translated: dict defaulting to 0)
dpy = defaultdict(int)  # reachable y sums (translated: dict defaulting to 0)
# Start from taking every run with a + sign; later runs may flip to -.
dpx[sum(X)] = 1
dpy[sum(Y)] = 1
que = deque()
for i,x in enumerate(X):
    if i == 0:
        # The very first x-run's direction is fixed.
        continue
    # k - 2*x flips run i from + to -.  New sums are buffered in `que`
    # so dpx is not mutated while being iterated.
    for k,v in dpx.items():
        if v:
            que.append(k-2*x)
    while que:
        q = que.popleft()
        dpx[q] = 1
for i,y in enumerate(Y):
    # Every y-run's sign is free (no i == 0 exception).
    for k,v in dpy.items():
        if v:
            que.append(k-2*y)
    while que:
        q = que.popleft()
        dpy[q] = 1
x,y = map(int,input().split())
print('Yes' if dpx[x] and dpy[y] else 'No')
| true |
7e8e2cb78637c4576d8bae7ee9b0455b6345e143 | Python | Tan-Qiyu/simplicial_neural_networks | /data/s2_7_cochains_to_missingdata.py | UTF-8 | 6,256 | 3.3125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""
Input: Simplicial complex , k-cochains and percentage of missing data
Output: k-cochains where the percentage of missing data has been replaced by a placehold values
"""
import numpy as np
from scipy import sparse
from scipy.sparse import coo_matrix
from random import shuffle
import time
def build_missing_values(simplices, percentage_missing_values, max_dim=10):
    """
    Randomly mark a given percentage of the simplices of each dimension
    as "missing".

    Parameters
    ----------
    simplices: list of dictionaries
        One dict per dimension d; keys are frozensets (size d + 1) of the
        0-simplices that constitute the d-simplices, values are the simplex
        indexes in the boundary and Laplacian matrices.
    percentage_missing_values: integer
        Percentage of simplices to delete in every dimension.
    max_dim: integer
        Maximal dimension of the simplices to be considered.

    Returns
    ----------
    missing_values: list of dictionaries
        One dict per dimension d; keys are the deleted d-simplices, values
        their indexes in the boundary and Laplacian matrices.
    """
    missing_values = [dict() for _ in range(max_dim + 1)]
    for dim_idx in range(max_dim + 1):
        keys = list(simplices[dim_idx].keys())
        n_missing = int(np.ceil((len(keys) / 100) * percentage_missing_values))
        # Shuffle a copy and take the first n_missing as the deleted set.
        shuffled = np.copy(keys)
        shuffle(shuffled)
        for simplex in shuffled[:n_missing]:
            d = len(simplex)
            missing_values[dim_idx][simplex] = simplices[d - 1][simplex]
    return missing_values
### Create the input cochains by substituting a statistic of the known
### values into the unseen (missing) collaborations.
def build_damaged_dataset(cochains, missing_values, function=np.median):
    """
    Replace the missing values in the dataset with a value inferred from
    the known data (by default the median of each dimension's values).

    Parameters
    ----------
    cochains: list of dictionaries
        One dict per dimension k; keys are k-simplices, values k-cochains.
    missing_values: list of dictionaries
        One dict per dimension k; keys are the missing k-simplices.
    function: callable
        Statistic used as the placeholder value (default np.median).

    Returns
    ----------
    damaged_dataset: object ndarray of dictionaries
        Copy of `cochains` where each missing simplex's value has been
        replaced by `function` of that dimension's values.  Bug fix: the
        caller's input dicts are no longer mutated -- np.copy on a list of
        dicts is shallow (the dicts themselves were shared), so the
        original implementation silently corrupted `cochains` too.
    """
    max_dim = len(cochains)
    # Placeholder value per dimension, computed from all known values.
    placeholders = []
    for dim in range(max_dim):
        values = [v for v in cochains[dim].values()]
        placeholders.append(function(values))
    damaged_dataset = np.copy(cochains)
    for i in range(max_dim):
        # Replace the shared dict with a fresh copy before writing into it.
        damaged_dataset[i] = dict(damaged_dataset[i])
        for simplex in missing_values[i]:
            dim = len(simplex)
            damaged_dataset[i][simplex] = placeholders[dim - 1]
    return damaged_dataset
### Indices and values of the "seen" (not missing) simplices.
def built_known_values(missing_values, simplices):
    """
    Return, per dimension, the simplices that were NOT marked missing.

    Parameters
    ----------
    missing_values: list of dictionaries
        One dict per dimension d; keys are the missing d-simplices,
        values their indexes.
    simplices: list of dictionaries
        One dict per dimension d; keys are frozensets of 0-simplices,
        values the simplex indexes.

    Returns
    ----------
    known_values: list of dictionaries
        One dict per dimension d; keys are the surviving d-simplices,
        values their indexes.  (The list has len(simplices) + 1 entries,
        matching the original implementation.)
    """
    max_dim = len(simplices)
    known_values = [dict() for _ in range(max_dim + 1)]
    for i in range(max_dim):
        remaining = set(simplices[i].keys()) - set(missing_values[i].keys())
        for simplex in list(remaining):
            known_values[i][simplex] = simplices[len(simplex) - 1][simplex]
    return known_values
if __name__ == '__main__':
    start = time.time()
    def timeit(name):
        """Print wall time elapsed since the script started."""
        print('wall time ({}): {:.0f}s'.format(name, time.time() - start))
    starting_node=150250
    percentage_missing_values=30
    cochains = np.load(f's2_3_collaboration_complex/{starting_node}_cochains.npy')
    simplices = np.load(f's2_3_collaboration_complex/{starting_node}_simplices.npy')
    # NOTE(review): the percentage is hard-coded to 30 again here instead
    # of passing percentage_missing_values -- confirm they should match.
    missing_values=build_missing_values(simplices,percentage_missing_values=30,max_dim=10)
    damaged_dataset=build_damaged_dataset(cochains,missing_values,function=np.median)
    known_values=built_known_values(missing_values,simplices)
    timeit('process')
    # Persist all three artefacts next to the inputs.
    np.save(f's2_3_collaboration_complex/{starting_node}_percentage_{percentage_missing_values}_missing_values.npy', missing_values)
    np.save(f's2_3_collaboration_complex/{starting_node}_percentage_{percentage_missing_values}_input_damaged.npy', damaged_dataset)
    np.save(f's2_3_collaboration_complex/{starting_node}_percentage_{percentage_missing_values}_known_values.npy', known_values)
    timeit('total')
| true |
573100e6b4c7664506537384433af3d815a6895f | Python | whistlepunk-labs/Pixel-Grid | /Grid.py | UTF-8 | 4,227 | 2.890625 | 3 | [] | no_license | import bpy
import math
class Grid:
    """A sizeX x sizeY grid of emissive Blender cubes used as "pixels"."""

    def __init__(self,width,height):
        #Width and height of array
        self.sizeX = width
        self.sizeY = height
        #instantiate 2d array for grid (filled with objects below)
        self.grid = [[0 for i in range(self.sizeY)] for j in range(self.sizeX)]
        self.instantiate_grid()

    #Instantiates the grid objects
    #arranges it as a flat, vertical standing grid
    def instantiate_grid(self):
        """Create (or reuse) one emissive cube per cell, standing upright in
        the XZ plane, each with its own emission-shader material."""
        for x in range(self.sizeX):
            for y in range(self.sizeY):
                cubename = "GridCube ({0},{1})".format(x,y)
                #try to reuse cubes from earlier generation to save processing time
                try:
                    ob = bpy.data.objects[cubename]
                    print("Found object {0},{1}".format(x,y))
                except:
                    print("instantiating object {0},{1}".format(x,y))
                    #create cube
                    bpy.ops.mesh.primitive_cube_add()
                    #bpy.ops.mesh.primitive_ico_sphere_add(radius=.5,location=(x*1, 0, sizeY - y))
                    ob = bpy.context.active_object
                    ob.name = "GridCube ({0},{1})".format(x,y)
                    ob.scale = (.35,.35,.35)
                    ob.location = (x*1,0,self.sizeY-y)
                #add it to object array
                self.grid[x][y] = ob
                #create new material and add it to the object
                mat_name = "Grid-Material({0},{1})".format(x,y)
                mat = bpy.data.materials.new(name=mat_name)
                ob.data.materials.append(mat)
                #make the new material use nodes, then wire an emission
                #shader into the material output surface socket
                mat.use_nodes = True
                node = mat.node_tree.nodes.new(type="ShaderNodeEmission")
                inp = bpy.data.materials[mat_name].node_tree.nodes['Material Output'].inputs['Surface']
                outp = bpy.data.materials[mat_name].node_tree.nodes['Emission'].outputs['Emission']
                bpy.data.materials[mat_name].node_tree.links.new(inp,outp)

    #draws the image onto the array of objects
    #draws only the topleft pixels of pictures
    def draw_image(self,image):
        """Copy the image's top-left sizeX x sizeY pixels onto the cubes'
        emission colors (image must support getpixel, e.g. a PIL Image)."""
        for x in range(self.sizeX):
            for y in range(self.sizeY):
                node = self.grid[x][y].active_material.node_tree.nodes['Emission']
                pix = image.getpixel((x,y))
                print("Painting Pixel: {0},{1}".format(x,y))
                rgb = self.color_correct(pix,3)
                node.inputs[0].default_value = rgb

    #Converts image color to blender friendly tuple
    #gamma color corrects the image because apparently blender doesn't color correct rgb automatically
    def color_correct(self,color,gamma):
        """Map 0-255 RGB to 0.0-1.0 with gamma correction; alpha fixed at 1."""
        r = pow(color[0]/255,gamma)
        g = pow(color[1]/255,gamma)
        b = pow(color[2]/255,gamma)
        a = 1
        return (r,g,b,a)

    #arrange grid as a cyllinder of wrapping pixels
    def arrange_as_cyllinder(self):
        """Re-position the cubes on a vertical cylinder, one column per x.

        NOTE(review): the radius is derived from sizeY; for a seamless wrap
        one might expect it to depend on sizeX -- confirm intent.
        """
        radius = (self.sizeY/(math.pi*2))*2
        theta = math.radians(360/self.sizeX)
        for x in range(self.sizeX):
            for y in range(self.sizeY):
                ob = self.grid[x][y]
                ob.location = (radius*math.cos(x*theta),radius*math.sin(x*theta),self.sizeY-y)
                ob.rotation_euler = (0,0,x*theta)
#clears the screen and all materials (Includes camera BEWARE!)
def clear():
    """Delete every object in the scene (camera included!) and remove all materials."""
    #delete all objects
    bpy.ops.object.select_all(action='SELECT')
    bpy.ops.object.delete(use_global=False)
    #delete materials: unlink users first so removal succeeds
    for material in bpy.data.materials:
        material.user_clear()
        bpy.data.materials.remove(material)
e85db833f505953608bb1df10b61513f608a6ddf | Python | bdebenon/TAMU_Science_Competition | /SumResults.py | UTF-8 | 1,362 | 2.546875 | 3 | [] | no_license | from google.cloud import storage
import pandas as pd
import numpy as np
import os
import io
import csv
import math
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.linear_model import LinearRegression
# Sum per-date Actual/Prediction values across community result files 10..77.
actualDict = {}
predictedDict = {}
for x in range(10,78):
    # NOTE: this branch is dead -- the range starts at 10, so x < 10 never
    # holds; presumably left over from an earlier range starting below 10.
    if (x < 10):
        communityNumber = "0" + str(x)
    else:
        communityNumber = "" + str(x)
    communityName = "community_" + communityNumber  # built but unused below
    file_name = 'results/community_' + communityNumber + '.csv'
    df = pd.read_csv(file_name)
    dates = df['Date'].values
    actual = df['Actual'].values
    prediction = df['Prediction'].values
    # Accumulate this community's rows into the date-keyed totals.
    x = 0  # NOTE: reuses/overwrites the loop variable as a row index
    for date in dates:
        if date in actualDict:
            actualDict[date] += actual[x]
            predictedDict[date] += prediction[x]
        else:
            actualDict[date] = actual[x]
            predictedDict[date] = prediction[x]
        x += 1
# Assemble the aggregated frame (Date, Actual, Prediction) and write it out.
resultsActual = pd.DataFrame(list(actualDict.items()), columns=['Date', 'Actual'])
resultsPredicted = pd.DataFrame(list(predictedDict.items()), columns=['Date', 'Prediction'])
results = resultsActual.copy()
results['Prediction'] = resultsPredicted['Prediction']
file_name = 'OverallResults.csv'
results.to_csv(file_name, encoding='utf-8', index=False)
| true |
# Fibonacci sequence index lookup (CodeAbbey-style).
# Defect fixed: the file was an exercise template with placeholder tokens
# (`___`, `r..`, `i..`, missing `=`) and did not parse as Python at all.
# Reconstructed from the template's own structure and comments.

# generating the Fibonacci Sequence once (indices 0..999)
fib_store = [0, 1]
for i in range(2, 1000):
    fib_store.append(fib_store[-2] + fib_store[-1])
# read the number of queries, then each query value
for i in range(int(input())):
    fib_num = int(input())
    # using index() to find the position of the element in the sequence
    fib_index = fib_store.index(fib_num)
    print(fib_index, end=' ')
dc53bba28aa0f17d9e92e80b6639f72e03554c52 | Python | ajnelson/freenas | /src/freenas-pkgtools/lib/Package.py | UTF-8 | 2,972 | 2.78125 | 3 | [] | no_license | import os
import sys
import Exceptions
NAME_KEY = "Name"
VERSION_KEY = "Version"
CHECKSUM_KEY = "Checksum"
SIZE_KEY = "FileSize"
UPGRADES_KEY = "Upgrades"

class Package(object):
    """One package manifest entry: name, version, checksum, optional file
    size, and a list of delta-upgrade descriptors.

    All state lives in self._dict, keyed by the *_KEY constants above.
    """
    # Legacy class-level defaults, kept for backward compatibility.
    _name = None
    _version = None
    _checksum = None
    _size = None
    _updates = None
    _dirty = False

    def __init__(self, *args):
        """Construct from a single manifest dict, or from positional
        (name[, version[, checksum]]) arguments."""
        self._dict = {}
        if len(args) == 1 and isinstance(args[0], dict):
            tdict = args[0]
            for k in tdict.keys():
                if k == UPGRADES_KEY:
                    # Copy each upgrade entry so we never alias caller dicts.
                    updates = []
                    for update in tdict[UPGRADES_KEY]:
                        updates.append(update.copy())
                    self._dict[UPGRADES_KEY] = updates
                else:
                    self._dict[k] = tdict[k]
        else:
            if len(args) > 0: self.SetName(args[0])
            if len(args) > 1: self.SetVersion(args[1])
            if len(args) > 2: self.SetChecksum(args[2])
        return

    def dict(self):
        """Return the underlying manifest dict.

        (The original carried unreachable lines after this return that
        referenced an undefined `rv`; they were dead code and are removed.)
        """
        return self._dict

    def Size(self):
        """Return the file size, or None when not recorded."""
        if SIZE_KEY in self._dict:
            return self._dict[SIZE_KEY]
        return None

    def SetSize(self, size):
        self._dict[SIZE_KEY] = size

    def Name(self):
        return self._dict[NAME_KEY]

    def SetName(self, name):
        self._dict[NAME_KEY] = name
        return

    def Version(self):
        return self._dict[VERSION_KEY]

    def SetVersion(self, version):
        self._dict[VERSION_KEY] = version
        return

    def Checksum(self):
        """Return the checksum, or None when not recorded."""
        if CHECKSUM_KEY in self._dict:
            return self._dict[CHECKSUM_KEY]
        return None

    def SetChecksum(self, checksum):
        self._dict[CHECKSUM_KEY] = checksum
        return

    def SetUpdates(self, updates):
        """Replace the upgrade list with the given entries.

        Bug fix: the original read `up[SIZE_KEY]` -- an undefined name --
        instead of `upd[SIZE_KEY]`, so any entry carrying a size raised
        NameError.
        """
        self._dict[UPGRADES_KEY] = []
        for upd in updates:
            size = None
            if SIZE_KEY in upd:
                size = upd[SIZE_KEY]
            self.AddUpdate(upd[VERSION_KEY], upd[CHECKSUM_KEY], size)
        return

    def AddUpdate(self, old, checksum, size = None):
        """Append one upgrade descriptor (upgrading *from* version `old`)."""
        if UPGRADES_KEY not in self._dict:
            self._dict[UPGRADES_KEY] = []
        t = { VERSION_KEY : old, CHECKSUM_KEY : checksum }
        if size is not None: t[SIZE_KEY] = size
        self._dict[UPGRADES_KEY].append(t)
        return

    def Updates(self):
        """Return the upgrade list ([] when none are recorded)."""
        if UPGRADES_KEY in self._dict:
            return self._dict[UPGRADES_KEY]
        return []

    def FileName(self, old = None):
        # Format is <name>-<version>.tgz, or
        # <name>-<old>-<version>.tgz if old is not None (delta package).
        if old is None:
            return "%s-%s.tgz" % (self.Name(), self.Version())
        else:
            return "%s-%s-%s.tgz" % (self.Name(), old, self.Version())
| true |
af4a954b59a873642c0b594455b7a539f3460a2f | Python | B31G3L/GITReminder | /src/object/Status.py | UTF-8 | 360 | 2.59375 | 3 | [] | no_license |
class Status:
    """Tracks a simple started/stopped flag for the application.

    Bug fix: the original stored booleans on `self.start` and `self.stop`,
    which shadowed the start()/stop() methods on every instance, so calling
    `status.start()` raised TypeError ('bool' object is not callable).
    State now lives in a single private flag; the public method interface
    (start, stop, isStopped, isStarted) is unchanged.
    """
    def __init__(self, *args, **kwargs):
        # Start in the stopped state, as the original did.
        self._stopped = True

    def start(self):
        """Mark the status as started/running."""
        self._stopped = False

    def stop(self):
        """Mark the status as stopped."""
        self._stopped = True

    def isStopped(self):
        return self._stopped

    def isStarted(self):
        return not self._stopped
class Solution:
    def multiply(self, num1: str, num2: str) -> str:
        """Multiply two non-negative integers given as decimal strings and
        return the product as a decimal string (LeetCode 43), without
        converting the whole numbers via int()."""
        # Little-endian digit lists.
        a = [ord(c) - 48 for c in reversed(num1)]
        b = [ord(c) - 48 for c in reversed(num2)]
        prod = [0] * (len(a) + len(b))
        # Schoolbook multiplication with a running carry per row.
        for i, da in enumerate(a):
            carry = 0
            for j, db in enumerate(b):
                carry += prod[i + j] + da * db
                prod[i + j] = carry % 10
                carry //= 10
            prod[i + len(b)] = carry
        # Drop leading zeros (stored at the high-index end).
        top = len(prod) - 1
        while top >= 0 and prod[top] == 0:
            top -= 1
        if top < 0:
            return '0'
        return ''.join(str(d) for d in prod[top::-1])
| true |
76513aa6da720cd2a267ec0b6ab4c313a538a01e | Python | laeshiny/slack | /slack/__init__.py | UTF-8 | 1,770 | 2.734375 | 3 | [] | no_license | import urllib
import urllib2
SLACK_API_URL = 'https://slack.com/api/{METHOD}'
REQUEST_TIMEOUT = 10
__all__ = ['Chat', 'Files']
class Request(object):
    """Base class holding Slack credentials and issuing POST requests.

    NOTE: written for Python 2 (urllib.urlencode / urllib2), matching the
    module-level imports.
    """
    def __init__(self, token, channel, timeout):
        self._slack_api_url = SLACK_API_URL
        self._token = token
        self._channel = channel
        self._timeout = timeout

    def _request(self, url, values):
        """POST `values` (plus the auth token) to `url`; return the response.

        Mutates `values` by injecting the token, as the original did.
        """
        values['token'] = self._token
        payload = urllib.urlencode(values)
        request = urllib2.Request(url=url, data=payload)
        return urllib2.urlopen(request, timeout=self._timeout)
class Chat(Request):
    """Wrapper around the Slack `chat.postMessage` Web API method."""
    def __init__(self, token, channel, timeout):
        super(Chat, self).__init__(token, channel, timeout)

    def post_message(self, text):
        """Post `text` to the configured channel; return the HTTP response."""
        endpoint = self._slack_api_url.format(METHOD='chat.postMessage')
        payload = {'channel': self._channel, 'text': text}
        return self._request(endpoint, payload)
class Files(Request):
    """Wrapper around the Slack `files.upload` Web API method."""
    def __init__(self, token, channel, timeout):
        super(Files, self).__init__(token, channel, timeout)

    def upload_file(self, filename):
        """Upload the contents of `filename` to the configured channel;
        return the HTTP response."""
        endpoint = self._slack_api_url.format(METHOD='files.upload')
        body = self._get_content(filename)
        payload = {'channel': self._channel, 'filename': filename, 'content': body}
        return self._request(endpoint, payload)

    def _get_content(self, filename):
        """Return the file's contents as a single string."""
        with open(filename) as f:
            return f.read()
class Slack(object):
    """Facade bundling the Chat and Files helpers under one token/channel."""
    def __init__(self, token, channel, timeout=REQUEST_TIMEOUT):
        self._token = token
        self._channel = channel
        self.chat = Chat(token, channel, timeout)
        self.files = Files(token, channel, timeout)
| true |
# Reads pairs of integers until the second is smaller than the first.
# For each pair: print the parity of the first number ('Par'/'Impar'),
# then the ratio second/first; after the loop, print first/second.
num1 = 0
num2 = 0
while num2 >= num1:
    n = input()
    m = input()
    num1 = int(n)
    num2 = int(m)
    # Bug fix: the original tested `n % 2` on the raw input *string*,
    # which raises TypeError on Python 3; test the parsed integer.
    if num1 % 2 == 0:
        print('Par')
    else:
        print('Impar')
    print(num2/num1)
print(num1/num2)
9a41b27fa0e991c62d10cae27adc4020d74ac29d | Python | Aasthaengg/IBMdataset | /Python_codes/p02949/s507326618.py | UTF-8 | 972 | 2.953125 | 3 | [] | no_license | import sys
from collections import deque
def dfs(x, s):
    """Return the set of nodes reachable from `s` (inclusive) in the
    adjacency-list graph `x`, using an iterative depth-first walk."""
    seen = {s}
    stack = deque([s])
    while stack:
        cur = stack.pop()
        for nxt in x[cur]:
            if nxt not in seen:
                seen.add(nxt)
                stack.append(nxt)
    return seen
def bellmanford(edges):
    """Longest-path relaxation over `edges` (u, v, gain) starting at node 0.

    Relies on the module-global N (node count).  Once a full pass makes no
    update, returns max(0, best score at node N-1); if updates still occur
    after N passes (a reachable positive cycle), returns -1.
    """
    best = [-float("inf")] * N
    best[0] = 0
    for _ in range(N):
        changed = False
        for u, v, c in edges:
            if best[u] + c > best[v]:
                best[v] = best[u] + c
                changed = True
        if not changed:
            return max(0, best[-1])
    return -1
# Read graph: N nodes, M directed edges, P coins paid per traversal.
N,M,P = map(int, input().split())
ABC = []
Adake = [[] for _ in range(N)]  # forward adjacency (A -> B)
Bdake = [[] for _ in range(N)]  # reverse adjacency (B -> A)
for _ in range(M):
    A,B,C = map(int, input().split())
    ABC.append((A-1,B-1,C-P))  # net gain per edge after paying P
    Adake[A-1].append(B-1)
    Bdake[B-1].append(A-1)
# Keep only edges whose endpoints lie on some 0 -> N-1 walk: reachable
# from node 0 forward AND able to reach node N-1 (reverse reachability).
U = dfs(Adake,0) & dfs(Bdake,N-1)
ABC = [(a,b,c) for (a,b,c) in ABC if a in U and b in U]
print(bellmanford(ABC))
a3a0f93898aa660184ee291bc66bf05ac1ae1c9f | Python | vinay10949/DataStructuresAndAlgorithms | /Problems/Stacks/DesignStackPushPopGetMiddleDeleteMiddle.py | UTF-8 | 2,678 | 4.40625 | 4 | [] | no_license | '''
***************************************************************************************************
Problem Statement:
Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
getMin() -- Retrieve the minimum element in the stack.
Example 1:
Input
["MinStack","push","push","push","getMin","pop","top","getMin"]
[[],[-2],[0],[-3],[],[],[],[]]
Output
[null,null,null,null,-3,null,0,-2]
Explanation
MinStack minStack = new MinStack();
minStack.push(-2);
minStack.push(0);
minStack.push(-3);
minStack.getMin(); // return -3
minStack.pop();
minStack.top(); // return 0
minStack.getMin(); // return -2
Constraints:
Methods pop, top and getMin operations will always be called on non-empty stacks.
'''
import sys
class node:
    """Linked-list node holding one stack value (prev is unused by Stack)."""
    def __init__(self, info):
        self.info = info
        self.next = None
        self.prev = None

class Stack:
    """Linked-list stack with push/pop/peek/display.

    On underflow, pop/peek/display print a message and terminate the
    process via sys.exit(0), preserving the original script's behaviour.
    """
    def __init__(self):
        self.top = None

    def isEmpty(self):
        """Return True when the stack holds no elements."""
        return self.top is None

    def push(self, data):
        """Push `data` on top of the stack.

        (The original's 'Stack overflow' check tested whether node()
        returned None, which can never happen; removed as dead code.)
        """
        temp = node(data)
        temp.next = self.top
        self.top = temp

    def pop(self):
        """Remove and return the top value; exits the process when empty."""
        if self.isEmpty():
            print("Stack Underflow")
            sys.exit(0)
        d = self.top.info
        self.top = self.top.next
        return d

    def peek(self):
        """Return (without removing) the top value; exits when empty."""
        if self.isEmpty():
            print("Stack Underflow")
            sys.exit(0)
        return self.top.info

    def display(self):
        """Print every value from top to bottom; exits when empty.

        Bug fix: the original looped on `while self.top is not None` while
        advancing a separate walking pointer, so it always ran past the end
        and raised AttributeError on None.info.  The loop now tests the
        walking pointer itself.
        """
        if self.isEmpty():
            print("Stack Underflow")
            sys.exit(0)
        p = self.top
        while p is not None:
            print(p.info)
            p = p.next
if __name__=='__main__':
    # Simple interactive menu loop; runs until choice 5 (or any value
    # other than 1-4), which exits via sys.exit(0).
    s = Stack()
    while(1):
        print("1.Push\n");
        print("2.Pop\n");
        print("3.Display the top element\n");
        print("4.Display all stack elements\n");
        print("5.Quit\n");
        print("Enter your choice : ");
        choice=int(input())
        if choice==1:
            # NOTE: no prompt is printed before reading the value to push.
            value=int(input())
            s.push(value)
        elif choice==2:
            d=s.pop()
            print("poped value",d)
        elif choice==3:
            print("top of the element",s.peek())
        elif choice==4:
            s.display()
        else:
            sys.exit(0)
| true |
d23f45ee0c17921f8c498f1de7da2044942723ee | Python | RebortBoss/RingTimer | /ringtimer.py | UTF-8 | 1,299 | 2.890625 | 3 | [] | no_license | #coding:utf-8
class RingBuffer(object):
    """Ring of task slots numbered 1..timeout, advanced one tick at a time.

    slot_tasks:      slot number -> {task_id: value}
    task_slot_index: task_id -> slot number it was filed under
    cursor:          the current tick's slot (starts at 1)
    """

    def __init__(self, timeout):
        self.timeout = timeout
        self.slot_tasks = {}
        self.init_slot_tasks()
        self.task_slot_index = {}
        self.cursor = 1

    def init_slot_tasks(self):
        """(Re)create an empty task dict for every slot 1..timeout."""
        for slot in range(1, self.timeout + 1):
            self.slot_tasks[slot] = {}

    def set_task_slot(self, d, slot):
        """Record that task `d` lives in `slot`, dropping any stale mapping.

        (Keeps the original truthiness test: a falsy stored slot is simply
        overwritten without the intermediate delete.)
        """
        if self.task_slot_index.get(d, None):
            del self.task_slot_index[d]
        self.task_slot_index[d] = slot

    def add_slot_task(self, k, ts):
        """File task `k` with payload `ts` into the slot just behind the
        cursor; return that slot number."""
        target = self.before_cursor
        bucket = self.slot_tasks.get(target, {})
        bucket[k] = ts
        return target

    def del_slot_task(self, k):
        """Remove task `k` from the slot it was indexed under, if present."""
        filed_slot = self.task_slot_index.get(k)
        bucket = self.slot_tasks.get(filed_slot, {})
        if bucket.get(k):
            del bucket[k]

    def next(self):
        """Advance the cursor one tick, wrapping timeout -> 1; return it."""
        self.cursor = self.cursor % self.timeout + 1
        return self.cursor

    @property
    def before_cursor(self):
        """The slot immediately behind the cursor (wrapping 1 -> timeout)."""
        return self.timeout if self.cursor == 1 else self.cursor - 1

    @property
    def now_cursor(self):
        return self.cursor
| true |
38bc7a9d2720edb27ce510caeeb04ff6befb7365 | Python | AmirHoseinSafari/LRCN-drug-resistance | /dataset_creator/bccdc_data.py | UTF-8 | 6,795 | 2.875 | 3 | [] | no_license | import csv
import pandas as pd
# isolate = ['A', 'B', 'A']
# print(type(isolate))
# print(type(isolate[0]))
# iso_dict = dict()
# for i in range(0, len(isolate)):
# if isolate[i] in iso_dict:
# index_pointer = int(iso_dict[isolate[i]])
# else:
# iso_dict.update({isolate[i]: str(152)})
#
# print(iso_dict)
# graph={'A':['B','C'],
# 'B':['C','D']}
#
# print('A' in graph)
def load_gene_positions():
    """Read the M. tuberculosis H37Rv gene table and return the (start,
    stop) coordinate lists, aligned by row.

    The CSV's first column becomes the index; the Start/Stop columns are
    flattened into plain Python lists.
    """
    genes = pd.read_csv('../Data/EPFL_Data/Mycobacterium_tuberculosis_H37Rv_allGenes.csv')
    genes.set_index(genes.columns[0], inplace=True, drop=True)
    start = [row[0] for row in genes[['Start']].values.tolist()]
    stop = [row[0] for row in genes[['Stop']].values.tolist()]
    return start, stop
def binary_search(start, stop, low, high, x):
    """Return the index i in [low, high] with start[i] <= x <= stop[i],
    or -1 when no interval contains x.

    Assumes `start`/`stop` describe non-overlapping intervals sorted in
    ascending order.  Iterative rewrite of the original recursion; the
    sequence of probed midpoints is identical.
    """
    while low <= high:
        mid = (high + low) // 2
        if start[mid] <= x <= stop[mid]:
            return mid
        if start[mid] > x:
            high = mid - 1   # target can only be in the left half
        else:
            low = mid + 1    # target can only be in the right half
    return -1
# ---------------------------------------------------------------------
# Step 1: parse the SNP training TSV (column 0 = isolate id, column 4 =
# SNP position), skipping the header row, then convert positions to int.
# ---------------------------------------------------------------------
tsv_file = open("../Data/bccdc_snippy_179_snps_training_data.tsv")
read_tsv = csv.reader(tsv_file, delimiter="\t")
isolate = []
position = []
for row in read_tsv:
    isolate.append(row[0])
    position.append(row[4])
isolate = isolate[1:]
position = position[1:]
for i in range (0, len(position)):
    position[i] = int(position[i])

# Gene interval table and an all-zero counter template (one slot per gene).
start, stop = load_gene_positions()
zeros = []
res = []
for i in range(0, len(start)):
    zeros.append(0)

# ---------------------------------------------------------------------
# Step 2: build one row per isolate: [isolate_id, gene_0_count, ...].
# iso_dict maps isolate id -> its row index in `res` (stored as a string).
# (Large commented-out drafts of this loop and of a linear interval
# search were removed; they were dead comments.)
# ---------------------------------------------------------------------
iso_dict = dict()
for i in range(0, len(isolate)):
    if i % 100 == 0:
        print(i)  # progress indicator
    index_pointer = -1
    if isolate[i] in iso_dict:
        index_pointer = int(iso_dict[isolate[i]])
    else:
        # First time this isolate appears: append a fresh zeroed row.
        tmp = [isolate[i]]
        tmp.extend(zeros)
        res.append(tmp)
        iso_dict.update({isolate[i]: str(len(res) - 1)})
        index_pointer = len(res) - 1
    # Find the gene interval containing this SNP (binary search; assumes
    # the gene table is sorted by position -- TODO confirm) and bump it.
    gene_index = binary_search(start, stop, 0, len(start) - 1, position[i])
    if gene_index != -1:
        res[index_pointer][gene_index + 1] = res[index_pointer][gene_index + 1] + 1

# Write the per-isolate gene-count matrix (id first, then counts).
f = open('bccdc_data.csv', 'w')
for item in res:
    for i in range(len(item)):
        if i == 0:
            f.write(str(item[i]))
        else:
            f.write(',' + str(item[i]))
    f.write('\n')
f.close()

# ---------------------------------------------------------------------
# Step 3: parse phenotypes, skipping the header row.
# Label encoding: RESISTANT -> 1, SUSCEPTIBLE -> 0, unknown stays -1.
# NOTE: `isolate` is re-bound here, replacing the SNP list from above.
# ---------------------------------------------------------------------
#res : 1 sus : 0
tsv_file = open("../Data/bccdc_phenotype.tsv")
read_tsv = csv.reader(tsv_file, delimiter="\t")
isolate = []
drug = []
status = []
for row in read_tsv:
    isolate.append(row[0])
    drug.append(row[1])
    status.append(row[2])
isolate = isolate[1:]
drug = drug[1:]
status = status[1:]

# One 5-slot label row per isolate row in `res`, initialised to -1.
# NOTE(review): `zeros_d` is built but never used below; the inner loop
# reuses the name `i`, shadowing the outer index (harmless here).
zeros_d = []
labels = []
for i in range(0, 5):
    zeros_d.append(-1)
for i in range(0, len(res)):
    tmp = []
    for i in range(0, 5):
        tmp.append(-1)
    labels.append(tmp)
print(labels)

# Fill the label matrix. Column layout:
# 0=STREPTOMYCIN, 1=RIFAMPICIN, 2=PYRAZINAMIDE, 3=ISONIAZID, 4=ETHAMBUTOL.
for i in range(0, len(isolate)):
    if isolate[i] in iso_dict:
        index_pointer = int(iso_dict[isolate[i]])
    else:
        # Phenotyped isolate with no SNP row: skip it.
        continue
    if drug[i] == "PYRAZINAMIDE":
        if status[i] == "RESISTANT":
            labels[index_pointer][2] = 1
        elif status[i] == "SUSCEPTIBLE":
            labels[index_pointer][2] = 0
        else:
            print("wtf")  # unexpected status value
    elif drug[i] == "ISONIAZID":
        if status[i] == "RESISTANT":
            labels[index_pointer][3] = 1
        elif status[i] == "SUSCEPTIBLE":
            labels[index_pointer][3] = 0
        else:
            print("wtf")
    elif drug[i] == "STREPTOMYCIN":
        if status[i] == "RESISTANT":
            labels[index_pointer][0] = 1
        elif status[i] == "SUSCEPTIBLE":
            labels[index_pointer][0] = 0
        else:
            print("wtf")
    elif drug[i] == "RIFAMPICIN":
        if status[i] == "RESISTANT":
            labels[index_pointer][1] = 1
        elif status[i] == "SUSCEPTIBLE":
            labels[index_pointer][1] = 0
        else:
            print("wtf")
    elif drug[i] == "ETHAMBUTOL":
        if status[i] == "RESISTANT":
            labels[index_pointer][4] = 1
        elif status[i] == "SUSCEPTIBLE":
            labels[index_pointer][4] = 0
        else:
            print("wtf")
    else:
        print("errorrrrrrr")  # unknown drug name
        continue

# Debug pass: print any label that is not 0 (resistant or still unknown).
f = open('bccdc_data_label.csv', 'w')
for i in range(0, len(labels)):
    for j in range(0, len(labels[i])):
        if labels[i][j] != 0:
            print(labels[i][j] )
# Write the label matrix, one comma-separated row per isolate.
for item in labels:
    for i in range(len(item)):
        if i == 0:
            f.write(str(item[i]))
        else:
            f.write(',' + str(item[i]))
    f.write('\n')
f.close()
| true |
6a646ca8864a98c051948a998720d17582434ef6 | Python | mblakemi/python-jpg-date | /test_py_programs/nToaa.py | UTF-8 | 1,270 | 2.8125 | 3 | [] | no_license | #dir directories
import os,sys
from PIL import Image
import re
import datetime
def nToAA(ival):
    """Convert a non-negative index into a letter label.

    0..25 map to ' a'..' z' (space-padded to two characters); from 26 on,
    additional higher-order letters are prepended ('aa', 'ab', ...).  As the
    original author noted, labels of three letters and more no longer sort
    lexicographically with the two-character ones.
    """
    # least-significant letter; the duplicated assignment in the original
    # was removed (it computed the identical value twice)
    sret = chr(ord('a') + (ival % 26))
    iupper = int(ival / 26)
    if iupper == 0:
        return ' ' + sret
    while (iupper):
        ival = int(iupper % 26)
        iupper = int(iupper / 26)
        # -1 because higher-order positions are 1-based ('a' encodes 1)
        sret = chr(ord('a') + (ival % 26) - 1) + sret
    return sret
##mylist = [' b 1', ' a 2', 'bb 3', ' c 4', ' b 5', 'aa 6']
##mylist.sort()
##for a in mylist:
##    print (a)

# Smoke test for nToAA(): convert a few sample indices and print each label
# next to the corresponding entry of the sorted list, to eyeball whether the
# generated labels sort in the same order as the numbers.
nlist = [1, 4, 25, 26, 27, 100, 500, 1000, 1500]
ntext = []
for n in nlist:
    ntext.append(nToAA(n))
ntestsort = ntext.copy()
ntestsort.sort()
i = 0
for v in ntext:
    print(v, ntestsort[i])
    i += 1

# Example camera file name; the regexes below pull out date, hour, minute,
# base name and extension.
filename = '2016-12-20 03-00-35 P1070664.JPG'
# check that correct ####-##-## ##-##-## format
found = re.findall('^(\d\d\d\d-\d\d-\d\d )(\d\d)-(\d\d)-\d\d (.*?) \d\d\d\.(...$)', filename)
bOK = True
if len(found) == 0:
    # check for panasonic
    # Panasonic names look like "P#######.JPG" (no free-form base name)
    found = re.findall('^(\d\d\d\d-\d\d-\d\d )(\d\d)-(\d\d)-\d\d (P)\d+\.(...$)', filename)
    if len(found) == 0:
        bOK = False
if (bOK):
    d, h, m, name, ext = found[0]
51199884acd513b6c37befc6f93fac4659e49c4e | Python | mrkzncv/FlaskApp-HW1 | /app/ml_models/mlmodels.py | UTF-8 | 7,213 | 3.140625 | 3 | [] | no_license | import numpy as np
import pickle
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import SVR, SVC
from sklearn.model_selection import GridSearchCV
class MLModelsDAO:
    """In-memory registry of trained ML models.

    Each entry of ``ml_models`` is the request dict itself, augmented with
    an ``id`` and a ``model_path`` pointing at the pickled sklearn model in
    the "models static/" folder.
    """
    def __init__(self, ):
        self.ml_models = []   # registered model descriptors (dicts)
        self.counter = 0      # monotonically increasing id source

    def get(self, id):
        """
        Load the trained model with the given id and return its predictions
        on the stored feature matrix ``model['X']``.
        :param id: integer: unique model identifier
        :return: list: model predictions
        """
        f_name = None
        for model in self.ml_models:
            if model['id'] == id:
                f_name = f"models static/{model['problem']}_{model['name']}_{model['id']}.pickle"
                # NOTE(review): file handle opened without a context manager
                trained_model = pickle.load(open(f_name, 'rb'))
                prediction = trained_model.predict(np.array(model['X']))
                return prediction.tolist()
        if f_name is None:
            raise NotImplementedError('ml_model {} does not exist'.format(id))

    def create(self, data, is_new=True):  # пришли данные, надо присвоить id (для POST)
        """
        Train (or retrain) a model from a request dict.  When the requested
        problem/model combination is supported, the model is fitted and
        pickled to a file whose name embeds the model id; the path is stored
        back into the dict under the key 'model_path'.
        :param data: json {'problem': 'classification', 'name': 'Random Forest', 'h_tune': False, 'X': x, 'y': y}
        :param is_new: boolean: whether this is a brand-new model or a retrain
            of an existing one
        :return: the (mutated) model descriptor dict
        """
        ml_model = data
        if (ml_model['problem'] in ['classification', 'regression']) and \
                (ml_model['name'] in ['Random Forest', 'SVM']):
            if is_new:
                self.counter += 1
                ml_model['id'] = self.counter
            x, y = np.array(ml_model['X']), np.array(ml_model['y'])
            if ml_model['problem'] == 'classification':
                best_model = self.classification(ml_model['name'], x, y, h_tune=ml_model['h_tune'])
                f_name = f"models static/{ml_model['problem']}_{ml_model['name']}_{ml_model['id']}.pickle"
                pickle.dump(best_model, open(f_name, 'wb'))
                ml_model['model_path'] = f_name
            elif ml_model['problem'] == 'regression':
                best_model = self.regression(ml_model['name'], x, y, h_tune=ml_model['h_tune'])
                f_name = f"models static/{ml_model['problem']}_{ml_model['name']}_{ml_model['id']}.pickle"
                pickle.dump(best_model, open(f_name, 'wb'))
                ml_model['model_path'] = f_name
            if is_new:
                self.ml_models.append(ml_model)
        else:
            # Runtime message intentionally left in Russian (user-facing text):
            # "only classification and regression with Random Forest or SVM
            # are currently supported"
            raise NotImplementedError("""Сейчас доступны для обучения только classification and regression:
                Random Forest или SVM""")
        return ml_model

    def update(self, id, data):
        """
        Retrain an existing model in place, or fail when no model with this
        id exists (a new one must be created instead).
        :param id: integer: unique model identifier
        :param data: dict with the new parameters for the model
        :return: None
        """
        ml_model = None
        for model in self.ml_models:
            if model['id'] == id:
                ml_model = model  # descriptor with the old parameters
        if ml_model is None:
            # Russian runtime message: "no such model yet, create a new one"
            raise NotImplementedError('Такой модели ещё нет, надо создать новую')
        else:
            if (data['name'] == ml_model['name']) and (data['problem'] == ml_model['problem']):
                ml_model.update(data)  # merge in the new 'X', 'y', 'h_tune', ...
                self.create(ml_model, is_new=False)  # retrain with same id
            else:
                raise NotImplementedError('Такой модели ещё нет, надо создать новую')

    def delete(self, id):
        """
        Delete the model with the given id from the registry.
        :param id: integer: unique model identifier
        :return: None (the descriptor is removed from ml_models)
        """
        for model in self.ml_models:
            if model['id'] == id:
                self.ml_models.remove(model)

    @staticmethod
    def classification(model, x, y, h_tune=False):
        """
        :param model: classifier family name (string) - "Random Forest" or "SVM"
        :param x: np.array(): training feature matrix
        :param y: np.array(): target labels
        :param h_tune: boolean: whether to grid-search hyperparameters
        :return: fitted model (best estimator when h_tune is True)
        """
        if model == 'Random Forest':
            param_grid = {'n_estimators': [50, 100], 'max_depth': [3, 4], 'max_features': ['auto', 'sqrt']}
            clf = RandomForestClassifier(random_state=0)
        elif model == 'SVM':
            param_grid = {'kernel': ('linear', 'rbf'), 'C': [1, 10]}
            clf = SVC(random_state=0)
        # NOTE(review): any other value of `model` leaves clf unbound
        # (NameError below); callers are expected to pre-validate, as
        # create() does.
        if h_tune:
            clf_cv = GridSearchCV(estimator=clf, param_grid=param_grid, cv=5)
            clf_cv.fit(x, y)
            return clf_cv.best_estimator_
        else:
            clf.fit(x, y)
            return clf

    @staticmethod
    def regression(model, x, y, h_tune=False):
        """
        :param model: regressor family name (string) - "Random Forest" or "SVM"
        :param x: np.array(): training feature matrix
        :param y: np.array(): target values
        :param h_tune: boolean: whether to grid-search hyperparameters
        :return: fitted model (best estimator when h_tune is True)
        """
        if model == 'Random Forest':
            param_grid = {'n_estimators': [50, 100], 'max_depth': [3, 4], 'max_features': ['auto', 'sqrt']}
            lr = RandomForestRegressor(random_state=0)
        elif model == 'SVM':
            param_grid = {'kernel': ('linear', 'rbf'), 'C': [1, 10]}
            lr = SVR()
        if h_tune:
            lr_cv = GridSearchCV(estimator=lr, param_grid=param_grid, cv=5)
            lr_cv.fit(x, y)
            return lr_cv.best_estimator_
        else:
            lr.fit(x, y)
            return lr
| true |
ae2bf930152fab4a1b876470d2d1f9ed19b5396f | Python | kidc2458/Python_Tutorial | /try_except.py | UTF-8 | 378 | 3.390625 | 3 | [] | no_license | text = '100%'
# '100%' is not a valid integer literal, so the except branch below is the
# expected path for the sample value defined above.
try:
    number = int(text)
except ValueError:
    print('{}는 숫자가 아니네요.'.format(text))
def sage_pop_print(list, index):
    """Pop list[index] and print it; print an error message when the
    index is out of range instead of raising."""
    try:
        popped = list.pop(index)
        print(popped)
    except IndexError:
        print('{} index의 값을 가져올 수 없습니다.'.format(index))
# Index 5 is out of range for a 3-element list, so the error message prints.
sage_pop_print([1,2,3],5)

# my_module2 does not exist, so the ImportError branch is the expected path.
try:
    import my_module2
except ImportError:
    print("모듈이 없습니다.")
405f241461ac71f947f38e8b7b194306223e8c69 | Python | crim-ca/stac-ingest | /collections/collection_base.py | UTF-8 | 508 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | from abc import (ABC, abstractmethod)
class CollectionBase(ABC):
    """
    Abstract class defining dataset metadata schema processing.

    Concrete subclasses implement the three hooks below; this base adds no
    behaviour of its own.
    """
    def __init__(self):
        pass

    @abstractmethod
    def add_metadata(self, ds):
        """
        Attach this collection's metadata to the dataset *ds*.
        (Exact dataset type is subclass-specific -- not visible here.)
        """
        pass

    @abstractmethod
    def get_stac_collection(self, item):
        """
        Build the STAC collection description derived from *item*.
        """
        pass

    @abstractmethod
    def get_stac_collection_item(self, collection_items, collection_name):
        """
        Select from *collection_items* the entry matching *collection_name*.
        """
        pass
5d530b6c3cc1d57df3498afc35660d0a4c284766 | Python | bilalsheikh12324/SSUET-AI | /Untitled.py | UTF-8 | 340 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8

# Jupyter-notebook export: prints a short personal-information card.

# In[2]:

print("Information")

# In[10]:

name="Bilal Sheikh"
fathername="Sheikh Hamid"
email="bilal.sheikh50.bs@gmail.com"
age=22
city="karachi"
qualification="B.Sc"
print("Name: ",name)
print ("Father Name: ",fathername)
print("Email: ",email)
print("Age: ",age)
print("City: ",city)

# In[ ]:

# In[ ]:

# In[ ]:
| true |
1272f8c40a285f55d3c15f85aeb326d047064514 | Python | paul-tqh-nguyen/one_off_code | /static_autodiff/tibs/tests/test_front_end_vector_literal.py | UTF-8 | 6,269 | 2.765625 | 3 | [] | no_license | import pytest
from tibs import parser, type_inference
from tibs.ast_node import (
EMPTY_TENSOR_TYPE_AST_NODE,
PrintStatementASTNode,
ComparisonExpressionASTNode,
ExpressionASTNode,
TensorTypeASTNode,
VectorExpressionASTNode,
FunctionDefinitionASTNode,
ForLoopASTNode,
WhileLoopASTNode,
ConditionalASTNode,
ScopedStatementSequenceASTNode,
ReturnStatementASTNode,
BooleanLiteralASTNode,
IntegerLiteralASTNode,
FloatLiteralASTNode,
StringLiteralASTNode,
NothingTypeLiteralASTNode,
VariableASTNode,
NegativeExpressionASTNode,
ExponentExpressionASTNode,
MultiplicationExpressionASTNode,
DivisionExpressionASTNode,
AdditionExpressionASTNode,
SubtractionExpressionASTNode,
NotExpressionASTNode,
AndExpressionASTNode,
XorExpressionASTNode,
OrExpressionASTNode,
GreaterThanExpressionASTNode,
GreaterThanOrEqualToExpressionASTNode,
LessThanExpressionASTNode,
LessThanOrEqualToExpressionASTNode,
EqualToExpressionASTNode,
NotEqualToExpressionASTNode,
StringConcatenationExpressionASTNode,
FunctionCallExpressionASTNode,
AssignmentASTNode,
ModuleASTNode,
) # TODO reorder these in according to their declaration
from tibs.misc_utilities import *
# TODO make sure all these imports are used
# Each case is (source string, expected AST for the vector literal,
# expected inferred TensorTypeASTNode). Shared by both parametrized tests.
TEST_CASES = (
    (
        '[1,2,3]',
        VectorExpressionASTNode(values=[IntegerLiteralASTNode(value=1), IntegerLiteralASTNode(value=2), IntegerLiteralASTNode(value=3)]),
        TensorTypeASTNode(base_type_name='Integer', shape=[3])
    ),
    (
        # line continuation inside the literal must parse the same as above
        '[1,2,\
3]',
        VectorExpressionASTNode(values=[IntegerLiteralASTNode(value=1), IntegerLiteralASTNode(value=2), IntegerLiteralASTNode(value=3)]),
        TensorTypeASTNode(base_type_name='Integer', shape=[3])
    ),
    (
        '[[1,2,3], [4,5,6], [7,8,9]]',
        VectorExpressionASTNode(values=[
            VectorExpressionASTNode(values=[IntegerLiteralASTNode(value=1), IntegerLiteralASTNode(value=2), IntegerLiteralASTNode(value=3)]),
            VectorExpressionASTNode(values=[IntegerLiteralASTNode(value=4), IntegerLiteralASTNode(value=5), IntegerLiteralASTNode(value=6)]),
            VectorExpressionASTNode(values=[IntegerLiteralASTNode(value=7), IntegerLiteralASTNode(value=8), IntegerLiteralASTNode(value=9)])
        ]),
        TensorTypeASTNode(base_type_name='Integer', shape=[3, 3])
    ),
    (
        '[[[1,2], [3, 4]], [[5,6], [7,8]]]',
        VectorExpressionASTNode(values=[
            VectorExpressionASTNode(values=[
                VectorExpressionASTNode(values=[IntegerLiteralASTNode(value=1), IntegerLiteralASTNode(value=2)]),
                VectorExpressionASTNode(values=[IntegerLiteralASTNode(value=3), IntegerLiteralASTNode(value=4)])]),
            VectorExpressionASTNode(values=[
                VectorExpressionASTNode(values=[IntegerLiteralASTNode(value=5), IntegerLiteralASTNode(value=6)]),
                VectorExpressionASTNode(values=[IntegerLiteralASTNode(value=7), IntegerLiteralASTNode(value=8)])])
        ]),
        TensorTypeASTNode(base_type_name='Integer', shape=[2,2,2])
    ),
    (
        '[1.1, 2.2, 3.3]',
        VectorExpressionASTNode(values=[FloatLiteralASTNode(value=1.1), FloatLiteralASTNode(value=2.2), FloatLiteralASTNode(value=3.3)]),
        TensorTypeASTNode(base_type_name='Float', shape=[3])
    ),
    (
        '[1.1 + 2.2, 3.3 * 4.4]',
        VectorExpressionASTNode(values=[
            AdditionExpressionASTNode(left_arg=FloatLiteralASTNode(value=1.1), right_arg=FloatLiteralASTNode(value=2.2)),
            MultiplicationExpressionASTNode(left_arg=FloatLiteralASTNode(value=3.3), right_arg=FloatLiteralASTNode(value=4.4))
        ]),
        TensorTypeASTNode(base_type_name='Float', shape=[2])
    ),
    (
        '[True, False, True or False]',
        VectorExpressionASTNode(values=[
            BooleanLiteralASTNode(value=True),
            BooleanLiteralASTNode(value=False),
            OrExpressionASTNode(left_arg=BooleanLiteralASTNode(value=True), right_arg=BooleanLiteralASTNode(value=False))]),
        TensorTypeASTNode(base_type_name='Boolean', shape=[3])
    ),
    (
        '[[[True]]]',
        VectorExpressionASTNode(values=[VectorExpressionASTNode(values=[VectorExpressionASTNode(values=[BooleanLiteralASTNode(value=True)])])]),
        TensorTypeASTNode(base_type_name='Boolean', shape=[1,1,1])
    ),
)
@pytest.mark.parametrize('input_string, parse_result, expected_type', TEST_CASES)
def test_parser_vector_literal(input_string, parse_result, expected_type):
    """Parse ``x = <vector literal>`` and check the produced parse tree.

    Only the parser output is validated here; ``expected_type`` belongs to
    the companion type-inference test and is discarded.
    """
    del expected_type
    module_node = parser.parseSourceCode('x = '+input_string)
    assert isinstance(module_node, ModuleASTNode)
    assert isinstance(module_node.statements, list)
    assignment_node = only_one(module_node.statements)
    assert isinstance(assignment_node, AssignmentASTNode)
    variable_node, tensor_type_node = only_one(assignment_node.variable_type_pairs)
    assert isinstance(variable_node, VariableASTNode)
    # Fixed: ``is 'x'`` compared identity against a literal, which relies on
    # CPython string interning and raises SyntaxWarning on modern Pythons.
    assert variable_node.name == 'x'
    assert isinstance(tensor_type_node, TensorTypeASTNode)
    assert tensor_type_node.base_type_name is None
    assert tensor_type_node.shape is None
    result = assignment_node.value
    assert result == parse_result, f'''
input_string: {repr(input_string)}
result: {repr(result)}
parse_result: {repr(parse_result)}
'''
@pytest.mark.parametrize('input_string, parse_result, expected_type', TEST_CASES)
def test_type_inference_vector_literal(input_string, parse_result, expected_type):
    """Run type inference on ``x = <vector literal>`` and check the
    inferred tensor type; the raw parse tree is checked by the parser test."""
    del parse_result
    module_node = parser.parseSourceCode('x = '+input_string)
    module_node = type_inference.perform_type_inference(module_node)
    assert isinstance(module_node, ModuleASTNode)
    assert isinstance(module_node.statements, list)
    assignment_node = only_one(module_node.statements)
    assert isinstance(assignment_node, AssignmentASTNode)
    variable_node, tensor_type_node = only_one(assignment_node.variable_type_pairs)
    assert isinstance(variable_node, VariableASTNode)
    # Fixed: value comparison instead of identity against a string literal.
    assert variable_node.name == 'x'
    assert isinstance(tensor_type_node, TensorTypeASTNode)
    assert tensor_type_node == expected_type
    # Inference must have filled in every node's type annotation.
    assert all(EMPTY_TENSOR_TYPE_AST_NODE != node for node in module_node.traverse())
| true |
7f3424a56a2a30a146f1c1ea0d5baaef667121e1 | Python | kaisAlbi/Kais-Albichari-Master-Thesis-2018-2019 | /StateAnalytical/GradientOfSelection.py | UTF-8 | 10,852 | 2.921875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from itertools import product
import pickle
class EGTModel:
    """Evolutionary-game model computing the gradient of selection.

    Loads a precomputed fitness-difference tensor (resident x invader x k)
    from a pickle file and derives, for every ordered pair of strategies,
    the gradient of selection over all invader counts 0..Z.
    """
    def __init__(self, fit_diff_filename, pop_size, group_size, beta, nb_states):
        self.Z = pop_size            # population size
        self.N = group_size          # interaction group size
        self.beta = beta             # selection strength
        self.nb_states = nb_states   # number of internal automaton states
        self.strategies = self.reducedStrategies(self.createStrategies())
        # loadFitDiff now returns the loaded matrix as well, so fit_diff is
        # usable (it was previously always None because loadFitDiff
        # returned nothing).
        self.fit_diff = self.loadFitDiff(fit_diff_filename)
        self.gradient_matrix = self.gradientMatrix()

    def getNbStates(self):
        return self.nb_states

    def getStrategies(self):
        return self.strategies

    def getBeta(self):
        return self.beta

    def getPopSize(self):
        return self.Z

    def getGradientMatrix(self):
        return self.gradient_matrix

    def loadFitDiff(self, filename):
        """Load the pickled fitness-difference tensor, store it on the
        instance (fit_diff_matrix) and return it."""
        with open(filename, "rb") as f:
            self.fit_diff_matrix = np.asarray(pickle.load(f, encoding='latin1'))
        return self.fit_diff_matrix

    def createStrategies(self):
        """
        Generate all possible combination of strategies, depending on the number of states
        :return: list of all strategies in the form [T_c, T_d, action state_0, ..., action state_maxState]
        transition = 1 = Left ; action = 1 = C
        """
        action_choice = list(list(item) for item in product("CD", repeat=self.getNbStates()))
        state_change = list(list(item) for item in product("LR", repeat=2))
        strats = []
        for action in action_choice:
            # encode actions: C -> 1, D -> 0
            action_c_tr = [1 if a == "C" else 0 for a in action]
            for state_c in state_change:
                # encode transitions: L -> 1, R -> 0
                state_c_tr = [1 if s == "L" else 0 for s in state_c]
                strats.append(state_c_tr + action_c_tr)
        return strats

    def reducedStrategies(self, strategies):
        """Drop mirror-equivalent strategies (flipped transitions plus
        reversed action string), keeping all-C first and all-D last."""
        all_c = strategies[0]
        all_d = all_c[:2] + strategies[-1][2:]
        reduced_strats = [all_c]
        for strat in strategies:
            if not (hasOnlyOneAction(strat) or hasOnlyOneDirection(strat)):
                # bit-flip the transition pair: LR -> RL
                reversed_strat = [(strat[i] + 1) % 2 for i in range(2)]
                # reverse the action string: CDD -> DDC
                reversed_strat += [strat[i] for i in range(len(strat) - 1, 1, -1)]
                if reversed_strat not in reduced_strats:
                    reduced_strats.append(strat)
        reduced_strats.append(all_d)
        return reduced_strats

    def fermiDistrib(self, fit_diff, increase):
        """Fermi imitation probability.

        :param fit_diff: fitness difference between the two agents
        :param increase: True for the "imitate" direction (T+), False for T-
        :return: probability that the first agent imitates the second one
        """
        if increase:
            return 1. / (1. + np.exp(-self.getBeta() * (fit_diff)))
        else:
            return 1. / (1. + np.exp(self.getBeta() * (fit_diff)))

    def probIncDec(self, n_A, fit_diff):
        """
        :param n_A: number of invaders
        :param fit_diff: fitness difference for that invader count
        :return: [T+, T-]: probabilities of the invader count moving +-1
        """
        # Fixed: the original divided by the module-level global Z; the
        # model's own population size self.Z is the correct quantity.
        tmp = ((self.Z - n_A) / self.Z) * (n_A / self.Z)
        inc = np.clip(tmp * self.fermiDistrib(fit_diff, True), 0., 1.)
        dec = np.clip(tmp * self.fermiDistrib(fit_diff, False), 0., 1.)
        return [inc, dec]

    def gradientOfSelection(self, k, fit_diff):
        """Gradient of selection G(k) = T+(k) - T-(k)."""
        inc, dec = self.probIncDec(k, fit_diff)
        return inc - dec

    def gradientArray(self, res_index, inv_index):
        """
        :param res_index: resident strategy index
        :param inv_index: invader strategy index
        :return: gradient of selection for every invader count 0..Z
        """
        fit_diff_array = self.fit_diff_matrix[res_index, inv_index]
        fit_diff_solo = self.fit_diff_matrix[inv_index, inv_index, 0]
        gradient_array = np.zeros(self.Z + 1)   # fixed: used the global Z
        for i in range(0, self.Z + 1):
            if i != self.Z:                     # fixed: used the global Z
                gradient_array[i] = self.gradientOfSelection(i, fit_diff_array[i])
            else:
                # full invasion: fall back to invader-vs-invader difference
                gradient_array[i] = self.gradientOfSelection(i, fit_diff_solo)
        return gradient_array

    def gradientMatrix(self):
        """Compute the (n x n x Z+1) gradient-of-selection tensor for every
        ordered pair of distinct strategies (diagonal stays zero)."""
        strats = self.getStrategies()
        n = len(strats)
        gradient_mat = np.zeros((n, n, self.Z + 1))
        for i in range(n):
            for j in range(n):
                if i != j:
                    gradient_mat[i, j] = self.gradientArray(i, j)
        return gradient_mat
def makeXTicks(strats):
    """
    Transforms the binary strategies into 'x_ticks' to plot them on a graph:
    the first two entries map 1 -> "L" / else "R", the rest 1 -> "C" / else "D".
    :param strats: array of binary strategies
    :return: list of joined label strings, one per strategy
    """
    labels = []
    for strat in strats:
        chars = []
        for pos, bit in enumerate(strat):
            if pos < 2:
                chars.append("L" if bit == 1 else "R")
            else:
                chars.append("C" if bit == 1 else "D")
        labels.append("".join(chars))
    return labels
def plotGradientSelection2states(game, N, egt_states):
    """
    Plot the gradient of selection for the two focal invader strategies
    LRCD and RLCD against every other resident strategy (one subplot each).
    :param game: evolutionary game being played (title label only)
    :param N: group size (title label only)
    :param egt_states: system (an EGTModel instance)
    """
    gradients_mat = egt_states.getGradientMatrix()
    strats = egt_states.getStrategies()
    n = len(strats)
    # x axis: invader fraction k/Z
    x = [i for i in range(len(gradients_mat[0,0]))]
    x = [x[i]/len(x) for i in range (len(x))]
    print(len(x))  # NOTE(review): debug print left in by the author
    fig, (ax1, ax2) = plt.subplots(1,2, figsize=(3,6))
    fig.suptitle(game + " - group size : " + str(N))
    ax1.set_ylabel("gradient of selection (G)")
    ax1.set_xlabel("fraction of invaders (k/Z)")
    ax2.set_ylabel("gradient of selection (G)")
    ax2.set_xlabel("fraction of invaders (k/Z)")
    strat_1 = [1,0,1,0] #LRCD
    index_1 = strats.index(strat_1)
    strat_2 = [0,1,1,0] #RLCD
    index_2 = strats.index(strat_2)
    # per-curve marker styles and colors; k1/k2 index into them as curves
    # are added (assumes at most 3 resident strategies per subplot)
    lstyle1 = ["o", "-", "+"]
    k1=0
    lstyle2 = ["o", "-", "+"]
    k2 = 0
    color = ["blue", "green", "red"]
    for i in range (n):
        res_strat = strats[i]
        if res_strat != strat_1:
            cur_label = "invader " + "".join(displayableStrat(strat_1)) + " - " + "resident " + "".join(
                displayableStrat(strats[i]))
            ax1.plot(x, gradients_mat[index_1, i], lstyle1[k1], label=cur_label, color=color[k1])
            #ax1.plot(x, gradients_mat[index_1, i], label=cur_label, color=color[k1])
            k1 += 1
        if res_strat != strat_2:
            cur_label = "invader " + "".join(displayableStrat(strat_2)) + " - " + "resident " + "".join(
                displayableStrat(strats[i]))
            ax2.plot(x, gradients_mat[index_2, i], lstyle2[k2], label=cur_label, color=color[k2])
            #ax2.plot(x, gradients_mat[index_2, i], label=cur_label, color=color[k2])
            k2 += 1
    # cosmetic: hide the top/right spines on both subplots
    ax1.spines['right'].set_visible(False)
    ax1.spines['top'].set_visible(False)
    ax2.spines['right'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    legend1 = ax1.legend(loc='lower left')
    legend2 = ax2.legend(loc='lower left')#,bbox_to_anchor=(1.15,1))
    plt.show()
def displayableStrat(strat):
    """
    Transforms a binary strategy array into a human readable strategy array:
    the first two entries become "L"/"R" transitions, the rest "C"/"D" actions.
    :param strat: strategy array
    :return: human readable strategy array (list of single characters)
    """
    readable = []
    for pos, bit in enumerate(strat):
        if pos < 2:
            readable.append("L" if bit == 1 else "R")
        else:
            readable.append("C" if bit == 1 else "D")
    return readable
def getStationaryFromFile(filename):
    """Read a stationary distribution back from *filename*.

    Each line has the form "<strategy> : <probability>".  Any floating-point
    rounding loss is folded back into the first entry so the returned values
    sum to exactly 1.

    :param filename: path to a file written by storeStationary()
    :return: list of float probabilities, one per line
    """
    stationary = []
    # use a context manager so the handle is closed even on parse errors
    with open(filename, "r") as f:
        for line in f.readlines():
            index = line.index(": ")
            stationary_i = line[index + 1:]
            stationary.append(float(stationary_i))
    r = 1 - sum(stationary)  # Handle loss of precision
    stationary[0] += r
    return stationary
def storeStationary(filename, strats, stationary):
    """Write one "<strategy> : <probability>" line per strategy to
    *filename*; no trailing newline after the final line."""
    with open(filename, "w") as f:
        for i, strat in enumerate(strats):
            if i:
                f.write("\n")
            f.write("".join(map(str, strat)) + " : " + str(stationary[i]))
def hasOnlyOneDirection(strat):
    """
    :param strat: strategy
    :return: True if the strategy's two transition bits (first two entries)
        allow only a single direction, False when both L and R occur
    """
    directions = strat[:2]
    return not (1 in directions and 0 in directions)
def hasOnlyOneAction(strat):
    """
    :param strat: strategy
    :return: True if the strategy's action bits (entries from index 2 on)
        allow only a single action choice, False when both C and D occur
    """
    actions = strat[2:]
    return not (1 in actions and 0 in actions)
if __name__ == '__main__':
    # Experiment configuration: Stag Hunt, population/group size 150,
    # weak selection, two-state automata.
    game = "SH"
    Z = 150
    N = 150
    beta = 0.05
    nb_states = 2
    # Alternative precomputed fitness-difference files kept for reference:
    #fit_diff_filename = "FitDiff/" + game + "/analytical_groups/fit_diff_" + str(nb_states) + "st_groupsize" + str(N) + ".pickle"
    fit_diff_filename = "FitDiff/" + game + "/Reduced/fit_diff_" + str(nb_states) + "st_groupsize" + str(N) + ".pickle"
    #fit_diff_filename = "FitDiff/" + game + "/analytical_groups/fit_diff_LRCD_RLCD_groupsize" + str(N) + ".pickle"
    egt_states = EGTModel(fit_diff_filename, Z, N, beta, nb_states)
    print(egt_states.getStrategies())
    plotGradientSelection2states(game, N, egt_states)
    #plotGradientSelectionCoopDefect(game, N, egt_states)
ce1f51e58fec99ffce67e2d491a21ab4b1d3d067 | Python | alejocbs/ENPM661-Project3_Phase_2 | /A_star.py | UTF-8 | 1,983 | 3.0625 | 3 | [] | no_license | import time
from Functions import *
from finalmap import *
# Choose between interactive configuration and the hard-coded defaults below.
userdefined = False
if userdefined:
    start_nodex = int(input("Please enter Start point X coordinate: "))
    start_nodey = int(input("Please enter Start point Y coordinate: "))
    goal_nodex = int(input("Please enter Goal point X coordinate: "))
    goal_nodey = int(input("Please enter Goal point Y coordinate: "))
    clearance = int(input("Please enter the Radius of the robot"))
    step = int(input("Please enter the value of the step"))
    thresho =int(input("Please enter the threshold"))
    radius =int(input("Please enter the radius"))
    # NOTE(review): this branch never sets start_angle / goal_angle / d,
    # so start_pos/goal_pos below would raise NameError when userdefined
    # is True (the angle prompts were left commented out).
    # start_angle = int(input("Please enter the value of the step"))
    # goal_angle =int(input("Please enter the value of the step"))
else:
    # Default scenario used for the demo run.
    start_nodex = 50
    start_nodey = 30
    goal_nodex = 295
    goal_nodey = 195
    clearance = 1
    d = 1
    thresho = 1
    radius = 1
    start_angle = 60
    goal_angle = 30
# Step size override (shadows the d chosen above).
d=5
start_pos = (start_nodex, start_nodey, start_angle)
goal_pos = (goal_nodex, goal_nodey, goal_angle)
# Mark start and goal on the map figure (red diamonds).
plt.plot(start_nodex, start_nodey, "Dr")
plt.plot(goal_nodex, goal_nodey, "Dr")
start_time = time.time()
if __name__ == '__main__':
    # Build the obstacle map, validate the endpoints, then run A* and plot
    # the resulting path (if any).
    final_obs, wall_x, wall_y = finalmap(clearance)
    if isobstacle(clearance, start_nodex, start_nodey, thresho):
        print("Start Position in obstacle space")
    elif isobstacle(clearance, goal_nodex, goal_nodey, thresho):
        print("goal Position in obstacle space")
    else:
        path = Astar(start_pos, goal_pos, d, clearance,thresho)
        print(path)
        if path is not None:
            # path is a sequence of (x, y, ...) nodes; draw it in red
            scatterx = [x[0] for x in path]
            scattery = [x[1] for x in path]
            plt.plot(scatterx, scattery, color='r', linewidth=4)
            plt.savefig('path_rigid.png')
            plt.show()
            elapsed_time = time.time() - start_time
            print("Time Required to Solve ", round(elapsed_time, 2), "seconds")
        else:
            print("No path found")
| true |
8423419989d4969c0d54e36c068896c1a0c59018 | Python | ecaterinacatargiu/FP | /Assignment5-7/57/repository/BookRepository.py | UTF-8 | 2,293 | 3.21875 | 3 | [] | no_license | from domain.Book import Book
from repository.RepositoryException import *
from DataStructure.IterableStructure import *
class BookRepository:
    """Repository for storing domain objects (books keyed by getBookID)."""

    def __init__(self):
        self._books = DataStructure()

    def __len__(self):
        return len(self._books)

    def _indexOf(self, bookID):
        """Return the position of the first book with *bookID*, or -1.

        Private helper that replaces the duplicated existence-scan loops
        the original spread across remove/update/search/getBookByID.
        """
        for i, book in enumerate(self._books):
            if book.getBookID == bookID:
                return i
        return -1

    def addBook(self, book):
        """Add *book*; raise RepositoryError if it is already stored."""
        if book in self._books:
            raise RepositoryError("The element already exists")
        self._books.append(book)

    def removeBook(self, bookID):
        """Remove and return the book with *bookID*.

        Fixed: the original deleted from the structure while iterating it
        (with an index counter that drifted), which is undefined behaviour;
        we now locate the index first and delete once.
        :raises RepositoryError: when no book has *bookID*
        """
        idx = self._indexOf(bookID)
        if idx == -1:
            raise RepositoryError("The element does not exist!")
        rbook = self._books[idx]
        del self._books[idx]
        return rbook

    def updateBook(self, book):
        """Replace the stored book that shares *book*'s id with *book*.

        Fixed: the original called removeBook/addBook from inside a loop
        over the same structure it was mutating.
        :raises RepositoryError: when no book with that id exists
        """
        if self._indexOf(book.getBookID) == -1:
            raise RepositoryError("The element does not exist!")
        self.removeBook(book.getBookID)
        self.addBook(book)

    def listBooks(self):
        # NOTE(review): the original returned the first loop index (always 0)
        # for a non-empty repository and None when empty; that behaviour is
        # preserved here because callers' expectations are unknown.
        for i in range(len(self._books)):
            return i

    def searchBook(self, bookID):
        """Return the book with *bookID*; raise RepositoryError if absent."""
        idx = self._indexOf(bookID)
        if idx == -1:
            raise RepositoryError("The element does not exist!")
        return self._books[idx]

    def getBookByID(self, bookID):
        """Alias of searchBook, kept because both existed in the original."""
        return self.searchBook(bookID)

    def getAll(self):
        return self._books

    def store(self, book):
        # Unchecked append, kept as in the original (no duplicate check).
        self._books.append(book)
| true |
9e6958cb33ab83ad0c03863cb3d20fd95b0ab8c4 | Python | xz1082/assignment5 | /mz948/assignment5.py | UTF-8 | 3,925 | 3.546875 | 4 | [] | no_license | import re
class interval:
    """An integer interval parsed from a string such as '[1,5)' or '(2,8]'.

    Attributes set for non-empty input: lower/upper (parsed bounds),
    lower_type/upper_type (the bracket characters) and range (the
    equivalent inclusive integer range).  An empty string yields an
    "empty" interval carrying only ``string``.
    """
    def __init__(self, interval_string = ''):
        self.string = interval_string
        if interval_string:
            self.lower, self.upper = map(int, interval_string.strip('[] ()').split(','))
            # [a,b] : closed both sides, requires a <= b
            if (interval_string[0] == '[') and (interval_string[-1] == ']') and (self.lower <= self.upper):
                self.lower_type = '['
                self.upper_type = ']'
                self.range = range(self.lower, self.upper + 1)
            # [a,b) : half-open on the right, requires a < b
            elif (interval_string[0] == '[') and (interval_string[-1] == ')') and (self.lower < self.upper):
                self.lower_type = '['
                self.upper_type = ')'
                self.range = range(self.lower, self.upper)
            # (a,b] : half-open on the left, requires a < b
            elif (interval_string[0] == '(') and (interval_string[-1] == ']') and (self.lower < self.upper):
                self.lower_type = '('
                self.upper_type = ']'
                self.range = range(self.lower + 1, self.upper + 1)
            # (a,b) : open, needs at least one interior integer (a < b - 1)
            elif (interval_string[0] == '(') and (interval_string[-1] == ')') and (self.lower < self.upper - 1):
                self.lower_type = '('
                self.upper_type = ')'
                self.range = range(self.lower + 1, self.upper)
            else:
                raise Exception('Invalid interval.')

    def __repr__(self):
        # The original source string is the canonical display form.
        return self.string
def mergeIntervals(int1, int2):
    """Merge two overlapping (or integer-adjacent) intervals into one.

    Raises when the intervals are separated by at least one integer gap.
    """
    range1 = int1.range
    range2 = int2.range
    #assign the lower_types and upper_types for interval1 and interval2
    lt1 = int1.lower_type
    lt2 = int2.lower_type
    ut1 = int1.upper_type
    ut2 = int2.upper_type
    l1 = int1.lower
    u1 = int1.upper
    l2 = int2.lower
    u2 = int2.upper
    # Disjoint with a gap of one or more integers -> not mergeable.
    if (range1[-1] + 1 < range2[0]) or (range2[-1] + 1 < range1[0]):
        raise Exception ('These two intervals are not overlapping.')
    # Adjacent (no gap): keep int1's lower bound and int2's upper bound.
    elif range1[-1] +1 == range2[0]:
        merged = interval(lt1+ str(l1) + ',' + str(u2) + ut2)
    # Adjacent the other way round.
    elif range2[-1] + 1== range1[0]:
        merged = interval(lt2+ str(l2) + ',' + str(u1) + ut1)
    else:
        # Proper overlap: pick or compose bounds by containment case.
        if (range1[0] >= range2[0]) and (range1[-1] <= range2[-1]):
            # int1 fully inside int2
            merged_interval = range2
            merged = int2
        elif (range1[0] >= range2[0]) and (range1[-1] >= range2[-1]):
            # int1 extends past int2 on the right
            merged_interval = range(range2[0], range1[-1])
            merged = interval(lt2+ str(l2)+','+ str(u1) + ut1)
        elif (range1[0] <= range2[0]) and (range1[-1] >= range2[-1]):
            # int2 fully inside int1
            merged_interval = range1
            merged = int1
        elif (range1[0] <= range2[0]) and (range1[-1] <= range2[-1]):
            # int2 extends past int1 on the right
            merged_interval = range(range1[0], range2[-1])
            merged = interval(lt1+ str(l1) + ', ' + str(u2) + ut2)
    return merged
def mergeOverlapping(intlist):
    """Sort the intervals by lower bound, then merge each mergeable
    adjacent pair, blanking out absorbed entries and filtering them at
    the end.  (Python 2: uses xrange.)"""
    n = len(intlist)
    # sort the intervals
    intlist.sort(key = lambda x: x.lower)
    for i in xrange(0, n - 1):
        j = i + 1
        try:
            intlist[j] = mergeIntervals(intlist[i], intlist[j])
            intlist[i] = ''
        except:
            # NOTE(review): bare except -- a failed merge (non-overlapping
            # pair) simply leaves both intervals in place, but this also
            # silently swallows any other error from mergeIntervals.
            pass
    return [intlist[i] for i in range(len(intlist)) if intlist[i] != '']
def insert(intlist, newint):
    # Append the new interval and let mergeOverlapping() re-sort and
    # coalesce the whole list.
    intlist.append(newint)
    return mergeOverlapping(intlist)
def main():
    # Interactive loop (Python 2): read an initial comma-separated list of
    # intervals, then keep inserting intervals until the user types 'quit',
    # printing the merged list after each insertion.
    #global variable intlist
    intlist = []
    input_interval = raw_input('List of intervals?')
    try:
        # split on commas that follow a closing bracket, so commas inside
        # an interval like '[1,5]' are not split points
        intlist = [interval(x) for x in re.split('(?<=[\]\)])\s*,\s*', input_interval.strip())]
    except:
        print 'Invalid interval'
    while True:
        new_input = raw_input('Interval?')
        if new_input == 'quit':
            break
        else:
            try:
                new_input = interval(new_input.strip())
                intlist = insert(intlist,new_input)
            except:
                print 'Invalid interval!'
                continue
        print intlist

if __name__ == '__main__':
    main()
| true |
e2847efff72fdae80f52f8beb035ab1692bb007b | Python | poweredhoward/nba-salary-stats | /app/views.py | UTF-8 | 11,375 | 2.609375 | 3 | [] | no_license | import os
import glob
from flask import current_app as application
from flask import request as flask_request
from flask import render_template
from flask import Response
from numpy import histogram
import numpy as np
from joblib import dump, load
from sqlalchemy import and_
import csv
import pandas as pd
import seaborn as sns
import io
import matplotlib.image
import matplotlib
matplotlib.use('Agg')
import uuid
from app.models.salary import Salary
from app.models.stats import Stats
from app import db
# NBA salary cap (USD) used to scale the model's %-of-cap predictions.
TODAYS_SALARY_CAP = 101869000
# Display floats with two decimals in pandas output.
pd.set_option('float_format', '{:.2f}'.format)
# TODO: Add exception handling!!!
# Main dashboard presentation
@application.route('/')
def index():
    """Render the main dashboard: selectable stats/years/positions plus a
    summary table of the core salary-predictor statistics."""
    all_data = entire_dataset()
    # Choose statistic fields that will be selectable
    # (stats_column_mapping is defined elsewhere; presumably maps column
    # names to display labels -- the first 6 entries are skipped. TODO confirm)
    stat_options = stats_column_mapping.items()
    stat_options = list(stat_options)[6:]
    # Choose years that will be selectable
    years = db.session.query(Stats)\
        .distinct(Stats.season)\
        .filter(Stats.season > 1995)\
        .order_by(Stats.season.desc()).all()
    year_options = [ year.season for year in years ]
    position_options = ['PG', 'SG', 'G', 'SF', 'PF', 'F', 'C', 'All']
    # The "core statistics" that are the best predictors of salary
    optimized_fields = all_data[['% of cap', 'salary', 'cap', 'age', 'per', 'ppg', 'min_per_game', 'def_reb_per_game', 'fg_per_game', 'fga_per_game']]
    # Clean/wrangle data so it looks better
    described = optimized_fields.describe()
    described.columns = ['% of Cap', 'Salary', 'Cap', 'Age', 'PER', 'PPG', 'MPG', 'DRPG', 'FG Made Per Game', 'FGA Per Game']
    described['Salary'] = described['Salary'].apply(lambda x: "${0:,.2f}".format(x))
    described['Cap'] = described['Cap'].apply(lambda x: "${0:,.2f}".format(x))
    # Drop describe()'s "count" row; keep the distribution summary rows.
    relevant_data = described.loc[["mean", "std", "min", "25%", "50%", "75%", "max"]]
    return render_template(
        'dashboard.html',
        stat_options=stat_options,
        year_options = year_options[1:],
        position_options = position_options,
        description_table_data=[relevant_data.to_html(classes="table table-striped", header="false")],
        description_table_cols = relevant_data.columns.values
    )
# Endpoint to delete pictures from the /generated folder
@application.route('/clear-pictures/sdfpgup9fdsfsd9f2345', methods=['GET'])
def clear_pics():
    """Delete every generated chart image from the static/generated folder."""
    files = glob.glob("./app/static/generated/*")
    for f in files:
        os.remove(f)
    return "success"
# Get salary prediction
@application.route('/prediction/salary', methods=['POST'])
def get_prediction():
    """Predict a salary from posted stats using the saved random-forest model."""
    params = flask_request.get_json(force=True)
    print(params)
    # Feature order must match the order the model was trained with.
    input = np.array([[
        float(params['age']),
        float(params['per']),
        float(params['ppg']),
        float(params['min_per_game']),
        float(params['def_reb_per_game']),
        float(params['fg_per_game']),
        float(params['fga_per_game'])
    ]])
    model = load('random_forest_model_2000_final.joblib')
    salary_prediction = model.predict(input)
    # The model outputs a percentage of the salary cap; convert to dollars
    # using today's cap.
    raw_prediction = (float(salary_prediction[0]) / 100) * TODAYS_SALARY_CAP
    formatted_float = "${:,.2f}".format(raw_prediction)
    return {
        "salary_prediction": formatted_float
    }
# Used in application deployment, seed DB with initial values
@application.route('/seed/6516854352asdffsdg')
def get():
    """One-shot endpoint that seeds the salary and stats tables from CSV files."""
    seed_salary_table()
    seed_stats_table()
    return {"hello": "world"}
# Get both visualizations
@application.route('/getVisual/scatterplot', methods=['POST'])
def post():
    """Build a salary-vs-stat scatterplot and a salary histogram as PNG files."""
    params = flask_request.get_json(force=True)
    stat_selected = str(params['stat_selected'])
    year_selected = int(params['year_selected'])
    # SQL LIKE pattern; "All" disables the position filter below.
    position_selected = "%{}%".format(str(params['position_selected']))
    # NOTE(review): img is never used.
    img = io.BytesIO()
    filters = [Salary.season_start > year_selected]
    if position_selected and "All" not in position_selected:
        filters.append((Stats.position.like(position_selected)))
    f = and_(*filters)
    # Join salary to stats on (season, player) and select the chosen column.
    results1 = db.session\
        .query(Stats.player_name, Salary.salary, Stats.season, Stats.team, getattr(Stats,stat_selected) )\
        .join(Salary, and_(Stats.season==Salary.season_start, Stats.player_name==Salary.player_name))\
        .filter(f)\
        .limit(4000)
    df = pd.read_sql(results1.statement, db.session.bind)
    # TODO: Set boundaries of Y axis for max salary
    scatterplot = sns.lmplot(
        data = df,
        x=stat_selected,
        y="salary"
    )
    file_path, scatterplot_filename = generate_filename()
    scatterplot.savefig("{}".format(file_path))
    histogram = sns.displot(df["salary"], bins=100)
    file_path, histogram_filename = generate_filename()
    histogram.savefig("{}".format(file_path))
    return {
        "scatterplot_filename": scatterplot_filename,
        "histogram_filename": histogram_filename
    }
def generate_filename():
    """Create a unique PNG filename; return (disk path, template-relative path)."""
    name = "{}.png".format(uuid.uuid4().hex)
    return "app/static/generated/" + name, "static/generated/" + name
# Returns all raw CSV data together
def entire_dataset():
    """Load the raw stats CSV and derive per-game columns and salary-cap ratios.

    Returns a DataFrame with one row per player-season, including the
    computed `% of cap` column used throughout the dashboard.
    """
    stats_dataset = pd.read_csv("salary-data_2000.csv")
    # NOTE(review): dropna() returns a new frame, so as written this line is
    # a no-op. Kept to preserve current behaviour -- confirm intent.
    stats_dataset.dropna()
    # Derive per-game averages from season totals in one pass (the original
    # code computed off_reb_per_game twice).
    per_game_columns = [
        ('ppg', 'points'),
        ('min_per_game', 'min_played'),
        ('off_reb_per_game', 'off_reb'),
        ('assists_per_game', 'assists'),
        ('reb_per_game', 'reb'),
        ('def_reb_per_game', 'def_reb'),
        ('ft_per_game', 'ft'),
        ('fta_per_game', 'fta'),
        ('fg_per_game', 'fg'),
        ('fga_per_game', 'fga'),
        ('blocks_per_game', 'blocks'),
        ('steals_per_game', 'steals'),
        ('turnovers_per_game', 'turnovers'),
        ('fouls_per_game', 'fouls'),
    ]
    for new_col, source_col in per_game_columns:
        stats_dataset[new_col] = stats_dataset[source_col] / stats_dataset['games_played']
    # Attach the league salary cap for each season and express each salary
    # as a percentage of that season's cap.
    salary_caps = pd.read_csv("salary-cap.csv", index_col=0).to_dict()
    cap = salary_caps['Salary Cap']
    stats_dataset['cap'] = stats_dataset['season'].map(cap)
    stats_dataset["% of cap"] = (stats_dataset['salary'] / stats_dataset['cap']) * 100
    return stats_dataset
optimized_fields = ['% of cap', 'age', 'per', 'ppg', 'min_per_game', 'def_reb_per_game', 'fg_per_game', 'fga_per_game']
# Ignore
"""
select
distinct
sal.salary,
stats.age,
stats.games_played,
stats.games_started,
stats.min_played,
stats.per,
stats.true_shooting,
stats.three_pt_att_rate,
stats.ft_rate,
stats.off_reb_perc,
stats.def_reb_perc,
stats.total_reb_perc,
stats.assist_perc,
stats.steal_perc,
stats.block_perc,
stats.to_perc,
stats.usage_perc,
stats.offensive_win_shares,
stats.defensive_win_shares,
stats.win_shares,
stats.win_shares_per_48,
stats.offensive_box_p_m,
stats.defensive_box_p_m,
stats.box_p_m,
stats.var,
stats.fg,
stats.fga,
stats.fg_perc,
stats.three_pt_fg,
stats.three_pt_fga,
stats.three_pt_fg_perc,
stats.two_pt_fg,
stats.two_pt_fga,
stats.two_pt_fg_perc,
stats.effective_fg_perc,
stats.ft,
stats.fta,
stats.ft_perc,
stats.off_reb,
stats.def_reb,
stats.reb,
stats.assists,
stats.steals,
stats.blocks,
stats.turnovers,
stats.fouls,
stats.points
from player_salary AS sal
INNER JOIN player_stats AS stats
ON sal.season_start=stats.season AND sal.player_name=stats.player_name
"""
# Ignore
"""
select * from player_salary as sal
inner join player_stats stats on stats.player_name = sal.player_name and stats.season = sal.season_start
where season_start < 2010
and sal.player_name = 'Tracy McGrady'
order by salary desc
select * from player_salary
where player_name LIKE '%Tracy%'
"""
# Mapping of displayed column names to names inside the data structure
salary_column_mapping = {
'Register Value': "data_id",
'Player Name':"player_name",
'Salary in $':"salary",
'Season Start':"season_start",
'Season End': "season_end",
'Team': "team_short",
'Full Team Name':"team_full"
}
stats_column_mapping = {
"Index": "data_id",
"Year": "season",
"Player": "player_name",
"Pos": "position",
"Age": "age",
"Tm": "team",
"G": "games_played",
"GS": "games_started",
"MP": "min_played",
"PER": "per",
"TS%": "true_shooting",
"3PAr": "three_pt_att_rate",
"FTr": "ft_rate",
"ORB%": "off_reb_perc",
"DRB%": "def_reb_perc",
"TRB%": "total_reb_perc",
"AST%": "assist_perc",
"STL%": "steal_perc",
"BLK%": "block_perc",
"TOV%": "to_perc",
"USG%": "usage_perc",
"OWS": "offensive_win_shares",
"DWS": "defensive_win_shares",
"WS": "win_shares",
"WS/48": "win_shares_per_48",
"OBPM": "offensive_box_p_m",
"DBPM": "defensive_box_p_m",
"BPM": "box_p_m",
"VORP": "var",
"FG": "fg",
"FGA": "fga",
"FG%": "fg_perc",
"3P": "three_pt_fg",
"3PA": "three_pt_fga",
"3P%": "three_pt_fg_perc",
"2P": "two_pt_fg",
"2PA": "two_pt_fga",
"2P%": "two_pt_fg_perc",
"eFG%": "effective_fg_perc",
"FT": "ft",
"FTA": "fta",
"FT%": "ft_perc",
"ORB": "off_reb",
"DRB": "def_reb",
"TRB": "reb",
"AST": "assists",
"STL": "steals",
"BLK": "blocks",
"TOV": "turnovers",
"PF": "fouls",
"PTS": "points"
}
def seed_salary_table():
    """Load player salary rows from CSV into the Salary table.

    BUGFIX: skip unmapped/empty cells before calling map_salaries, mirroring
    seed_stats_table; previously map_salaries returned None for empty values
    and the tuple-unpack raised TypeError.
    """
    with open('app/static/data/player_salaries.csv') as salary_file:
        csv_reader = csv.DictReader(salary_file, delimiter=',')
        for row in csv_reader:
            player_dict = {}
            for k, v in row.items():
                if k not in salary_column_mapping or not k or not v:
                    continue
                key, val = map_salaries(k, v)
                player_dict[key] = val
            player_salary = Salary(**player_dict)
            db.session.add(player_salary)
        db.session.commit()
def seed_stats_table():
    """Load player stat rows from CSV into the Stats table (empty cells skipped)."""
    with open('app/static/data/player_stats.csv') as stats_file:
        csv_reader = csv.DictReader(stats_file, delimiter=',')
        for row in csv_reader:
            player_dict = {}
            for k, v in row.items():
                # Ignore columns not in the mapping and empty keys/values.
                if k not in stats_column_mapping or not k or not v:
                    continue
                key, val = map_stats(k, v)
                player_dict[key] = val
            player_stats = Stats(**player_dict)
            db.session.add(player_stats)
        db.session.commit()
def map_salaries(key, value):
    """Translate one CSV cell into (model field name, value).

    Returns None when the value is empty, signalling the caller to skip it.
    """
    if not value:
        return None
    return salary_column_mapping[key], value
def map_stats(key, value):
    """Translate one CSV cell into (model field name, value).

    Returns None when the value is empty, signalling the caller to skip it.
    """
    if not value:
        return None
    return stats_column_mapping[key], value
5a47321cd567881f9ae07080970ceb78b55beb02 | Python | XX-O2/shiyanlou_python_challenge | /挑战3:工资计算器读写数据文件/calculator.py | UTF-8 | 2,283 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env pytho3
import sys
import csv
import json
# Progressive tax brackets: (upper bound of taxable income, rate, quick deduction).
_TAX_BRACKETS = [
    (3000, 0.03, 0),
    (12000, 0.1, 210),
    (25000, 0.2, 1410),
    (35000, 0.25, 2660),
    (55000, 0.3, 4410),
    (80000, 0.35, 7160),
    (float('inf'), 0.45, 15160),
]


def shebao_calculate(num, total, min, max):
    """Compute (social-insurance payment, income tax, net salary).

    num -- gross monthly salary; total -- combined insurance rate;
    min/max -- lower/upper bounds of the insurance contribution base.
    (Parameter names shadow builtins but are kept for compatibility.)
    """
    num = float(num)
    # Clamp the contribution base to the legal [min, max] range.
    base = num if min <= num <= max else (min if num < min else max)
    shebao_money = base * total
    # Taxable income after insurance and the 5000 standard deduction.
    should_tax = num - shebao_money - 5000
    if should_tax <= 0:
        tax = 0
    else:
        # Data-driven bracket lookup replaces the old elif chain.
        for upper, rate, deduction in _TAX_BRACKETS:
            if should_tax <= upper:
                tax = should_tax * rate - deduction
                break
    gongzi = num - shebao_money - tax
    return shebao_money, tax, gongzi
def calculate(user, total, min, max):
    """Build the output row [id, gross, shebao, tax, net] for one user record."""
    employee_id, salary = user[0], user[1]
    shebao, tax, net = shebao_calculate(salary, total, min, max)
    row = [int(employee_id), int(salary)]
    row.extend('{:.2f}'.format(float(v)) for v in (shebao, tax, net))
    return row
if __name__ == "__main__":
    # Parse command-line flags: -c config file, -d user data file, -o output file.
    args = sys.argv[1:]
    index_c = args.index('-c')
    configfile = args[index_c + 1]
    index_d = args.index('-d')
    userfile = args[index_d + 1]
    index_o = args.index('-o')
    gongzifile = args[index_o + 1]
    # Read the config file: JiShuL/JiShuH are the insurance-base bounds;
    # every other entry is an insurance rate summed into `total`.
    with open(configfile, 'r') as f2:
        datas = f2.readlines()
    config_dict = {}
    for data in datas:
        ID, NUM = data.strip().split("=")
        config_dict[ID.strip()] = NUM.strip()
    total = 0
    for key, value in config_dict.items():
        if key == "JiShuL":
            min = float(value)
        elif key == "JiShuH":
            max = float(value)
        else:
            total += float(value)
    # Compute each employee's row and write the result CSV.
    with open(userfile, 'r') as f1:
        users = list(csv.reader(f1))
    users_info = []
    for user in users:
        user_info = calculate(user, total, min, max)
        users_info.append(user_info)
    with open(gongzifile, 'w') as f3:
        csv.writer(f3).writerows(users_info)
| true |
ca83fedf2e82cd13d25b12c3cccd9f182f6242dd | Python | timkaboya/python-web-crawler | /test.py | UTF-8 | 1,052 | 2.59375 | 3 | [] | no_license | from crawler import crawl_web, compute_ranks
from search import lucky_search, ordered_search
def test_engine():
    """Smoke-test the crawl/rank/search pipeline against the Udacity cs101 sample site.

    Python 2 script; the expected output of each query is shown in the
    #>>> comments.
    """
    index, graph = crawl_web('http://www.udacity.com/cs101x/index.html')
    ranks = compute_ranks(graph)
    #print index
    print "_+_+_+_++_+_++_+_+_+_+_++_+_+_++"
    print lucky_search(index, ranks, 'walking')
    #>>> https://www.udacity.com/cs101x/index.html
    print lucky_search(index, ranks, 'kicking')
    #>>> https://www.udacity.com/cs101x/crawling.html
    print lucky_search(index, ranks, 'Ossifrage')
    #>>> https://www.udacity.com/cs101x/flying.html
    print lucky_search(index, ranks, 'ossifrage')
    #>>> None
    print "_+_+_+_++_+_++_+_+_+_+_++_+_+_++"
    print ordered_search(index, ranks, 'to')
    #>>> https://www.udacity.com/cs101x/index.html
    print ordered_search(index, ranks, 'Ossifrage')
    #>>> https://www.udacity.com/cs101x/flying.html
    print ordered_search(index, ranks, 'crawl')
    #>>> index crawling
    print ordered_search(index, ranks, 'ossifrage')
    #>>> None
test_engine()
c58a08af42469e225140339e8174a6fa1075d251 | Python | esddse/SDP | /SDP_failed/nn.py | UTF-8 | 5,107 | 2.71875 | 3 | [] | no_license | import random
from math import *
from util import *
from loss import *
import numpy as np
# x: scalar or ndarray
def sigmoid(x):
    """Logistic sigmoid: 1 / (1 + e^-x)."""
    denominator = 1. + np.exp(-x)
    return 1. / denominator
# x: the sigmoid OUTPUT (not its input)
def d_sigmoid(x):
    """Sigmoid derivative expressed via the sigmoid output x: x * (1 - x)."""
    one_minus = 1. - x
    return x * one_minus
# x: the tanh OUTPUT (not its input)
def d_tanh(x):
    """tanh derivative expressed via the tanh output x: 1 - x^2."""
    return 1. - x ** 2
# x is a vector
def softmax(x):
    """Numerically stable softmax: shift by max(x) before exponentiating.

    The shift leaves the result mathematically unchanged but prevents
    overflow (inf/inf -> nan) for large inputs.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / np.sum(shifted, axis=0)
# x is a vector (works for any ndarray shape)
def relu(x):
    """Element-wise max(x, 0); vectorised instead of a Python-level map."""
    return np.maximum(x, 0)
def d_relu(x):
    """Subgradient of relu: 1 where x > 0, else 0 (0 at x == 0, as before)."""
    return np.where(x > 0, 1, 0)
# x is a scalar
# None activation function
def nochange(x):
    """Identity activation: return x unchanged."""
    return x
def d_nochange(x):
    """Derivative of the identity activation (constant 1)."""
    return 1
# random vector values in [a,b) shape args
def random_vector(a, b, *args):
    """Uniform random array of shape *args with values in [a, b).

    NOTE(review): reseeding with 0 on every call makes every invocation draw
    from the same stream, so equal-shaped weight matrices start identical --
    confirm this reproducibility is intended.
    """
    np.random.seed(0)
    return np.random.rand(*args) * (b-a) + a
###################################### embedding ##################################
###################################################################################
class embedding_unit_pretrained(object):
    """Lookup table for pre-trained word vectors loaded from a text file.

    File format: one token per line, followed by its space-separated
    float components.
    """

    def __init__(self, path):
        self.dic = {}
        self.x_dim = None
        self.embedding_dim = None
        with open(path, 'r', encoding='utf8') as f:
            for line in f:
                parts = line.strip().split(' ')
                vector = np.array([float(c) for c in parts[1:]])
                self.embedding_dim = len(vector)
                self.dic[parts[0]] = vector
        self.x_dim = len(self.dic)

    def get_embedding(self, x):
        """Return the stored vector for x, or a zero vector for OOV tokens."""
        if x in self.dic:
            return self.dic[x]
        return np.zeros(self.embedding_dim)
class embedding_param(object):
    """Trainable embedding matrix (embedding_dim x vocabulary) plus its gradient."""

    def __init__(self, embedding_dim, x_list):
        self.embedding_dim = embedding_dim
        self.x_list = x_list
        self.x_dim = len(x_list)
        # embedding dict: column i is the vector for x_list[i]
        self.W = random_vector(-0.1, 0.1, embedding_dim, self.x_dim)
        self.d_W = np.zeros((embedding_dim, self.x_dim))

    def update(self, learning_rate, div_num):
        """Apply an averaged SGD step, clamp weights to [-1, 1], reset grads.

        BUGFIX: the old clip helpers computed min(max(w, 1.0), -1.0), which
        forces every weight to -1.0; np.clip(w, -1.0, 1.0) is the intended
        element-wise clamp.
        """
        self.W -= learning_rate * self.d_W / div_num
        np.clip(self.W, -1.0, 1.0, out=self.W)
        # reset accumulated gradient
        self.d_W = np.zeros_like(self.W)

    def one_hot(self, x):
        """One-hot column vector for token x (all zeros if x is unknown)."""
        vector = np.zeros(self.x_dim)
        if x in self.x_list:
            vector[self.x_list.index(x)] = 1.
        return vector
class embedding_unit(object):
    """One embedding lookup: caches the one-hot and the projected vector."""

    def __init__(self, param, x):
        self.param = param
        self.x = x
        self.one_hot = param.one_hot(x)
        self.embedding = param.W.dot(self.one_hot)

    def get_embedding(self):
        """Return the dense vector computed at construction time."""
        return self.embedding

    def backward(self, d_e):
        """Accumulate the embedding gradient into the shared parameter table."""
        self.param.d_W += np.outer(d_e, self.one_hot)
################################### nn #################################
########################################################################
class full_connected_param(object):
    """Weights/bias (and their gradients) for one fully connected layer."""

    def __init__(self, h_dim, x_dim):
        self.h_dim = h_dim
        self.x_dim = x_dim
        self.W = random_vector(-0.1, 0.1, h_dim, x_dim)
        self.b = random_vector(-0.1, 0.1, h_dim)
        self.d_W = np.zeros((h_dim, x_dim))
        self.d_b = np.zeros(h_dim)

    def update(self, learning_rate, div_num, clip=False):
        """Averaged SGD step; optionally clamp parameters to [-1, 1].

        BUGFIX: the old clip helpers computed min(max(w, 1.0), -1.0), which
        forces every clipped value to -1.0; np.clip is the intended
        element-wise clamp.
        """
        self.W -= learning_rate * self.d_W / div_num
        self.b -= learning_rate * self.d_b / div_num
        if clip:
            np.clip(self.W, -1.0, 1.0, out=self.W)
            np.clip(self.b, -1.0, 1.0, out=self.b)
        # reset accumulated gradients
        self.d_W = np.zeros_like(self.W)
        self.d_b = np.zeros_like(self.b)
class full_connected_layer(object):
    """Forward/backward pass of a dense layer around a shared parameter object."""

    def __init__(self, param, activation):
        self.param = param
        # Map the requested activation to an (f, f') pair; note f' is
        # written in terms of the activation OUTPUT, not its input.
        if activation == sigmoid:
            self.activation = sigmoid
            self.d_activation = d_sigmoid
        elif activation == tanh:
            self.activation = np.tanh
            self.d_activation = d_tanh
        elif activation == relu:
            self.activation = relu
            self.d_activation = d_relu
        else:
            self.activation = nochange
            self.d_activation = d_nochange

    def forward(self, x):
        """Compute a = f(Wx + b), caching x and a for the backward pass."""
        self.x = x
        self.a = self.activation(np.dot(self.param.W, x) + self.param.b)
        return self.a

    def backward(self, d_a):
        """Accumulate parameter gradients; return the gradient w.r.t. the input."""
        d_input = self.d_activation(self.a) * d_a
        self.param.d_W += np.outer(d_input, self.x)
        self.param.d_b += d_input
        # return d_X
        return np.dot(self.param.W.T, d_input)

    def backward_with_loss(self, y, loss_layer):
        """Run the loss layer on the cached activation, then backpropagate.

        BUGFIX: previously called the non-existent self.backward_without_loss
        (AttributeError at runtime). NOTE(review): main() unpacks FOUR values
        from full_step while this unpacks three -- confirm the loss layer's
        return arity before relying on this helper.
        """
        pred_prob, loss, d_a = loss_layer.full_step(self.a, y)
        return pred_prob, loss, self.backward(d_a)
def main():
    """Tiny training-loop sanity check: one embedding -> dense -> softmax loss."""
    np.random.seed(10)
    h_dim = 2
    embedding_dim = 10
    x_dim = 3
    e_param = embedding_param(embedding_dim, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    unit_3 = embedding_unit(e_param, 3)
    nn_param = full_connected_param(h_dim, embedding_dim)
    layer1 = full_connected_layer(nn_param, None)
    softmax_layer = softmax_loss_layer(['a', 'b', 'c'], h_dim)
    for cur_iter in range(100):
        print("cur iter: ", cur_iter)
        xs = layer1.forward(unit_3.get_embedding())
        pred_label, pred_prob, loss, d_h = softmax_layer.full_step(xs, 'c')
        print(softmax_layer.one_hot('c'))
        d_x = layer1.backward(d_h)
        print(d_x)
        unit_3.backward(d_x)
        print("y_pred : ", pred_prob)
        print("loss: ", loss)
        # BUGFIX: update() requires div_num; one sample per step -> 1.
        nn_param.update(0.1, 1)
        e_param.update(0.1, 1)
if __name__ == "__main__":
main()
| true |
a63efcf9bc738b5960860d1d1462660895128b81 | Python | ksenia-krasheninnikova/cnv_scripts | /draw_hist | UTF-8 | 1,334 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python
import bisect
import sys
import argparse
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
def parse_values(path):
    """Read "chrN value" lines; return [(N, 100*value)] sorted by chromosome.

    Lines whose value field is exactly "0.00" are skipped.
    """
    records = []
    with open(path) as handle:
        for raw in handle:
            fields = raw.strip().split(' ')
            if fields[1] == '0.00':
                continue
            chrom_number = int(fields[0].split('chr')[1])
            records.append((chrom_number, 100 * float(fields[1])))
    records.sort(key=lambda item: item[0])
    return records
if __name__ == '__main__' :
    parser = argparse.ArgumentParser()
    parser.add_argument('path')
    args = parser.parse_args()
    # Output PDF; swap the commented path/title/colour per sample cohort.
    pp=PdfPages('./chrom_seg_dup_cov_yak.pdf')
    #pp=PdfPages('./chrom_seg_dup_cov.pdf')
    data = parse_values(args.path)
    plt.xlabel('Proportion of segmental duplications (%)')
    #plt.title('Common segdups in all samples')
    plt.title('Common segdups in Yakutsk samples')
    # One horizontal bar per chromosome, in sorted chromosome order.
    y_axis = np.arange(1, len(data) + 1, 1)
    # NOTE(review): under Python 3, map() returns an iterator -- confirm
    # matplotlib accepts it here (wrapping in list(...) would be safer).
    plt.barh(y_axis, map(lambda x:x[1],data), align='center', facecolor='firebrick', alpha=0.5)
    #'springgreen'(pskov), 'darkorange'(novgorod), 'firebrick'(yakutsk), 'gray'(all)
    plt.yticks(y_axis, map(lambda x:'chr'+str(x[0]),data))
    plt.legend(fontsize=10)
    plt.savefig(pp, format='pdf')
    pp.close()
| true |
d107cfcb07d9e59ea8adebef48fc652c4b5aded3 | Python | al4/flickrpub | /flickrpub/exif.py | UTF-8 | 845 | 2.546875 | 3 | [] | no_license | import logging
import piexif
logger = logging.getLogger('flickrpub.exif')
# http://www.exiv2.org/tags.html
class ExifReader(object):
""" Extracting exif information from files """
def __init__(self, file):
self.file = file
self._data = None
def _load(self):
self._data = piexif.load(self.file)
def _read_exif_property(self, prop):
if self._data is None:
self._load()
return self._data['Exif'][prop]
@property
def exif_data(self):
if self._data is None:
self._load()
return self._data['Exif']
@property
def zeroth_data(self):
if self._data is None:
self._load()
return self._data['0th']
@property
def datetime_original(self):
return self.exif_data[piexif.ExifIFD.DateTimeOriginal]
| true |
738d5b03806b64a31782c4dc56d58098bb1a8ce1 | Python | lemonez/udacity-neo-project | /write.py | UTF-8 | 2,997 | 3.234375 | 3 | [] | no_license | """Write a stream of close approaches to CSV or to JSON.
This module exports two functions: `write_to_csv` and `write_to_json`, each of
which accept an `results` stream of close approaches and a path to which to
write the data.
These functions are invoked by the main module with the output of the `limit`
function and the filename supplied by the user at the command line. The file's
extension determines which of these functions is used.
You'll edit this file in Part 4.
"""
import csv
import json
import helpers
def write_to_csv(results, filename):
    """Write an iterable of `CloseApproach` objects to a CSV file.

    One row per close approach; a `None` NEO name is written as the empty
    string. Only the header row is written when `results` is empty/falsy.

    :param results: An iterable of `CloseApproach` objects.
    :param filename: A Path-like object pointing to where the data should be saved.
    """
    fieldnames = ('datetime_utc', 'distance_au', 'velocity_km_s', 'designation',
                  'name', 'diameter_km', 'potentially_hazardous')
    with open(filename, 'w') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(fieldnames)
        if not results:
            return None
        for approach in results:
            name = "" if approach.neo.name is None else approach.neo.name
            writer.writerow((
                approach.time,
                approach.distance,
                approach.velocity,
                approach.neo.designation,
                name,
                approach.neo.diameter,
                str(approach.neo.hazardous),
            ))
def write_to_json(results, filename):
    """Write an iterable of `CloseApproach` objects to a JSON file.

    The output is a list of dicts, one per approach, each produced by
    `format_ca` (the 'neo' key holds the associated NEO's attributes).

    :param results: An iterable of `CloseApproach` objects.
    :param filename: A Path-like object pointing to where the data should be saved.
    """
    serialised = [format_ca(approach) for approach in results]
    with open(filename, 'w') as outfile:
        json.dump(serialised, outfile)
def format_ca(ca):
    """Format a CloseApproach object as a JSON-serialisable dict."""
    neo = ca.neo
    return {
        'datetime_utc': helpers.datetime_to_str(ca.time),
        'distance_au': float(ca.distance),
        'velocity_km_s': float(ca.velocity),
        'neo': {
            'designation': neo.designation,
            'name': neo.name if neo.name is not None else "",
            'diameter_km': neo.diameter if neo.diameter is not None else float('nan'),
            'potentially_hazardous': neo.hazardous,
        },
    }
| true |
7df88eeb23a32c9e5984e86cd85613265ae37efa | Python | vikram2784/covid-vaccine-booking | /src/otp_receiver.py | UTF-8 | 1,392 | 2.53125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import logging
import os
import tempfile
class S(BaseHTTPRequestHandler):
    # NOTE(review): this handler looks broken as written: (1) __init__
    # replaces BaseHTTPRequestHandler.__init__ without calling it, yet the
    # server instantiates handlers as cls(request, client_address, server);
    # (2) run_otp_receiver passes an *instance* where HTTPServer expects a
    # handler *class*. Consider functools.partial(S, mobile) with a
    # cooperative __init__; (3) `os` is used below but not imported in the
    # original file.
    def __init__(self, mobile):
        # Phone number whose OTP file do_POST writes.
        self.mobile = mobile

    def _set_response(self):
        # Reply 200 with a JSON content type.
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()

    def do_POST(self):
        """Persist the posted OTP to a temp file keyed by the mobile number."""
        content_length = int(self.headers['Content-Length'])  # <--- Gets the size of data
        post_data = self.rfile.read(content_length)  # <--- Gets the data itself
        logging.info("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n",
                str(self.path), str(self.headers), post_data.decode('utf-8'))
        with open(os.path.join(tempfile.gettempdir(), str(self.mobile) + '_cowin_covid_otp'), "w") as f:
            f.write(json.loads(post_data)["otp"])
        self._set_response()
        self.wfile.write(json.dumps({'received': 'ok'}).encode('utf-8'))
def run_otp_receiver(mobile, server_class=HTTPServer, port=8080):
    """Serve until interrupted, writing received OTPs for `mobile` to disk.

    NOTE(review): `handler_class = S(mobile)` creates an *instance*, but
    HTTPServer expects a handler *class* -- as written, request handling
    will fail; confirm against the calling code.
    """
    logging.basicConfig(level=logging.INFO)
    server_address = ('', port)
    handler_class = S(mobile)
    httpd = server_class(server_address, handler_class)
    logging.info('Starting httpd...\n')
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
    logging.info('Stopping httpd...\n')
#run_otp_receiver()
| true |
cd97a6cff23baecf309c1cf94aa2427162e15ccf | Python | aryanbangad/hello-world | /while loops.py | UTF-8 | 348 | 3.78125 | 4 | [] | no_license | v = int(input("number less than 10: "))
# Count v up from the user's input toward 10, demonstrating break/continue.
while v < 10:
    print("v is ", +v)
    print("adding 1 to v to make it bigger")
    v=v+1
    if v == 3:
        # Deliberately abort the loop once v reaches 3.
        print("v is 3 error breaking")
        break
    else:
        print("no problem")
        continue
# After the loop, report whether v actually reached 10.
print("cheaking the value of v if it is 10 ")
if v == 10 :
    print("true")
    print("sucess")
else:
    print("false")
    print("failed")
| true |
2726d1d8609b558303d34268822ce28fcb83f931 | Python | rmnmrgrd/python | /primes_theodorus_spiral.py | UTF-8 | 1,768 | 3.140625 | 3 | [] | no_license |
def is_prime(x):
if x < 2:
return False
else:
for count in range(2, x):
if x % count == 0:
return False
return True
def drawPoint(num, draw, posX, posY, rectsize):
    """Draw a square at (posX, posY) offset to the image centre: black for
    primes, white otherwise.

    BUGFIX: the body previously ignored the `rectsize` parameter and read
    the module-level `rectSize` global instead (callers pass that same
    global, so rendering is unchanged).
    """
    posX = (maxSize/2)+posX
    posY = (maxSize/2)+posY
    if is_prime(num):
        draw.rectangle([(posX-rectsize-1, posY-rectsize-1), (posX+rectsize+1, posY+rectsize+1)], 0, 0)
    else:
        draw.rectangle([(posX-rectsize+1, posY-rectsize+1), (posX+rectsize-1, posY+rectsize-1)], 255, 0)
def archimedes(n):
    """Point n on an Archimedean spiral r = a*theta, with theta = 1.4142*sqrt(n)."""
    a = 5
    theta = 1.4142 * (n ** (1 / 2))
    radius = a * theta
    return [radius * math.cos(theta), radius * math.sin(theta)]
theodorusCoordinates = []
theodorusCoordinates.append([0, 0])
def theodorus(n):
    """Append the polar coords of spiral-of-Theodorus vertex n to the shared
    module list and return its cartesian coords scaled by 10.

    Relies on being called with consecutive n (each angle builds on n-1's).
    """
    radius = math.sqrt(n)
    angle = math.atan(1 / radius) + theodorusCoordinates[n - 1][1]
    theodorusCoordinates.append([radius, angle])
    return [10 * radius * math.cos(angle), 10 * radius * math.sin(angle)]
def fermat(n):
    """Point n on a Fermat spiral r = a*sqrt(theta), theta = 1.4142*sqrt(n)."""
    a = 10
    theta = 1.4142 * (n ** (1 / 2))
    radius = a * math.sqrt(theta)
    return [radius * math.cos(theta), radius * math.sin(theta)]
from PIL import Image, ImageDraw
import math
# --- Render the spiral: one point per integer from 1 to maxNum ---
maxSize = 3800   # image side length in pixels
maxNum = 50000   # number of spiral points drawn
centerX = maxSize/2
centerY = maxSize/2
vertices = 6
im = Image.new("L", (maxSize, maxSize), 255)
draw = ImageDraw.Draw(im)
posX = maxSize/2
posY = maxSize/2
count = 1
step = 10
rectSize = 2
coordinates = None
for count in range(1, maxNum):
    # draw rectangle
    oldCoordinates = coordinates
    coordinates = theodorus(count)
    drawPoint(count, draw, coordinates[0], coordinates[1], rectSize)
    # if count > 1:
    #     draw.line(((maxSize/2)+oldCoordinates[0], (maxSize/2)+oldCoordinates[1], (maxSize/2)+coordinates[0], (maxSize/2)+coordinates[1]), 0)
del draw
# write to stdout
#im.save(sys.stdout, "PNG")
im.show()
| true |
2b1c0e9d815d438bbb5950f905b54811b4f5747f | Python | Calprimus/PYTHONFulvio | /python-3-playlist/codecademy_lesson12.py | UTF-8 | 1,377 | 3.671875 | 4 | [] | no_license | # >>>> 1/9
# my_list = [i ** 2 for i in range(1, 11)]
# # Generates a list of squares of the numbers 1 - 10
# f = open("output.txt", "w")
# for item in my_list:
# f.write(str(item) + "\n")
# f.close()
# >>>> 2/9
# my_file = open("output.txt", "r+")
# >>>> 3/9
# my_list = [i ** 2 for i in range(1, 11)]
# my_file = open("output.txt", "r+")
# for item in my_list:
# my_file.write(str(item) + "\n")
# my_file.close()
# >>>> 4/9
# my_file = open("output.txt", "r+")
# print(my_file.read())
# my_file.close()
# >>>> 5/9
# my_file = open("output.txt", "r")
# print(my_file.readline())
# print(my_file.readline())
# print(my_file.readline())
# my_file.close()
# # >>>> 6/9
# # Use a file handler to open a file for writing
# write_file = open("text.txt", "w")
# # Open the file for reading
# read_file = open("text.txt", "r")
# # Write to the file
# write_file.write("Not closing files is VERY BAD.")
# write_file.close()
# # Try to read from the file
# print (read_file.read())
# read_file.close()
# >>>> 7/9
# with open("text.txt", "w") as textfile:
# textfile.write("Success!")
# >>>> 8/9
# my_file = open("text.txt", "w")
# with my_file as file:
# file.write("Yo dude wsup")
# >>>> 9/9
# Write via a with-statement, which closes the file automatically on exit.
with open("text.txt", "w") as my_file:
    my_file.write("Yo dude wsup")
# BUGFIX: the old `if my_file != my_file.closed: my_file.close()` compared
# a file object to a bool (always True) and re-closed an already-closed
# file; both removed. Output is unchanged.
print(my_file.closed)
| true |
df68b9d5efa810a4c2c122fc2d4c0b67f012c34e | Python | nicholask98/simple-text-adventure | /rooms.py | UTF-8 | 1,408 | 3.75 | 4 | [] | no_license | import movement
def room(room_num):
    """Run one room's interaction; return False once the player escapes (room 13).

    Rooms 1-12 are not implemented yet: the old chain of bare `elif: pass`
    branches is collapsed into the final fall-through `return True`.
    """
    options = movement.determine_door_options()
    if room_num == 0:
        print('You wake up in a room. There is a door in front of you and one to the right.')
        # Options holds a list that contains all the door option letters available
        direction = str(input("Enter the direction you'd like to go:{}".format(str(options)))).lower()
        movement.move(direction)
    elif room_num == 13:
        print('You escaped!')
        return False
    return True
def visited_room(room_num):
    """Prompt for a direction in an already-visited room and move the player."""
    options = movement.determine_door_options()
    direction = str(input("Enter the direction you'd like to go:{}".format(str(options)))).lower()
    movement.move(direction)
    # FIXME: When I run main, I only get two cycles through the game loop.
    # I think it has something to do with the 2nd time through the loop
    # being treated as you're revisiting a room instead of encountering
    # a new one. More details on movement.py
    # NOTE(review): returning False here ends the caller's game loop --
    # likely the cause of the FIXME above; room() returns True to continue.
    return False
9f9d8ec231f7e406cbbea199371c1b02e2e27612 | Python | mwan780/softconass1 | /demo/demo03.py | UTF-8 | 143 | 3.0625 | 3 | [] | no_license | #!/usr/bin/python2.7 -u
# Python 2 demo: `true` is incremented to 1 (truthy), so the print always runs.
text = "print"
test = 'test'
true = 0
true += 1
if (true and 1) : print "This is a " + text + " " + test + " hello"
| true |
5bdb4ce98b3b0f5b49688705a666a07f74e3f41e | Python | Raisin5488/COMPSCI-235-A3 | /movie_web_app/adapters/database_repository.py | UTF-8 | 4,813 | 2.734375 | 3 | [] | no_license | import csv
import os
import sqlite3
from abc import ABC
from sqlalchemy import engine
from sqlalchemy.engine import Engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.orm.exc import NoResultFound
from movie_web_app.adapters.movieFileCSVReader import MovieFileCSVReader
from movie_web_app.adapters.orm import movies_actors, movies_genres
from movie_web_app.adapters.repository import AbstractRepository
from movie_web_app.domain.actor import Actor
from movie_web_app.domain.director import Director
from movie_web_app.domain.genre import Genre
from movie_web_app.domain.movie import Movie
from movie_web_app.domain.review import Review
from movie_web_app.domain.user import User
def populate(session_factory, data_path, filename):
    """Read the movies CSV and load every Movie (plus related rows) into the DB.

    NOTE(review): `data_path` is unused -- confirm whether `filename`
    should be joined onto it.
    """
    movie_file_reader = MovieFileCSVReader(filename)
    movie_file_reader.read_csv_file()
    movie_file_reader.read_csv_file_movies()
    # create a configured "Session" class
    Session = sessionmaker(bind=session_factory)
    # create a Session
    session = Session()
    # session = session_factory()
    # This takes all movies from the csv file (represented as domain model objects) and adds them to the
    # database. If the uniqueness of directors, actors, genres is correctly handled, and the relationships
    # are correctly set up in the ORM mapper, then all associations will be dealt with as well!
    for movie in movie_file_reader.dataset_of_movies:
        session.add(movie)
    session.commit()
class SessionContextManager:
    """Owns a scoped SQLAlchemy session; rolls back on with-block exit."""

    def __init__(self, session_factory):
        self.__factory = session_factory
        self.__scoped = scoped_session(self.__factory)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Uncommitted work is discarded when the with-block ends.
        self.rollback()

    @property
    def session(self):
        """The current scoped session."""
        return self.__scoped

    def commit(self):
        self.__scoped.commit()

    def rollback(self):
        self.__scoped.rollback()

    def close_current_session(self):
        if self.__scoped is not None:
            self.__scoped.close()

    def reset_session(self):
        # Discard the current session and start a fresh scoped one.
        self.close_current_session()
        self.__scoped = scoped_session(self.__factory)
class SqlAlchemyRepository(AbstractRepository, ABC):
    """Movie/user/review repository backed by a SQLAlchemy session."""

    def __init__(self, session_factory):
        self._session_cm = SessionContextManager(session_factory)

    def close_session(self):
        self._session_cm.close_current_session()

    def reset_session(self):
        self._session_cm.reset_session()

    def __iter__(self):
        self._current = 0
        return self

    def __next__(self):
        # NOTE(review): self.dataset_of_movies is never assigned anywhere in
        # this class, so iterating a repository instance will raise
        # AttributeError -- confirm whether it should come from get_movies().
        if self._current >= len(self.dataset_of_movies):
            raise StopIteration
        else:
            self._current += 1
            return self.dataset_of_movies[self._current - 1]

    def add_user(self, user: User):
        """Persist a new user."""
        with self._session_cm as scm:
            scm.session.add(user)
            scm.commit()

    def get_movies(self):
        """Return every movie, or an empty list when none are found."""
        all_movies = []
        try:
            all_movies = self._session_cm.session.query(Movie).all()
        except NoResultFound:
            print("No movies found in DB.")
            pass
        return all_movies

    def get_director_name(self, director_to_find):
        """Query of movies whose director name matches the LIKE pattern."""
        temp = self._session_cm.session.query(Movie).join(Director).filter(Director.director_full_name.like(director_to_find))
        return temp

    def get_exact_movie(self, title_to_find, year):
        """First movie whose title matches. NOTE(review): `year` is ignored,
        and the print below looks like leftover debugging."""
        temp = self._session_cm.session.query(Movie).filter(Movie.title.like(title_to_find)).first()
        print(temp)
        return temp

    def get_movie_title(self, title):
        """Query of movies whose title matches the LIKE pattern."""
        temp = self._session_cm.session.query(Movie).filter(Movie.title.like(title))
        return temp

    def get_actor_name(self, actor):
        """Query of movies featuring an actor whose name matches the LIKE pattern."""
        temp = self._session_cm.session.query(Movie).join(movies_actors).join(Actor).filter(Actor.actor_full_name.like(actor))
        return temp

    def get_genre_name(self, genre):
        """Query of movies tagged with a genre whose name matches the LIKE pattern."""
        temp = self._session_cm.session.query(Movie).join(movies_genres).join(Genre).filter(Genre.genre_name.like(genre))
        return temp

    def get_user(self, username) -> User:
        """Return the user with the given name, or None if absent."""
        user = None
        try:
            user = self._session_cm.session.query(User).filter(User.username.like(username)).one()
        except NoResultFound:
            # Ignore any exception and return None.
            pass
        return user

    def get_reviews(self):
        """Return every review in the database."""
        return self._session_cm.session.query(Review).all()

    def add_review(self, movie: Movie, review_text: str, rating: int, user: str):
        """Create and persist a review authored by `user` (looked up by name).

        Raises NoResultFound (via .one()) when the username does not exist.
        """
        user_to_add = self._session_cm.session.query(User).filter(User.username.like(user)).one()
        review = Review(movie, review_text, rating, user_to_add)
        with self._session_cm as scm:
            scm.session.add(review)
            scm.commit()
| true |
1a3df450147888d8bee2b7350616ae32c532edc0 | Python | REOSTA9/EvolutionaryComputation_2019.1 | /EC_P02/main2.py | UTF-8 | 4,785 | 2.671875 | 3 | [] | no_license | #----------------Imports--------------------
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from scipy import stats
from itertools import compress
#----------------Modules ---------------------
import ga_evo as ga
import ee_evo as ee
import de_evo as de
import ep_evo as ep
import ee_cauchy2 as eec
#import neural_network as nn
import random_forest as rf
#-------- Extructures----------------
# Accuracy samples (11 runs each) for every classifier variant;
# `normal_accu` is the plain random-forest baseline.
ga_accu = [0.63, 0.68, 0.63, 0.57, 0.65, 0.61, 0.67, 0.68, 0.65, 0.62, 0.68]
ee_accu = [0.76, 0.75, 0.85, 0.65, 0.58, 0.49, 0.78, 0.58, 0.8, 0.73, 0.8]
ep_accu = [0.69, 0.46, 0.63, 0.67, 0.71, 0.64, 0.69, 0.67, 0.61, 0.49, 0.59]
de_accu = [0.65, 0.74, 0.71, 0.69, 0.72, 0.81, 0.73, 0.68, 0.75, 0.80, 0.81]
ee_caucht_accu = [0.63, 0.68, 0.63, 0.57, 0.65, 0.61, 0.67, 0.68, 0.65, 0.62, 0.68]
ee_cauchy2_accu = [0.63, 0.68, 0.63, 0.57, 0.65, 0.61, 0.67, 0.68, 0.65, 0.62, 0.68]
normal_accu = [0.74, 0.56, 0.7, 0.62, 0.66, 0.67, 0.53, 0.48, 0.65, 0.46, 0.7]
def scores(acuracy_list):
    """Return (mean, sample standard deviation, median) of a list of accuracies.

    Uses pandas so the sample (ddof=1) standard deviation matches the rest
    of the analysis. The dead, commented-out StandardScaler line was removed.
    """
    series = pd.Series(acuracy_list)
    return series.mean(), series.std(), series.median()
#---------------Gosset's hipotesis calculatiom---
def tstudant_hipotesis(a, b):
    """Run an independent two-sample Student's t-test; return (t, p)."""
    result = stats.ttest_ind(a, b)
    return result[0], result[1]
#--------------Log Generator (Could have been saved in a txt file, but naaaah)--------------
def showHipotesis():
    """Print the Student's t-test (t, p) for every pair of accuracy lists.

    Replaces ~60 copy-pasted lines with a loop over all pairs. This also
    fixes the original's "Noemal" typos, its duplicated "DE e ee_cauchy2"
    heading (printed twice for two different comparisons), and its missing
    ee_cauchy vs ee_cauchy2 pair.
    """
    labeled = [
        ("Normal", normal_accu),
        ("GA", ga_accu),
        ("EP", ep_accu),
        ("EE", ee_accu),
        ("DE", de_accu),
        ("ee_cauchy", ee_caucht_accu),
        ("ee_cauchy2", ee_cauchy2_accu),
    ]
    for i in range(len(labeled)):
        for j in range(i + 1, len(labeled)):
            name_a, data_a = labeled[i]
            name_b, data_b = labeled[j]
            print(name_a, "e", name_b)
            t2, p2 = tstudant_hipotesis(data_a, data_b)
            print("T-value:", t2, "P-value:", p2)
#---------Função Fuleca só pra gerar o Log-----
def showStatistics(acuracy_list):
    """Print mean/SD/median for one (accuracy_values, algorithm_name) pair."""
    values, _label = acuracy_list[0], acuracy_list[1]
    mean, dp, median = scores(values)
    print("###-------Algoritm:-----###", _label, "\n")
    print("#---Mean: ", mean, "\n")
    print("#---DP:", dp, "\n")
    print("#---Median:", median, "\n")
# Pair each accuracy sample with a human-readable algorithm label, then
# print descriptive statistics (mean / SD / median) for every variant.
List_ofLists =[(normal_accu, "Normal RF"),(ga_accu,"Genetic Algoritm"),(de_accu,"Discrete Evolution"), (ee_accu,"Evolutionary Estrategy"),(ep_accu,"Evolutionary Programming"),(ee_caucht_accu, "Cauchy EE normal"), (ee_cauchy2_accu, "Cauchy EE Crossover")]
for acuracy_list in List_ofLists:
    showStatistics(acuracy_list)
# Finally run pairwise Student's t-tests between the classifier variants.
showHipotesis()
262dbd74aca7f340d9135582952336ea6cc28387 | Python | AyuJ01/forsk | /day8/default dict.py | UTF-8 | 315 | 3.109375 | 3 | [] | no_license | from collections import defaultdict
from collections import Counter #
from collections import OrderedDict
from collections import namedtuple
# Count how often each value occurs in the sample list.
s = [1, 5, 2, 3, 4, 1, 3, 5, 2, 4, 1, 3, 5, 1, 4]
dd = defaultdict(int)
for value in s:
    dd[value] += 1
print(dd)


def sump(a, b):
    """Return the sum and the product of a and b."""
    return a + b, a * b


# Note: this rebinds `s` from the sample list to the sum.
s, p = sump(2, 3)
print(s, p)
d2ad3c29150c83fa3e3928995f2a0695efec1f05 | Python | bpaulmier/PROJ-H402 | /boundRectTest.py | UTF-8 | 3,013 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 19:53:53 2020
@author: benjaminpaulmier
"""
import cv2 as cv
import os
import time #to measure performance
def extractFramesWithoutBR(videoName):
    """Dump every frame of the named video to disk as JPEGs.

    Returns the elapsed wall-clock time in seconds (used as a baseline for
    the bounding-rect benchmark below).
    """
    start = time.time()
    capture = cv.VideoCapture('/Users/benjaminpaulmier/downloads/' + videoName + '.MOV')
    has_frame, frame = capture.read()
    # Folder that will hold the extracted frames.
    os.mkdir('/Users/benjaminpaulmier/downloads/frames1/')
    index = 1
    while has_frame:
        cv.imwrite('/Users/benjaminpaulmier/downloads/frames1/frame%d.jpg' % index, frame)
        has_frame, frame = capture.read()
        index += 1
    return time.time() - start
def extractFramesWithBR(videoName):
    """Extract every frame and, per frame, compute the bounding rectangle
    of the union of all detected contours (the ROI).

    Returns the elapsed wall-clock time in seconds; the cropped ROI itself
    is computed and discarded -- this function only benchmarks the cost of
    the bounding-rect pipeline versus extractFramesWithoutBR.
    """
    a=time.time()
    vidcap = cv.VideoCapture('/Users/benjaminpaulmier/downloads/'+videoName+'.MOV') #vidcap contains the video
    nextFrameExists,image = vidcap.read() # "read" grabs, decodes and returns the next frame
    #create the folder in which the frames will be kept
    os.mkdir('/Users/benjaminpaulmier/downloads/frames2/')
    count=1
    while nextFrameExists:
        cv.imwrite('/Users/benjaminpaulmier/downloads/frames2/frame%d.jpg' % count, image) #imwrite saves an image to a specified file.
        src = cv.imread('/Users/benjaminpaulmier/downloads/frames2/frame%d.jpg' % count)
        gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
        gray = cv.blur(gray, (5,5)) #to eliminate "false" contours
        # Detect edges using Canny edge detection
        canny_output = cv.Canny(gray, 100, 200)
        # Find contours (3-value return implies OpenCV 3.x findContours API)
        _, contours, hierarchy = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
        # Approximate contours to polygons + get bounding rects
        contours_poly = [None]*len(contours)
        boundRect = [None]*len(contours)
        for i, c in enumerate(contours):
            contours_poly[i] = cv.approxPolyDP(c, 3, True)
            boundRect[i] = cv.boundingRect(contours_poly[i])
        # Union of all bounding rects as [x_min, y_min, x_max, y_max].
        # NOTE(review): raises IndexError on frames with zero contours -- confirm.
        keyValues = [boundRect[0][0],boundRect[0][1],boundRect[0][0]+boundRect[0][2],boundRect[0][1]+boundRect[0][3]]
        for i in range(1,len(boundRect)):
            if boundRect[i][0] < keyValues[0]:
                keyValues[0] = boundRect[i][0]
            if boundRect[i][1] < keyValues[1]:
                keyValues[1] = boundRect[i][1]
            if boundRect[i][0]+boundRect[i][2] > keyValues[2]:
                keyValues[2] = boundRect[i][0]+boundRect[i][2]
            if boundRect[i][1]+boundRect[i][3] > keyValues[3]:
                keyValues[3] = boundRect[i][1]+boundRect[i][3]
        #Obtain cropped image of ROI
        finalRect = src[keyValues[1]:keyValues[3], keyValues[0]:keyValues[2]]
        #finalRect won't be used, this is simply to test the time it takes to compute it
        nextFrameExists,image = vidcap.read()
        count +=1
    return time.time()-a
e8dab6a7b09f4fa595ddc9e41147d5219237efdf | Python | nagyistoce/eucalyptus-ui | /eucaconsole/response.py | UTF-8 | 594 | 2.71875 | 3 | [] | no_license | # a wrapper object for json responses to the browser
class Response(object):
    """Thin wrapper around a result payload returned to the browser as JSON."""

    # Payload carried by this response; None until set in __init__.
    results = None

    def __init__(self, results):
        self.results = results
class ClcError(object):
    """Error payload for the browser: status code, summary and trimmed message."""

    status = None
    summary = None
    message = None

    def __init__(self, status, summary, message):
        self.status = status
        self.summary = summary
        # Trim the message at the first "Caused by" so the browser is not
        # flooded with a full stack trace; None/empty messages pass through.
        cut = message.find("Caused by") if message else -1
        self.message = message if cut == -1 else message[:cut - 1]
| true |
d97e74050677dbac5d970aaf6d2a0890336c6162 | Python | campanulamediuml/lbandkg | /lbandkg.py | UTF-8 | 309 | 3.1875 | 3 | [] | no_license | while True:
data = str(raw_input('input the weight with unit:'))
if data == 'exit' :
exit()
else:
unit = data[len(data)-2:]
temp = filter(str.isdigit, data)
if unit == 'kg' :
lb = 2.20458554 * float(temp)
print lb,'lb'
elif unit == 'lb' :
kg = float(temp) // 2.20458554
print kg,'kg'
| true |
b10a2a1d1b6bf5d9bafb3d60de0b211af988b099 | Python | yufengjinsong/wxPython | /app/douyu/html_parser.py | UTF-8 | 2,980 | 2.546875 | 3 | [] | no_license | #coding:utf-8
from bs4 import BeautifulSoup
import json
class HtmlParser(object):
    """Extracts DouYu live-room information from listing-page HTML and from
    the JSON blobs embedded in a single room page."""
    def __init__(self):
        # Rooms accumulated across all parsed listing pages.
        self.liveRoom = []
        # Followed rooms that are currently live.
        self.attentionRoom = []
    def _get_live_room(self, soup):
        """Collect a room dict from every <li> room link in the parsed page.

        Appends each room to self.liveRoom and returns just the rooms found
        in this call (None when the page has no <li> elements).
        """
        lists = soup.find_all("li")
        if lists is None or len(lists) == 0:
            return
        cur_rooms = []
        for list in lists:
            rInfo = list.find("a")
            # Skip anchors that are not room links.
            if "data-rid" not in rInfo.attrs:
                continue
            rid = rInfo.attrs["data-rid"]
            rName = rInfo.attrs["title"]
            # Streamer nickname; falls back to an "unrecognized" placeholder.
            uNameInfo = rInfo.find("span",class_="dy-name")
            if uNameInfo is None:
                rUname = '-未识别-'
            else:
                rUname = uNameInfo.get_text()
            # Viewer-count badge; 0 when the badge is absent.
            rNumber = rInfo.find("span",class_="dy-num")
            if rNumber is None:
                rNum = 0
            else:
                rNum = rNumber.get_text()
            tmpRoom = {
                "roomId":rid,
                "roomName":rName,
                "nickName":rUname,
                "roomNum":rNum
            }
            self.liveRoom.append(tmpRoom)
            cur_rooms.append(tmpRoom)
        return cur_rooms
    def getLiveRoom(self):
        """Return all rooms accumulated so far."""
        return self.liveRoom
    def clearAttentionRoom(self):
        """Reset the list of followed live rooms."""
        self.attentionRoom = []
    def getAttentionRoom(self):
        """Return the followed rooms collected by jsonParse."""
        return self.attentionRoom
    def parse(self,html_cont):
        """Parse raw listing HTML and return the rooms found in it."""
        soup = BeautifulSoup(html_cont,'html.parser',from_encoding="utf-8")
        return self._get_live_room(soup)
    def jsonParse(self,content):
        """Parse the embedded $ROOM JSON blobs and print the category URL.

        NOTE(review): the bare `return` below makes the show_status block
        underneath unreachable -- looks like deliberately disabled code;
        confirm before re-enabling.
        """
        roomInfo = json.loads(content["$ROOM"])
        roomshowData = json.loads(content["$ROOM.showData"])
        print(roomshowData["child_cate"]["url"])
        return
        # Stream is live (show_status == 1)
        if roomInfo["show_status"] == 1:
            self.attentionRoom.append({
                "roomId":str(roomInfo['room_id']),
                "roomName":roomInfo['room_name'],
                "nickName":roomInfo['owner_name'],
                "roomNum":roomInfo['levelInfo']['upgrade_exp']
            })
    def liveDataByRoomIdParse(self,content):
        """Return a summary dict for one room from its embedded JSON blobs.

        Prefers the child category URL and falls back to the game URL when
        "child_cate" is absent from the show data.
        """
        roomInfo = json.loads(content["$ROOM"])
        roomshowData = json.loads(content["$ROOM.showData"])
        search_url = ''
        if "child_cate" not in roomshowData:
            search_url = roomshowData["game"]["url"]
        else:
            search_url = roomshowData["child_cate"]["url"]
        return {
            "roomId":str(roomInfo['room_id']),
            "roomName":roomInfo['room_name'],
            "nickName":roomInfo['owner_name'],
            "show_status":roomInfo["show_status"],
            "search_url":search_url
        }
| true |
3a68c12085dcf95ba453d21fba5c7d0017c3a012 | Python | rsarathy/prize-banking | /simulation.py | UTF-8 | 1,489 | 3.3125 | 3 | [] | no_license | # A base class for simulating a bank that provides prize-based savings accounts.
#
# For the purposes of this simulation, we make the following assumptions:
#
# * A bank offers loans to its customers at an average of 8%.
# * Wealth follows a standard Pareto distribution, where roughly 20% of the
# accounts will own 80% of the assets under management in our bank.
# * All assets under management will be loaned out at a rate of 8%.
# * A bank will pay out "prizes" according to the latest recording balance
# before the formal end of year "drawing" takes place.
import numpy as np
# The assets under management (AUM) controlled by the bank, normalized to 1 unit.
AUM = 1
# The rate at which the bank offers loans.
LOAN_RATE = 0.08
# The interest rate that the bank will offer to savings accounts by default.
INTEREST_RATE = 0.03
# The number of savings accounts under the bank.
NUM_ACCOUNTS = 10000
class BankingSimulation(object):
    """Base class for a prize-based savings bank simulation.

    Subclasses must implement _award_accounts() to define how prizes are
    distributed among accounts.
    """
    def __init__(self):
        # Per-account balances drawn from a Pareto distribution, rescaled so
        # their total equals AUM (see _make_accounts).
        self.accounts = _make_accounts()
        self.assets_under_management = np.sum(self.accounts)
        # Per-period profits; filled in by subclasses.
        self.profits = []
    def _award_accounts(self):
        raise NotImplementedError()
def _make_accounts():
    """Create NUM_ACCOUNTS balances following a standard Pareto distribution,
    rescaled so the total assets under management equal AUM.

    The original rescaled element-by-element with a Python 2 `xrange` loop;
    this vectorized form is equivalent and runs under Python 2 and 3.
    """
    accounts = np.random.pareto(1, NUM_ACCOUNTS)
    accounts *= AUM / float(np.sum(accounts))
    return accounts
dfb21821c7b8698154d4e4f1b43633e84a775e5b | Python | tmu-nlp/100knock2021 | /seiichi/chapter04/36.py | UTF-8 | 334 | 2.609375 | 3 | [] | no_license | import pickle
from collections import Counter
import matplotlib.pyplot as plt
# Load the pickled MeCab parse and plot the ten most frequent surface forms.
with open("./mecab.bin", "rb") as f:
    m = pickle.load(f)

surfaces = [token['surface'] for sentence in m for token in sentence]
top10 = Counter(surfaces).most_common(10)
words = [word for word, _ in top10]
counts = [count for _, count in top10]
plt.bar(words, counts)
plt.title('頻度上位')
plt.show()
| true |
# Sum the even numbers 0..100, printing the running total at each step.
number = 0
for i in range(0, 101, 2):
    number += i
    print(f'{number} and {i}')
108a5d1d620c9f35fd4313675bc35187c5679183 | Python | EduRibeiro00/hashcode-training | /practice/algs/hill_climbing.py | UTF-8 | 2,059 | 3.5 | 4 | [] | no_license | from ..state import State
def hill_climbing(init_state: State, num_its: int):
    """Regular hill climbing: accept the first improving neighbour each step."""
    state = init_state
    for _ in range(num_its):
        previous = state
        # Lazily scan neighbours and take the first one that improves.
        for candidate in state.get_all_neighbours(generator=True):
            if previous < candidate:
                state = candidate
                break
        if previous == state:
            # No neighbour improved: local optimum reached.
            return state
    return state
def steepest_ascent(init_state: State, num_its: int):
    """Hill climbing that always moves to the single best neighbour."""
    state = init_state
    for _ in range(num_its):
        # Materialize all neighbours and pick the maximum-valued one.
        best = max(state.get_all_neighbours(generator=False))
        if state <= best:
            state = best
    return state
def stochastic_hill_climbing(init_state: State, max_tries: int):
    """Hill climbing on random neighbours.

    Accepts a random neighbour whenever it improves the current value and
    gives up after `max_tries` consecutive non-improving draws.

    Bug fix: the original broke out of the loop when
    `num_tries < max_tries`, i.e. after the very first failed draw, which
    made `max_tries` meaningless. The loop condition alone now enforces
    the failure budget.
    """
    num_tries = 0
    state = init_state
    while num_tries < max_tries:
        candidate = state.get_random_neighbour()
        if candidate.get_value() > state.get_value():
            state = candidate
            num_tries = 0
        else:
            num_tries += 1
    return state
def random_restart(init_state: State, max_tries: int, rr_its: int):
    """Stochastic hill climbing with random restarts.

    Draws random neighbours, accepting improvements; after `max_tries`
    consecutive failures the search jumps to a fresh random solution.
    `rr_its` bounds the number of restarts. Good when there are many
    local maxima.

    Bug fix: the original restarted whenever `num_tries < max_tries`
    (i.e. after every single failed draw) and reset the counter that its
    outer `while` depended on, so it could spin forever. Restarts now
    trigger only once the failure budget is exhausted, and `rr_its`
    counts restarts.
    """
    state = init_state
    num_tries = 0
    restarts = 0
    while restarts < rr_its:
        candidate = state.get_random_neighbour()
        if candidate.get_value() > state.get_value():
            state = candidate
            num_tries = 0
        else:
            num_tries += 1
            if num_tries >= max_tries:
                # Local budget exhausted: restart from a random solution.
                state.calc_random_solution()
                num_tries = 0
                restarts += 1
    return state
5805988233917eca51e0506113b13f70fc5f0a9b | Python | CindyAloui/MasComp2 | /get_400_nouns.py | UTF-8 | 993 | 2.65625 | 3 | [] | no_license | import io
import os
from random import shuffle
# Nouns already used as (positive or negative) seeds; these are excluded
# from the candidates offered for manual annotation.
seeds_nouns = set()
def update(file) :
    """Add every line of `file` (minus its trailing newline) to seeds_nouns."""
    for line in file:
        seeds_nouns.add(line[:-1])
# Output file: 400 random non-seed nouns to annotate.
# NOTE(review): none of the files opened below are ever closed -- confirm
# that is acceptable for this one-shot script.
result = io.open("data/nouns_lists/nouns_to_annot.txt", "w", encoding="utf8")
for filename in os.listdir("data/seeds/seeds-super-supersenses/"):
    # Each positive seed file is paired with its "not_" counterpart.
    if "seeds." in filename and "not_" not in filename and ".py" not in filename:
        print(filename)
        file_seed1 = io.open("data/seeds/seeds-super-supersenses/" + filename, "r", encoding="utf8")
        file_seed2 = io.open("data/seeds/seeds-super-supersenses/" + "not_" + filename, "r", encoding="utf8")
        update(file_seed1)
        update(file_seed2)
# Collect up to 5000 non-seed nouns (first tab-separated field per line).
nouns_file = io.open("data/nouns_lists/10000-nouns.txt", "r", encoding="utf8")
nouns = []
i = 0
for line in nouns_file:
    if i == 5000:
        break
    line = line.split("\t")
    if line[0] not in seeds_nouns:
        nouns.append(line[0])
        i += 1
# Shuffle and write 400 random candidates.
# NOTE(review): raises IndexError if fewer than 400 candidates were found.
shuffle(nouns)
for i in range(400):
    result.write(nouns[i] + "\n")
| true |
ae22e89d8371915dad4296a579df2e372f36cee5 | Python | AnkitAgarwal2108/Python | /sorting/insertion_sort.py | UTF-8 | 380 | 3.609375 | 4 | [] | no_license | l = [21, 13, 26, 15, 3, 7, 18]
def insertion_sort(l):
for i in range(1, len(l)):
current_index = i
current_value = l[i]
while current_index > 0 and l[current_index - 1] > current_value:
l[current_index] = l[current_index - 1]
current_index -= 1
l[current_index] = current_value
return l
print(insertion_sort(l))
| true |
3f6cd557f247f609ce0887c066dc938434bd809b | Python | octopart/solrcloudpy | /solrcloudpy/collection/search.py | UTF-8 | 4,600 | 2.609375 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | """
Query and update a Solr collection
"""
from solrcloudpy.utils import CollectionBase
import datetime as dt
import json
def dthandler(obj):
    """`default` hook for json.dumps: datetimes become ISO-8601 strings;
    anything else maps to None (serialized as JSON null).

    Replaces the original assigned lambda (PEP 8 E731) with an equivalent def.
    """
    return obj.isoformat() if isinstance(obj, dt.datetime) else None
class SolrCollectionSearch(CollectionBase):
    """
    Performs search-related operations on a collection
    """
    def __repr__(self):
        return "SolrIndex<%s>" % self.name
    def _send(self,path,params,method='GET',body=None):
        """Forward a request to the Solr client for this collection."""
        return self.client.request(path,params,method=method,body=body)
    def _update(self,body):
        """POST a JSON update payload to this collection's update handler."""
        path = '%s/update/json' % self.name
        resp = self._send(path,method='POST',params={},body=body)
        return resp
    def search(self,params):
        """
        Search this index

        :param params: query parameters. Here `params` can be a :class:`~solrcloudpy.parameters.SearchOptions` instance, a dictionary or a list of tuples
        """
        path = "%s/select" % self.name
        data = self._send(path,params)
        return data
    def clustering(self,params):
        """
        Perform clustering on a query

        :param params: query parameters. Here `params` can be a :class:`~solrcloudpy.parameters.SearchOptions` instance, a dictionary or a list of tuples
        """
        path = "%s/clustering" % self.name
        data = self._send(path,params)
        return data
    def mlt(self, params):
        """
        Perform MLT (MoreLikeThis) on this index

        :param params: query parameters. Here `params` can be a :class:`~solrcloudpy.parameters.SearchOptions` instance, a dictionary or a list of tuples
        """
        path = "%s/mlt" % self.name
        data = self._send(path,params)
        return data
    def add(self,docs):
        """
        Add a list of document to the collection

        :param docs: a list of documents to add
        :raises Exception: when Solr responds with a non-200 status
        """
        message = json.dumps(docs,default=dthandler)
        response = self._update(message)
        if response.code != 200:
            raise Exception(response.result)
        return response
    def delete(self,id=None,q=None,commit=True):
        """
        Delete documents in a collection. Deletes occur either by id or by query

        :param id: the id of the document to pass.
        :param q: the query matching the set of documents to delete
        :param commit: whether to commit the change or not
        :raises ValueError: when neither or both of `id` and `q` are given
        """
        if id is None and q is None:
            raise ValueError('You must specify "id" or "q".')
        elif id is not None and q is not None:
            raise ValueError('You many only specify "id" OR "q", not both.')
        elif id is not None:
            m = json.dumps({"delete":{"id":"%s" % id }})
        elif q is not None:
            m = json.dumps({"delete":{"query":"%s" % q }})
        self._update(m)
        if commit:
            self.commit()
    def optimize(self,waitsearcher=False,softcommit=False):
        """
        Optimize a collection for searching

        :param waitsearcher: whether to make the changes to the collection visible or not
                             by opening a new searcher
        :param softcommit: whether to perform a soft commit when optimizing
        """
        # Solr expects lowercase string booleans in the query parameters.
        waitsearcher = str(waitsearcher).lower()
        softcommit = str(softcommit).lower()
        params = {'softCommit': softcommit,
                  'waitSearcher': waitsearcher,
                  'optimize': 'true'
                  }
        path = '%s/update' % self.name
        res = self.client.get(path,params=params).result
        return res
    def commit(self):
        """ Commit changes to a collection """
        response = self._update('{"commit":{}}')
        if response.code != 200:
            raise Exception(response.result.error.trace)
        return response
    def analyze(self, docs):
        """Run Solr document analysis on a list of field dicts.

        Builds a <docs> XML payload from `docs` and POSTs it to the
        collection's document-analysis handler.

        NOTE(review): `component.decode('utf-8')` on an `isinstance(..., str)`
        value implies Python 2 semantics; under Python 3 this raises
        AttributeError -- confirm the target runtime.
        """
        path = '%s/analysis/document' % self.name
        message = '<docs>'
        for doc in docs:
            message += '<doc>'
            for (fieldname, value) in doc.items():
                if not isinstance(value, list):
                    value = [value]
                for component in value:
                    if isinstance(component, str):
                        component = component.decode('utf-8')
                    message += '<field name="%s">%s</field>' % (fieldname,
                                                                component)
            message += '</doc>'
        message += '</docs>'
        return self.client.request(path, params={'wt': 'json', 'indent': 'true'},
                                   method='POST', body=message)
method='POST', body=message)
| true |
7517825bc90ac816c51bf8a0d2d6edf1245e96ac | Python | nikoGao/Python-Structure | /Structure/Chapter_3/Palindrome Checker.py | UTF-8 | 472 | 3.171875 | 3 | [] | no_license | # Can be connected with all palindrome in leetcode
from Structure.Chapter_3.Dequeue import Dequeue
def palchecker(aString):
    """Return True iff aString reads the same forwards and backwards."""
    chars = Dequeue()
    for ch in aString:
        chars.addRear(ch)
    # Compare characters from both ends, moving inward.
    while not chars.isEmpty() and chars.size() > 1:
        if chars.removeFront() != chars.removeRear():
            return False
    return True


print(palchecker("lsdkjfskf"))
print(palchecker("radar"))
fed1f7da6a8eb4fc4b125e2731ac84fa65b7f5d6 | Python | LoreMD/proyectoMateCompu | /PilaArena2D.py | UTF-8 | 2,041 | 3.203125 | 3 | [] | no_license | class montonArena:
def __init__ (self, L):
self.L=L
self.array=self.startRandom()
self.pendienteCritica=15
def dinamicaArena(self):
aux=np.zeros((self.L, self.L))
bandera=True
j=1
while bandera==True:
for i in xrange(1, self.L-1):
if j==self.L-1:
bandera=False
else:
pendiente=self.array[i,j]+self.array[i,j+1]
pendiente+=self.array[i, j-1]
pendiente+=self.array[i+1, j]
pendiente+=self.array[i-1, j-1]
if pendiente>self.pendienteCritica:
bandera=False
"""la bandera es para sacarte del while, porque tiene que checar que la pila completa sea estable/inestable"""
j+=1
if pendiente>self.pendienteCritica:
for j in xrange(0, self.L, 1):
for i in xrange(0,self.L-1,1):
aux[j,i]=self.array[j,i]-1
aux[j,i+1]=self.array[j,i+1]+1
"""
se desparrama
"""
self.array=aux
else:
"""
crece
"""
pass
def a(self):
return self.array
"""
codigo siguiente copiado de lo de clase, y modificado para 2 dimensiones
"""
def startSingle(self):
"""solo un granito de arena/fuego a la mitad del arreglo"""
self.array[self.L/2, self.L/2] = 1
def startRandom(self):
"""Valores aleatorios en el tiempo t_0"""
array=np.zeros([self.L, self.L])
for i in xrange(0,self.L,1):
for j in xrange(0, self.L,1):
array[i,j]=randint(1,10)
return array
def loop(self, steps=1):
"""Ejecuta el número especificado de pasos."""
[self.step() for i in xrange(steps)]
def step(self):
"""Avanza un paso t -> t+1."""
pass | true |
# Print every number strictly between n and k whose digit cubes sum to
# itself (Armstrong numbers for three-digit values).
n, k = map(int, input().split())
for candidate in range(n + 1, k):
    digit_cube_sum = 0
    remaining = candidate
    while remaining > 0:
        digit = remaining % 10
        digit_cube_sum += digit ** 3
        remaining //= 10
    if candidate == digit_cube_sum:
        print(candidate, end=" ")
| true |
ac967bce8150c33d10d5077e81e6fef9cc5b0799 | Python | leeang/Communication-Networks | /ws3/code/section1/mm1.py | UTF-8 | 1,329 | 3.140625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 15 12:16:53 2014
HINTS:
1) Analyse the script from bottom to top
2) Fill in the blanks marked with ???
@author: alpcan
"""
import numpy as np
import matplotlib.pylab as plt
from wsmm1helper import *
def Theoreticalmm1(srate, arate):
    """Print the theoretical M/M/1 mean delay and mean queue size.

    :param srate: service rate
    :param arate: arrival rate (must be < srate for a stable queue)

    Bug fix: `1/(srate-arate)` used integer division under Python 2 and
    truncated to 0 whenever srate - arate > 1; a float numerator fixes it.
    The prints use call syntax, valid in both Python 2 and 3.
    """
    meandelay = 1.0 / (srate - arate)
    rho = float(arate) / float(srate)
    meansize = rho / (1 - rho)
    print('Mean theoretical delay: {:4f} \n'.format(meandelay))
    print('Mean theoretical size : {:4f} \n'.format(meansize))
###########################################################
## Main program: discrete-event simulation of an M/M/1 queue
# parameters
maxsteps=int(1E5) # simulation steps
srate=4 # service rate
arate=3 # arrival rate
# create simulation (DESmm1 comes from wsmm1helper)
simulation=DESmm1(srate,arate,maxsteps)
# main loop: draw exponential interarrival/service times and feed the DES
for i in range(maxsteps):
    intarrive=np.random.exponential(1.0/arate) # interarrival time
    simulation.packetarrival(intarrive)
    servetime=np.random.exponential(1.0/srate) # service time
    simulation.nextstep(servetime)
# calculate and print theoretical values
# you can also do this in Matlab if you prefer!
Theoreticalmm1(srate,arate)
# calculate and print practical delay, size values
# optionally visualise
simulation.practicalcalc(True,True)
| true |