blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3c51b647545752c7ab0621b75a8c83364151fc58 | Python | christymthmas/amazonPriceDropNotification | /amazonDetails.py | UTF-8 | 3,670 | 3.171875 | 3 | [] | no_license | def getProductTitle(soup):
title = soup.find("span", id = "productTitle").text.strip()
return title
def getBrandName(soup):
    """Return the product's brand name from the parsed Amazon page.

    Strips the boilerplate Amazon wraps around the byline link text
    ("Visit the ... Store" / "Brand: ...").
    """
    raw = soup.find("a", id = "bylineInfo").text.strip()
    for noise in ("Visit the ", "Brand: ", " Store"):
        raw = raw.replace(noise, "")
    return raw
def getMRP(soup):
    """Return the strike-through MRP (list price), rounded to the nearest whole rupee.

    Reads the struck-out price span and drops the rupee sign and the
    thousands separators before converting the text to a number.
    """
    tag = soup.find("span", class_ = "priceBlockStrikePriceString a-text-strike")
    digits = tag.text.strip().replace("₹", "").replace(",", "")
    return round(float(digits))
def getamzPrice(soup):
    """Return the current Amazon selling price, rounded to the nearest whole rupee.

    Tries the price sources in order of preference: the price-table cell,
    then the deal-price span, then the regular-price span.  The first one
    present on the page wins.

    Raises:
        AttributeError/IndexError: when none of the price elements exist.
        ValueError: when the matched text is not a parseable number.
    """
    # A bare ``except:`` here would also swallow KeyboardInterrupt and
    # SystemExit and hide real bugs; only the expected lookup failures
    # (missing element -> None.text AttributeError, short list -> IndexError)
    # are caught.
    try:
        amzPriceStr = soup.find_all("td", class_ ="a-span12")[1].text.strip().replace("₹","").replace(",","")
    except (AttributeError, IndexError, TypeError):
        try:
            amzPriceStr = soup.find("span", id="priceblock_dealprice").text.strip().replace("₹","").replace(",","")
        except (AttributeError, TypeError):
            amzPriceStr = soup.find("span", id="priceblock_ourprice").text.strip().replace("₹","").replace(",","")
    amzPrice=round(float(amzPriceStr))
    return amzPrice
def getYouSave(soup):
    """Return (amount saved, percentage saved) parsed from the savings cell.

    The cell text looks like "500.00 (25%)" after the rupee sign and commas
    are stripped; the last five characters are assumed to be the "(NN%)"
    part and everything before it the rupee amount.
    NOTE(review): the fixed ``[-5:]`` slice only fits two-digit
    percentages -- confirm behaviour for pages with <10% or 100% savings.
    """
    youSaveStr = soup.find("td", class_ ="a-span12 a-color-price a-size-base priceBlockSavingsString").text.strip().replace("₹","").replace(",","")
    youSavePercentageStr=youSaveStr[-5:]  # e.g. "(25%)"
    youSavePercentage=int(youSavePercentageStr.strip(")%").strip().replace("(", ""))
    youSave=round(float(youSaveStr[:-5].strip()))  # rupee amount before the "(NN%)"
    return youSave, youSavePercentage
def getAvailability(soup):
    """Return True when the availability banner reads exactly "In stock."."""
    return soup.find("div", id="availability").text.strip() == "In stock."
def printDetails(soup):
    """Print a formatted summary of the product parsed from ``soup``.

    Falls back through progressively smaller summaries as price data goes
    missing: full details (MRP + savings) -> price only -> title/brand only
    -> total failure message.  Returns True when a price could be shown,
    False otherwise.
    NOTE(review): the bare ``except:`` clauses also hide programming errors
    and interrupts; narrowing them to AttributeError/IndexError would be safer.
    """
    try:
        # Full-detail path: title, brand, MRP, current price and savings.
        productTitle = getProductTitle(soup) # title of the product
        brandName = getBrandName(soup) # name of the Brand
        mrp = getMRP(soup) # mrp
        amazonPrice = getamzPrice(soup) # current price on Amazon
        youSave, youSavePercentage = getYouSave(soup) # the saved money and percentage of saved
    except:
        try:
            # Fallback 1: no MRP/savings -- show just the current price.
            productTitle = getProductTitle(soup)
            brandName = getBrandName(soup)
            amazonPrice = getamzPrice(soup)
            print("""
    ----------------------------------------------------------------------------------------
    Product\t\t:\t{}
    Brand Name\t\t:\t{}
    Amazon Price\t:\t₹ {:,}
    ----------------------------------------------------------------------------------------""".format(productTitle,brandName,amazonPrice))
            return True
        except:
            try:
                # Fallback 2: no price at all -- title and brand only.
                productTitle = getProductTitle(soup)
                brandName = getBrandName(soup)
                print("""
    ----------------------------------------------------------------------------------------
    Product\t\t:\t{}
    Brand Name\t\t:\t{}
    (Unable to fetch the price details)
    ----------------------------------------------------------------------------------------""".format(productTitle,brandName))
                return False
            except:
                # Fallback 3: nothing could be parsed from the page.
                print("\n\n------------------unable to fetch the product details------------------\n\n")
                return False
    # Full-detail path succeeded: add the stock banner and print everything.
    if getAvailability(soup):
        availability="This product is currently in Stock!"
    else:
        availability="This product is currently NOT in Stock!"
    print("""
    ----------------------------------------------------------------------------------------
    Product\t\t:\t{}
    Brand Name\t\t:\t{}
    Amazon Price\t:\t₹ {:,}
    MRP\t\t\t:\t₹ {:,}
    \tYou'll save ₹ {:,} ({}%)
    {}
    ----------------------------------------------------------------------------------------
    """.format(productTitle,brandName,amazonPrice,mrp,youSave,youSavePercentage,availability))
    return True
| true |
82db0d89557d22364f83156c7e691e48c28e2788 | Python | pirg/PartitionFunction | /check_partfunc.py | UTF-8 | 2,241 | 2.578125 | 3 | [
"MIT"
] | permissive | # import os
import numpy as np
import astropy.units as u
import astropy.constants as c
import astropy.io as io
from astropy.table import Column
import pylab as pl
pl.ion()
names = ['freq', 'freq_err', 'logI', 'df', 'El_cm',
'gu', 'tag', 'qncode', 'qn', 'specname']
def load_spec(tag):
    """Read the fixed-width line list ``specdata/<tag>.txt`` into an astropy Table.

    Adds an upper-state energy column ``Eu_cm`` computed as the lower-state
    energy plus the transition frequency converted to wavenumbers, and
    returns the table.  Uses the module-level ``names`` column list.
    """
    tb = io.ascii.read(
        'specdata/{}.txt'.format(tag),
        col_starts=[0, 14, 21, 29, 31, 41, 44, 51, 55, 81],
        col_ends=[13, 20, 28, 30, 40, 43, 50, 54, 80, 100],
        format='fixed_width_no_header',
        names=names)
    # Eu = El + freq/c (MHz -> cm^-1), attached as a new column.
    Eu_cm = Column(name='Eu_cm',
                   data=tb['El_cm']*(1/u.cm)+(tb['freq']*u.MHz/c.c).to(1/u.cm))
    tb.add_column(Eu_cm)
    return tb
def compute_qpart(table, T):
    """Return log10 of the partition function at temperature ``T`` (Kelvin).

    Direct state sum q = 1 + sum_u g_u * exp(-E_u h c / (k_B T)) over the
    rows of ``table``; ``T`` may be a scalar or a numpy array (broadcast).
    """
    q = 1
    for row in table:
        q += row['gu']*np.exp(-row['Eu_cm']*(1/u.cm)*c.h*c.c/(c.k_B*T*u.K))
    return np.log10(q)
def get_qpart(tag):
    """Return the tabulated partition-function row for ``tag`` from the database file.

    The third-from-last digit of the tag selects the database: '5' -> CDMS,
    '0' -> JPL.  Only the CDMS branch is implemented.
    NOTE(review): for a JPL tag or an unrecognised tag, ``tb_qpart`` is never
    assigned and the final line raises NameError -- these branches should
    raise a clear exception (or load the JPL table) instead.
    """
    if str(tag)[-3:-2] == '5':
        db = 'cdms'
        # CDMS layout: fixed-width columns, one 13-char field per temperature.
        col_starts = [0, 7, 31, 45]+[45+13*(i+1) for i in range(10)]
        col_ends = [i-1 for i in col_starts[1:]] + [1000]
        names = ['tag', 'molecule', '#lines', '1000', '500', '300',
                 '225', '150', '75', '37.5', '18.75', '9.375', '5.000',
                 '2.725']
        tb_qpart = io.ascii.read(
            'partfunc/{}.part'.format(db),
            format='fixed_width_no_header',
            guess=False,
            col_starts=col_starts,
            col_ends=col_ends,
            data_start=2,
            comment='=',
            delimiter=' ',
            fill_values=('---'),
            names=names)
    elif str(tag)[-3:-2] == '0':
        db = 'jpl'
    else:
        print "Check tag value"
    return tb_qpart[tb_qpart['tag'] == tag]
def plot():
    """Plot the computed partition function against the tabulated values.

    Draws the state-sum curve over 0-1000 K and overlays the database
    points for comparison, then returns the computed log10(q) array.
    NOTE(review): reads the module-level globals ``tb`` and ``tag`` set in
    the ``__main__`` block -- consider passing them as parameters.
    """
    temps = np.linspace(0,1000,10000)
    qval = compute_qpart(tb, temps)
    tb_qpart = get_qpart(tag)
    pl.figure(1)
    pl.clf()
    pl.plot(temps,qval)
    # Skip the tag/molecule/#lines columns; the rest are temperature points.
    for key in tb_qpart.keys()[3:]:
        pl.scatter(key, tb_qpart[key])
    pl.xscale('linear')
    pl.xlim([0,20])
    return qval
if __name__ == '__main__':
tag = 28503
T = 9.375
tb = load_spec(tag)
# tb.pprint()
qval = compute_qpart(tb, T)
tb_qpart = get_qpart(tag)
print plot()
| true |
03bd88a85c1f7ae901179421dbe827f02a5d1186 | Python | nehasinghritu8/HealthX | /X-ray fracture detection/bt1.py | UTF-8 | 744 | 2.515625 | 3 | [
"MIT"
] | permissive | import cv2
import numpy as np
import math
img = cv2.imread('bone1.jpg')
img=cv2.blur(img,(3,3))
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
th,dst=cv2.threshold(img,200,250,cv2.THRESH_BINARY)
dst=cv2.dilate(dst,(7,7),iterations=3)
dst=cv2.erode(dst,(7,7),iterations=3)
cv2.imshow('sthresh',dst)
edges = cv2.Canny(dst,100,400,apertureSize =3)
cv2.imshow('canny',edges)
im2, contours, hierarchy = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
cv2.imshow('images 2',img)
for c in contours :
perimeter=cv2.arcLength(c,True)
approx=cv2.approxPolyDP(c,0.05*perimeter,True)
cv2.drawContours(img, approx, -1, (0, 0, 255), 7)
cv2.imshow('images 3',img)
cv2.waitKey(0) | true |
da42ea66a8ba7ed54514a3b5ce11c63052d83103 | Python | mmveres/python05_12_2020 | /ua/univer/lesson05/xml_dict/__main__.py | UTF-8 | 459 | 2.671875 | 3 | [] | no_license | import xmltodict
import pprint
import json
# Convert data.xml to JSON on disk, then read the JSON back and print the
# '#text' value of every item under data/items.
with open('data.xml') as fd:
    doc = xmltodict.parse(fd.read())
data_txt = json.dumps(doc)
with open("data.json", "w") as file:
    file.write(data_txt)
# data_dict = json.loads(data_txt)
# Round-trip through the file rather than reusing data_txt directly.
# NOTE(review): encoding="UTF" works (alias of UTF-8) but "utf-8" is clearer.
with open("data.json", "r", encoding="UTF") as myfile:
    data_str = myfile.read()
data_dict = json.loads(data_str)
for elem in data_dict['data']['items']['item']:
    print(elem['#text'])
b0add74dae7507c7e2c0688e0e2217531e4154de | Python | lizhenggan/TwentyFour | /01_Language/01_Functions/python/levenshtein.py | UTF-8 | 949 | 3.65625 | 4 | [
"MIT"
] | permissive | # coding: utf-8
def levenshtein(str1, str2, cost_ins=1, cost_rep=1, cost_del=1):
    """Weighted Levenshtein distance between ``str1`` and ``str2``.

    ``cost_ins``, ``cost_rep`` and ``cost_del`` price insertion,
    substitution and deletion respectively.  Uses a single rolling DP row
    over the shorter string; when the operands are swapped to achieve
    that, the insert/delete costs swap roles as well.
    """
    short, long_ = str1, str2
    ins, rep, dele = cost_ins, cost_rep, cost_del
    if not short:
        return len(long_)
    if not long_:
        return len(short)
    if len(short) > len(long_):
        short, long_ = long_, short
        ins, dele = dele, ins
    row = list(range(len(short) + 1))
    for j, long_char in enumerate(long_, start=1):
        diag = row[0]
        row[0] = j
        for i, short_char in enumerate(short, start=1):
            up = row[i]
            row[i] = min(
                row[i - 1] + dele,
                up + ins,
                diag + (0 if long_char == short_char else rep),
            )
            diag = up
    return row[len(short)]
if __name__ == '__main__':
str1 = 'carrrot1'
str2 = 'carrots'
print(levenshtein(str1, str2))
print(levenshtein(str2, str1))
print(levenshtein(str1, str2, 2, 3, 4))
print(levenshtein(str2, str1, 2, 3, 4))
| true |
95b0efd24ee7b64d6854806beb0d5502fac03887 | Python | XavierGimenez/procomuns-project-network | /create_tag_network.py | UTF-8 | 1,421 | 2.671875 | 3 | [] | no_license | __author__ = 'xavi'
from collections import Counter
import constants as constants
import itertools
import pandas as pd
import pyUtils
df = pd.read_csv(constants.FOLDER_DATA_DEPLOY + constants.FILE_TAGS_ADDED_2)
df = df.fillna('')
set_tags = set()
#list of possible connections between topics
connections = []
for index, row in df.iterrows():
#get list of tags of the inspected row. We look for these tags through the subset of data frame
#not inspected yet when iterating the rows
tags = row[constants.TAG_COLUMN_NUMBER].split(',')
set_tags |= set(tags)
#get rows not being inspected yet
df2 = df[index+1:]
for index2, row2 in df2.iterrows():
tags2 = row2[constants.TAG_COLUMN_NUMBER].split(',')
connections = connections + list(itertools.combinations(sorted(tags + tags2), 2) )
#start creating the graph structure
output_file = pyUtils.openFile(constants.FOLDER_DATA_DEPLOY + constants.FILE_TAG_NETWORK)
output_file.write("nodedef>name INT, name VARCHAR, activity VARCHAR\n")
for tag in set_tags:
output_file.write('\'' + tag + '\'' + "\n")
# print('edgedef>node1 VARCHAR,node2 VARCHAR,weight DOUBLE')
output_file.write("edgedef>node1 INT,node2 INT, weight DOUBLE\n")
counter = Counter(connections)
for key, count in counter.iteritems():
output_file.write('\'' + key[0] + '\'' + ',' + '\'' + key[1] + '\'' + ',' + str(count) + "\n")
output_file.close() | true |
114a6be23d810e376a18ebc0abaa8341cab141b7 | Python | gungunfebrianza/Belajar-Dengan-Jenius-Python | /src/Class/1. Class.py | UTF-8 | 600 | 4.15625 | 4 | [] | no_license | # CREATE CLASS
class Person:
    """A simple person record with a helper that prints the full name."""

    def __init__(self, firstname, lastname, age, eyecolor):
        # Plain attribute assignment; no validation is performed.
        self.firstname = firstname
        self.lastname = lastname
        self.age = age
        self.eyecolor = eyecolor

    def getFullName(self):
        # Prints (does not return) "<firstname> <lastname>".
        print("{} {}".format(self.firstname, self.lastname))
# CREATE OBJECT
hooman = Person("Maudy", "Ayunda", 25, "Brown")
# ACCESS PROPERTIES
print(hooman.age) # 25
# ACCESS METHODS
hooman.getFullName() # Maudy Ayunda
# CHANGE PROPERTIES
hooman.lastname = "Ayunda Faza"
hooman.getFullName() # Maudy Ayunda Faza
# DELETE PROPERTIES
# del hooman.age
# DELETE OBJECTS
# del hooman | true |
4d344c9ba60782b3b25a90c8ac7e7fb9c170fe6f | Python | morganstanley/testplan | /releaseherald/releaseherald/plugins/plugin_config.py | UTF-8 | 5,197 | 2.5625 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | from collections import defaultdict
from dataclasses import dataclass, field
from typing import Dict, Any, Optional, List, DefaultDict
import click
from boltons.cacheutils import cached, LRI
from pydantic import BaseModel
from releaseherald.plugins.interface import CommandOptions
@dataclass
class FromCommandline:
    """
    Annotation marker that connects a ``PluginConfig`` attribute to a
    command-line option, making the config setting overridable from the CLI.
    Attributes:
        command: the command the option needs to be attached to
        option: the commandline option
    """
    command: str
    option: click.Option
@dataclass
class UpdateMapping:
    """Pairs a config field name with the CLI option name that overrides it."""
    field_name: str
    option_name: str
@dataclass
class CommandOptionsInfo:
    """Click options plus field/option mappings collected for one command."""
    options: List[click.Option] = field(default_factory=list)
    update_mappings: List[UpdateMapping] = field(default_factory=list)
class PluginConfig(BaseModel):
    """
    A helper base class for easier declarative plugin configuration.
    - can be used with [Configuration.parse_sub_config][releaseherald.configuration.Configuration.parse_sub_config]
      for easier parsing
    - Attributes can be Annotated types, which can contain
      [FromCommandline][releaseherald.plugins.plugin_config.FromCommandline] Annotation, that make the config setting
      overridable from commandline
    #Usage
    ```python
    class MyPluginConfig(PluginConfig):
        non_overridable_value: str = "some default
        # use type Annotation to connect the attribute wit the commandline Option
        overridable_value: Annotated[str, FromCommandline(
            "generate",
            click.Option(
                param_decls=["--override"],
                help="override the overrideable value",
            )
        )] = "default for overrideable value"
    class MyPlugin:
        @releaseherald.plugins.hookimpl
        def process_config(self, config: Configuration):
            # parse the config with the helper
            self.my_config = config.parse_sub_config("my_config", MyPluginConfig)
        @releaseherald.plugins.hookimpl
        def get_command_options(self, command: str) -> Optional[CommandOptions]:
            # just use the helper to return the right thing
            return self.my_config.get_command_options(command)
        @releaseherald.plugins.hookimpl
        def on_start_command(self, command: str, kwargs: Dict[str, Any]):
            # use the helper to reflect commandline overrides in the config
            self.my_config.update(command, kwargs)
    ```
    """
    @classmethod
    @cached(LRI())
    def _get_command_options_info(cls) -> Dict[str, CommandOptionsInfo]:
        # Scan the model's fields once (result cached per class) and gather,
        # per command, every click option declared through a FromCommandline
        # annotation together with its field<->option name mapping.
        command_options: DefaultDict[str, CommandOptionsInfo] = defaultdict(
            CommandOptionsInfo
        )
        # NOTE: the loop variable shadows dataclasses.field in this scope.
        for field in cls.__fields__.values():
            # Annotated[...] stores its extra arguments in __metadata__;
            # plain (non-Annotated) fields have none and are skipped.
            metadata = getattr(field.outer_type_, "__metadata__", None)
            if not metadata:
                continue
            for annotation in metadata:
                if not isinstance(annotation, FromCommandline):
                    continue
                command = command_options[annotation.command]
                command.options.append(annotation.option)
                command.update_mappings.append(
                    UpdateMapping(field.name, annotation.option.name)
                )
        return dict(command_options)
    def get_command_options(self, command: str) -> Optional[CommandOptions]:
        """
        Generate command options from Annotated fields which can be returned directly from
        [get_command_options hook][releaseherald.plugins.hookspecs.get_command_options]
        Args:
            command: the command these command options are registered with
        Returns:
            The command options that the [get_command_options hook][releaseherald.plugins.hookspecs.get_command_options]
            expects
        """
        command_options: CommandOptionsInfo = (
            self._get_command_options_info().get(command)
        )
        # Implicitly returns None when this config declares no options
        # for the given command.
        if command_options:
            def default_opts_callback(default_options: Dict[str, Any]):
                # Seed the CLI defaults from the current config values.
                for update_mapping in command_options.update_mappings:
                    default_options[update_mapping.option_name] = getattr(
                        self, update_mapping.field_name
                    )
            return CommandOptions(
                command_options.options, default_opts_callback
            )
    def update(self, command: str, kwargs: Dict[str, Any]) -> None:
        """
        Update itself from commandline options, can be used in
        [on_start_command hook][releaseherald.plugins.hookspecs.on_start_command]
        Args:
            command: the command
            kwargs: the commandline args for the command
        """
        command_options: CommandOptionsInfo = (
            self._get_command_options_info().get(command)
        )
        if command_options:
            # Copy each mapped commandline value back onto the config field.
            for update_mapping in command_options.update_mappings:
                setattr(
                    self,
                    update_mapping.field_name,
                    kwargs[update_mapping.option_name],
                )
| true |
5db57d73fbc6a13fa87916114115787cb0413ded | Python | ashish3x3/competitive-programming-python | /Hackerrank/defaultDictUse.py | UTF-8 | 746 | 3.15625 | 3 | [] | no_license | from collections import defaultdict
# Python 2 HackerRank-style script: record the 1-based positions of each of
# the n group words, then for each of the m query words print its positions
# (space-separated) or -1 when the word never occurred.
d = defaultdict(list)
list1=[]
n, m = map(int,raw_input().split())
for i in range(0,n):
    d[raw_input()].append(i+1)  # store 1-based occurrence index per word
for i in range(0,m):
    list1=list1+[raw_input()]
for i in list1:
    if i in d:
        print " ".join( map(str,d[i]) )
    else:
        print -1
'''
Hackerrank :
Maths : all easy ones in fundamentals
Number theory : all easy ones
Combinatorics : all easy ones
Geometry : all easy ones
Probability : all easy ones
Linux Shell:
Regex:
Python :
Math: all easy and medium
itertootls: all
Collections: all
class : both
python functionals: all 3
DS:
Trie : all 2
Disjoint set : all 4
heap : all 4
Trees : all easy (total 17 overall)
Balanced trees : all 3
''' | true |
ecc84bec06a81477e8a3f41b1a67a3bc4d6da1fd | Python | Aasthaengg/IBMdataset | /Python_codes/p03963/s238501296.py | UTF-8 | 85 | 2.828125 | 3 | [] | no_license | N,K = map(int,input().split())
ret = K
for i in range(2,N+1):
ret *= K-1
print(ret) | true |
0620634eb819f6870f43cfb43ffb29ffedfda790 | Python | elgraves/Coursera | /Algorithmic-Thinking/Module-1/Application/Citation_Graphs.py | UTF-8 | 9,642 | 3.421875 | 3 | [] | no_license | from __future__ import division
"""
Created on Aug 26, 2014
@author: Joshua Magady
Language: Python 2.x
Script: Citation Graphing
"""
# general imports
import urllib2
#import dateutil
import matplotlib.pyplot as plot
#end general imports
CITATION_URL = "http://storage.googleapis.com/codeskulptor-alg/alg_phys-cite.txt" # URL for Data Set
# below is the programming equivalent to the graphs used for module 1
EX_GRAPH0 = { 0 : set([1,2]),
1 : set([]),
2 : set([])}
EX_GRAPH1 = {0 : set([1,4,5]),
1 : set([2,6]),
2 : set([3]),
3 : set([0]),
4 : set([1]),
5 : set([2]),
6: set([])}
EX_GRAPH2 = {0 : set([1,4,5]),
1 : set([2,6]),
2 : set([3, 7]),
3 : set([7]),
4 : set([1]),
5 : set([2]),
6 : set([]),
7 : set([3]),
8 : set([1, 2]),
9 : set([0, 3, 4, 5, 6, 7])}
# end of graphs
def make_complete_graph(num_nodes):
    """
    Return the complete directed graph on ``num_nodes`` nodes as a dict
    mapping each node to the set of all other nodes (self-loops excluded).

    A non-positive ``num_nodes`` yields an empty graph and a single node
    yields ``{0: set()}``; ``range`` already produces both cases, so the
    original special-case branches were redundant and have been removed.
    """
    all_nodes = set(range(num_nodes))
    # Each node connects to every node except itself.
    return {node: all_nodes - {node} for node in range(num_nodes)}
#end make_complete_graph function
def compute_in_degrees(digraph):
    """
    Given a directed graph (dict mapping node -> set of head nodes), return
    a dict mapping each node to its in-degree.
    Python 2 only (print statement, dict.viewkeys/viewvalues).
    NOTE(review): this rescans every adjacency set once per node, i.e.
    O(V*E); a single pass over all edges would suffice.
    """
    print "processing In-Degrees" # Status indicator for long processing times
    xgraph = {} # create a blank dict
    for node in iter(digraph.viewkeys()): # creates an iter of just the keys in the dict. increase performance for larger data sets maybe? IE only shows the keys
        xgraph[node] = 0 # from the list of keys (nodes) creates a new keys for a new dict
        for edges in iter(digraph.viewvalues()): # creates an iter of just the values in the dict. increase performance for larger data sets maybe? IE only shows the values
            if node in edges: # looks for the nodes in the edges (from dict values)
                xgraph[node] += 1 # if node found increase by 1
    #print digraph.itervalues()
    return xgraph # returns a new dict with nodes as keys and the value is how many in degrees
#end compute_in_degrees function
def in_degree_distribution(digraph):
    """
    Given a directed graph, return its unnormalized in-degree distribution:
    a dict mapping each in-degree value to the number of nodes having it.
    Python 2 only (print statement, dict views, dict.has_key).
    """
    print "Processing In-Degree Distribution" # Status indicator for long processing times
    xgraph = {} #create a blank dict
    x_in_degrees = compute_in_degrees(digraph) # This function has already been written. Reusing function
    for degrees in iter(x_in_degrees.viewvalues()): # we are counting how many nodes have what degrees so we are in a since doing an inverse of the above function. Converting the degrees to the keys of the dict
        if not xgraph.has_key(degrees): # since the same degrees show up multiple times we only want it to show up once (dict keys need to be unique anyways) this keeps errors from being thrown
            xgraph[degrees] = 0 # this creates the key and sets an initial value of 0
        xgraph[degrees]+= 1 # every time the degree comes up during the the loop it increase the value by 1
    return xgraph # returns the final dict
#end in_degree_distribution function
def compute_out_degrees(digraph): # part of answer to question 3
    """
    Given a directed graph (dict mapping node -> set of head nodes), return
    a dict mapping each node to its out-degree (the size of its edge set).
    Python 2 only (print statement, dict.viewkeys).
    """
    xgraph = {}
    print "Processing the number of Out-Degrees" # Status indicator for long processing times
    for node in iter(digraph.viewkeys()): # creates an iter of just the keys in the dict. increase performance for larger data sets maybe? IE only shows the keys
        xgraph[node] = len(digraph[node]) # creates node in graph and assigns it to the number of out degrees to begin count
    print "Finished Counting Out-Degrees" # Status indicator for long processing times
    return xgraph # return created graph with node + count of out degrees
#end compute_out_degrees function
def compute_out_degrees_average(digraph): # part of answer to question 3. Takes input from compute_out_degrees
    """Return the average out-degree per node (true division via the
    __future__ import at the top of the file).
    NOTE(review): raises ZeroDivisionError for an empty graph."""
    print "Processing average number of Out-Degrees for a node" # Status indicator for long processing times
    out_degrees_g = compute_out_degrees(digraph)
    number_nodes = len(out_degrees_g.keys())
    count_out_degrees = 0
    for node in iter(out_degrees_g.viewkeys()):
        count_out_degrees += out_degrees_g[node]
    print "Finished Processing average number of Out-Degrees for a node" # Status indicator for long processing times
    return count_out_degrees / number_nodes
#end compute_out_degrees_average function
def load_graph(graph_url): # Function Provided By instructor - Grabs a specific graph from the internet and converts it to a form we can use
    """
    Function that loads a graph given the URL
    for a text representation of the graph
    (one line per node: "node neighbor neighbor ... " with a trailing space)
    Returns a dictionary that models a graph (node -> set of neighbors)
    Python 2 only (urllib2, print statements).
    """
    graph_file = urllib2.urlopen(graph_url) # sets graph_file var to the file downloaded by urlopen
    graph_text = graph_file.read() # invokes read on the file downloaded
    graph_lines = graph_text.split('\n')
    graph_lines = graph_lines[ : -1]
    print "Loaded graph with", len(graph_lines), "nodes"
    answer_graph = {}
    for line in graph_lines:
        neighbors = line.split(' ')
        node = int(neighbors[0])
        answer_graph[node] = set([])
        # [1 : -1] skips the node id and the empty string after the trailing space
        for neighbor in neighbors[1 : -1]:
            answer_graph[node].add(int(neighbor))
    print "Finished processing Out-Degrees"
    return answer_graph
#end load_graph function
def normalize_in_degree_distribution(digraph):
    """
    Takes a directed graph, computes its unnormalized
    in-degree distribution using function in_degree_distribution,
    then divides each count by the number of nodes (true division via the
    __future__ import) so the values sum to 1.
    Returns normalized distribution
    """
    print "Start Normalizing In-Degree Distribution" # Status indicator for long processing times
    normalize_dist = {} # Create blank dict
    node_count = len(digraph) # get node count
    unnormized_dist = in_degree_distribution(digraph) # compute unnormalized dist and save to var
    for in_degree in iter(unnormized_dist.viewkeys()): # create and iter view and loop through each value
        normalize_dist[in_degree] = unnormized_dist[in_degree] / node_count
    print "End Normalizing In-Degree Distribution" # Status indicator for long processing times
    return normalize_dist
#end normize_in_degree_distribution function
def plot_normalized_in_degrees(ndigraph):
    """
    Creates a log/log plot of the points from a normalized distribution
    (x = in-degree, y = fraction of nodes) and shows it; returns nothing.
    """
    print "Creating Plot" # Status indicator for long processing times
    plot.title('Normalized in-degree distribution (Point graph)') # Sets the name of the graph window
    plot.xlabel('In-degrees (log)') # Labels the X cords
    plot.ylabel('Normalized Values (log)') # Labels the Y Cords
    plot.xscale("log") # Sets the Scale of the graph on x cords to log
    plot.yscale("log") # Sets the Scale of the graph on y cords to log
    plot.plot(ndigraph.keys(), ndigraph.values(), "go") # creates the graph using the cords. (This case uses the degree distribution as X and the percentage of times it appears as Y. **kwargs as "g" for the color green. "o" for marker type: Circle
    plot.show() # Displays the Graph
    return # returns nothing
# end plot_normalized_in_degrees function
"""
example usages:
print in_degree_distribution(EX_GRAPH1)
print compute_in_degrees(EX_GRAPH2)
print make_complete_graph(10)
citation_graph = load_graph(CITATION_URL)
print normalize_in_degree_distribution(EX_GRAPH2)
"""
"""
Start of actual program
"""
#print in_degree_distribution(load_graph(CITATION_URL))
#print normalize_in_degree_distribution(load_graph(CITATION_URL))
#plot_normalized_in_degrees(normalize_in_degree_distribution(load_graph(CITATION_URL))) #uncomment to create citation graph
#load_graph(CITATION_URL) # part of answer to question 3
#print compute_out_degrees(load_graph(CITATION_URL))
#print compute_out_degrees_average(load_graph(CITATION_URL)) # gives you complete answer for question 3. n = number of nodes loaded. m = average out degree
| true |
4189a30dce429f6b5e90d2fd361e70c8e8e40745 | Python | lessrest/danceschool | /root/usr/local/bin/dance-update-todays-classes | UTF-8 | 1,451 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python2
import json
import vobject
import dateutil.rrule as rrule
from datetime import datetime
import smtplib
from email.mime.text import MIMEText
today = datetime.today().date()
# Start with no classes today
classes = {}
# Read and parse the calendar file
cal = vobject.readOne(open("/var/dance/calendar.ics", "r").read())
# Loop over all the recurring events in the calendar
bad = []
for recurring in cal.vevent_list:
if recurring.rruleset is None:
bad.append(recurring)
times = [recurring.dtstart.value]
else:
times = list(recurring.rruleset)
name = recurring.summary.value
# Any on today's date?
if any(x.date() == today for x in times):
# If so, then keep it with name and length in minutes
duration = recurring.dtend.value - recurring.dtstart.value
classes[name] = {
"minutes": int(duration.total_seconds() / 60)
}
# Save today's classes to a JSON file
with open("/var/dance/today/classes", "w") as f:
json.dump(classes, f)
# Send email if some events were non-recurring
# if len(bad) > 0:
# msg = MIMEText("\n".join(
# ["%s %s" % (x.dtstart.value, x.summary.value) for x in bad]
# ))
# msg["Subject"] = "Warning: non-recurring events"
# msg["From"] = "root@raspberrypi"
# msg["To"] = "root"
# s = smtplib.SMTP("localhost")
# s.sendmail("root@raspberrypi", ["root"], msg.as_string())
# s.quit()
| true |
4e2d2da971031009cf5eb52f2a9dec26067ab9a5 | Python | Sadanand-Prajapati/API_Data | /API_Data_Scrapping.py | UTF-8 | 6,206 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 11:52:16 2020
@author: Sadanand
"""
import requests
import json
import pandas as pd
import os
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import pandas as pd
import pymysql
from sqlalchemy import create_engine, Column, Integer, String
import MySQLdb
#Get API data class.
class get_api_data:
    """Fetches movie listings from the TMS (Gracenote) On Connect API.

    The API key is read from the ``api_key`` environment variable.
    """
    def __init__(self,date,zip_code,line_up_id,date_time):
        # Query parameters for the two endpoints below.
        self.date = date
        self.zip_code = zip_code
        self.date_time = date_time
        self.line_up_id = line_up_id
    def get_movies_playing_in_local_theaters(self):
        """Return the movie-showings JSON for date/zip as a pandas DataFrame."""
        url = 'http://data.tmsapi.com/v1.1/movies/showings?startDate={date}&zip={zip_code}&api_key={api_key}'.\
            format(date=self.date,zip_code=self.zip_code,api_key=os.environ.get('api_key'))
        response = requests.get(url)
        data = response.json()
        df = pd.DataFrame.from_dict(data)
        return df
    def get_movies_airing_on_TV(self):
        """Return the new-show-airings JSON for the lineup/time as a pandas DataFrame."""
        url = 'http://data.tmsapi.com/v1.1/programs/newShowAirings?lineupId={line_up_id}&startDateTime={date_time}&api_key={api_key}'.\
            format(line_up_id=self.line_up_id,date_time=self.date_time,api_key=os.environ.get('api_key'))
        response = requests.get(url)
        data = response.json()
        df = pd.DataFrame.from_dict(data)
        return df
#Creating data models
Base = declarative_base()
class create_table_theater_movies(Base):
    """SQLAlchemy ORM model for the ``theater_movies`` table."""
    __tablename__ = 'theater_movies'
    id = Column(Integer,primary_key=True)
    title = Column(String(100))
    release_year = Column(Integer)
    genres = Column(String(50))
    description = Column(String(300))
    channel_name_theater_name = Column(String(50))
class create_table_TV_movies(Base):
    """SQLAlchemy ORM model for the ``tv_movies`` table (same shape as theater_movies)."""
    __tablename__ = 'tv_movies'
    id = Column(Integer,primary_key=True)
    title = Column(String(100))
    release_year = Column(Integer)
    genres = Column(String(50))
    description = Column(String(300))
    channel_name_theater_name = Column(String(50))
#User inpit to fetch the API data.
#======================================
date = '2020-11-18'
zip_code = '78701'
line_up_id = 'USA-TX42500-X'
date_time = '2020-11-18T16:00Z'
#=======================================
#Calling the API to get the data.
obj = get_api_data(date,zip_code,line_up_id,date_time)
local_theaters_df = obj.get_movies_playing_in_local_theaters()
#Cleaning API response for Theaters data.
df_table1 = local_theaters_df[['title','releaseYear','genres','longDescription','showtimes']]
df_table1['showtimes'] = df_table1['showtimes'].map(lambda x:x[0]['theatre']['name'])
df_table1.rename(columns={'showtimes':'channel_name_theater_name','longDescription':'description',
'releaseYear':'release_year'},inplace=True)
df_table1['genres'] = df_table1['genres'].map(lambda x:','.join(x) if str(x) != 'nan' else x)
df_table1 = df_table1[['title','release_year','genres','description','channel_name_theater_name']]
#Cleaning API response for TV data.
line_up_id_df = obj.get_movies_airing_on_TV()
df_table2 = line_up_id_df[['channels','program']]
df_table2['title'] = df_table2.apply(lambda x:x['program']['title'],axis=1)
df_table2['release_year'] = df_table2.apply(lambda x:x['program']['releaseYear'] if \
'releaseYear' in x['program'].keys() != True else '',axis=1)
df_table2['genres'] = df_table2.apply(lambda x:x['program']['genres'] if \
'genres' in x['program'].keys() != True else '' ,axis=1)
df_table2['genres'] = df_table2['genres'].map(lambda x:','.join(x) if str(x) != 'nan' else x)
df_table2['description'] = df_table2.apply(lambda x:x['program']['longDescription'] if \
'longDescription' in x['program'].keys() != True else '' ,axis=1)
df_table2['channel_name_theater_name'] = df_table2['channels'].map(lambda x:','.join(x) if str(x) != 'nan' else x)
df_table2 = df_table2[['title','release_year','genres','description','channel_name_theater_name']]
#=================================================================================================
#Loading Cleaned data in mysql data tables.
# BUG FIX: the original line nested single quotes inside a single-quoted literal
# (a SyntaxError) and would never have interpolated the environment variables.
cnx = create_engine('mysql+pymysql://{}:{}@localhost:3306/{}'.format(
    os.environ.get('user'), os.environ.get('password'), os.environ.get('database_name')))
Session = sessionmaker(bind=cnx)
session = Session()
Base.metadata.create_all(cnx)
#Writing pandas dataframes in table.
df_table1.to_sql('theater_movies',con=cnx,index=False,if_exists='replace',method='multi')
df_table2.to_sql('tv_movies',con=cnx,index=False,if_exists='replace',method='multi')
#Reading data from tables into dataframes.
mysql_cn= MySQLdb.connect(host='localhost',
                port=3306,user=os.environ.get('user'), passwd=os.environ.get('password'),
                db=os.environ.get('database_name'))
theater_movies = pd.read_sql('select * from theater_movies;', con=mysql_cn)
# genres is stored comma-joined; split it back and explode to one genre per row.
theater_movies['genres'] = theater_movies['genres'].map(lambda x:x.split(',') if x is not None else '')
theater_movies = theater_movies.explode('genres')
#--------------------------------------------------------------------------------------------------------
tv_movies = pd.read_sql('select * from tv_movies;', con=mysql_cn)
tv_movies['genres'] = tv_movies['genres'].map(lambda x:x.split(',') if x is not None else '')
tv_movies = tv_movies.explode('genres')
#Combining the data from both the tables to get the groups.
merged_df = pd.concat([theater_movies,tv_movies])
res_data = merged_df.groupby('genres')['title'].count()
res_data = pd.DataFrame(res_data)
res_data = res_data.reset_index()
res_data.rename(columns={'title':'title_count'},inplace=True)
res_data = res_data.sort_values('title_count',ascending = False).head(5)
mysql_cn.close()
#=========================================================
#Printing the data as per question asked.
#Top 5 genres are:
print (res_data)
| true |
265b1f796fa84a13aa911b76008c86df623ae9f7 | Python | Yuriy-Leonov/python-rabbitmq-example | /samples/example_shared_channel.py | UTF-8 | 997 | 2.53125 | 3 | [
"MIT"
] | permissive | import asyncio
import json
import time
from utils import connector
from utils import funcs
QUEUE_NAME = "example_shared_channel"
i = 0
async def send_message_with_shared_channel():
global i
conct = connector.Connector()
shared_channel = await conct.get_channel()
await shared_channel.basic_publish(
body=json.dumps({
"some": "obj"
}).encode(),
routing_key=QUEUE_NAME
)
i += 1
# print(f"finish. total = {i}")
async def main():
await funcs.declare_queue(queue_name=QUEUE_NAME)
tasks = [
send_message_with_shared_channel()
for _ in range(10000)
]
await asyncio.gather(*tasks)
# close connection
conct = connector.Connector()
conn = await conct.get_connection()
await conn.close()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
start = time.time()
loop.run_until_complete(main())
print(f"time execution: {time.time() - start:.2f} sec")
loop.close()
| true |
063cd980237ee3c261d62adbc45f620b790cbf1a | Python | KarAbhishek/MSBIC | /All_is_code/10_600_AutomaticCode.py | UTF-8 | 7,404 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 21 01:00:11 2016
"""
import numpy as np
import matplotlib.pyplot as plt
from pandas.tools.plotting import scatter_matrix
import pandas as pd
#from sklearn.linear_model import LinearRegression
# Module-level training statistics. The [0, 0] arrays act as "not yet computed"
# sentinels; standardize() fills them on its first call and reuses them after.
meanTrain = np.array([0,0])
stdTrain = np.array([0,0])
def standardize(X, standardizeOnlyX):
    """Z-score X column-wise using the cached training statistics.

    On the first call (meanTrain/stdTrain still at their [0, 0] sentinels) the
    statistics are computed from X and cached in the module globals. When
    standardizeOnlyX is true, X holds only feature columns, so the cached stats
    are read with a +1 offset (index 0 of the stats is the target column).
    NOTE(review): modX aliases X, so the caller's array is modified in place.
    """
    modX = X
    #print(X)
    global meanTrain
    global stdTrain
    #print(np.array_equal(meanTrain, np.array([0,0])) and np.array_equal(stdTrain, np.array([0,0])))
    if(np.array_equal(meanTrain, np.array([0,0])) and np.array_equal(stdTrain, np.array([0,0]))):
        # First (training) call: compute and cache per-column mean and sample std.
        meanTrain = np.mean(X, axis = 0)
        stdTrain = np.std(X, axis = 0, ddof=1)
        #print(meanTrain,stdTrain)
    if standardizeOnlyX:
        for colIdx in range(len(X[0])):
            modX[:, colIdx] = (X[:, colIdx]-meanTrain[colIdx+1])/stdTrain[colIdx+1]
        return modX
    for colIdx in range(len(X[0])):
        modX[:, colIdx] = (X[:, colIdx]-meanTrain[colIdx])/stdTrain[colIdx]
    #Y[0] = Y[0]-np.mean(Y[0], axis = 0)/np.std(Y[0], axis = 0)
    #print(modX)
    # Persist the training statistics for later inspection/reuse.
    np.savetxt("10_600_Mean.csv", meanTrain, delimiter=",", fmt='%s')
    np.savetxt("10_600_Std.csv", stdTrain, delimiter=",", fmt='%s')
    return modX
def dummyCodingAttempt2(certainfeature):
    """One-hot (dummy) encode a 1-D categorical feature.

    Categories are ordered as np.unique returns them (sorted). Returns a list
    with one 0/1 indicator list per input element.
    """
    categories = np.unique(certainfeature)
    n_categories = len(categories)
    encoded = [[] for _ in range(len(certainfeature))]
    for cat_idx in range(n_categories):
        indicator = [0] * n_categories
        indicator[cat_idx] = 1
        # Assign a fresh copy of the indicator to every matching position.
        for pos in np.flatnonzero(certainfeature == categories[cat_idx]):
            encoded[pos] = list(indicator)
    return encoded
def L_loss(Y, W, X, isStandardized):
    """Mean squared error of the linear model W on X against raw targets Y.

    Predictions are produced (optionally on standardized features), dumped to
    CSV before and after being mapped back to the original target scale, then
    compared with Y.
    """
    #print(W.shape)
    #print(X.shape)
    prediction = generatePrediction(X,W, isStandardized)
    np.savetxt("10_600_before_std_prediction.csv", prediction, delimiter=",", fmt='%s')
    prediction = reverseStandardization(prediction)
    print(prediction)
    np.savetxt("10_600_after_std_prediction.csv", prediction, delimiter=",", fmt='%s')
    residual = np.abs(Y - prediction)
    #print(residual)
    #plt.scatter(range(1, len(residual)+1), residual)
    #outlierRemoval(residual)
    return np.mean(residual**2)
def generatePrediction(X, W, isStandardized):
    """Linear predictions X @ W, standardizing the features first if requested."""
    design = standardize(X, True) if isStandardized else X
    return np.dot(design, W)
def linReg(X, Y):
    """Ordinary least squares via the normal equations: solve (X'X) w = X'Y."""
    gram = np.dot(X.T, X)
    moment = np.dot(X.T, Y)
    return np.linalg.solve(gram, moment)
def reverseStandardization(Y):
    """Map standardized predictions back to the original target scale.

    Uses the cached training statistics; index 0 of meanTrain/stdTrain is the
    target column's mean/std.
    """
    global meanTrain
    global stdTrain
    #ret = np.zeros(len(Y))
    #print(len(Y))
    ret = (Y*stdTrain[0]+meanTrain[0])
    #for colIdx in range(len(Y)):
        #ret[colIdx] = (Y[colIdx]*stdTrain[colIdx]+meanTrain[colIdx])
    print(stdTrain[0])
    print(meanTrain[0])
    return ret
def linRegAndLossCalc(Xtrain, isStandardized, testAndTrainDividingRange):
    """Fit OLS on the first `testAndTrainDividingRange` rows and return the
    mean-squared loss (via L_loss) on the remaining rows.

    Column 0 of Xtrain is the target. The training slice is optionally
    standardized (in place, via the module-level stats) and outlier-filtered
    before fitting.
    """
    #print(Xtrain[0:400, :])
    if(isStandardized):
        train = standardize(Xtrain[0:testAndTrainDividingRange, :], False)
    else:
        train = Xtrain
    test = Xtrain[testAndTrainDividingRange:Xtrain.shape[0], :]
    train = outlierRemoval(train)
    ytrain = train[:, 0]
    Xtrain = train[:, 1:]
    Xtest = test[:, 1:]
    ytest = test[:, 0]
    W = linReg(Xtrain, ytrain)
    #print(W)
    return L_loss(ytest, W, Xtest, isStandardized)
def morphFeaturesAfterDummyCoding(Xtrain):
    """Replace the last two (categorical) columns with one-hot encodings.

    Column 0 (the target) and the other numeric columns pass through unchanged.
    """
    trailing = Xtrain[:, 1:][:, [-2, -1]]
    cat_a = trailing[:, 0]
    cat_b = trailing[:, 1]
    numeric_part = Xtrain[:, :-2]
    pieces = (numeric_part, dummyCodingAttempt2(cat_a), dummyCodingAttempt2(cat_b))
    return np.concatenate(pieces, axis=1)
def outlierRemoval(Xtotal):
    """Drop rows whose OLS residual (target minus raw-feature fit) falls
    outside the (-2.8, 3.5) band. Column 0 of Xtotal is the target."""
    weights = linReg(Xtotal[:, 1:], Xtotal[:, 0])
    fitted = generatePrediction(Xtotal[:, 1:], weights, False)
    residual = Xtotal[:, 0] - fitted
    too_high = np.where(residual > 3.5)
    too_low = np.where(residual < -2.8)
    return np.delete(Xtotal, np.append(too_high, too_low), axis=0)
def forwardStageWise(Xtotal, testAndTrainDividingRange, XtestInput):
    """Forward-stagewise linear regression with an L2-style shrinkage term.

    Column 0 of Xtotal is the target. The first `testAndTrainDividingRange`
    rows are standardized (in place) and outlier-filtered, then 500 rounds
    each nudge the single coefficient with the largest gradient magnitude.
    If XtestInput is None, the remaining rows of Xtotal form the test set;
    otherwise XtestInput is scored against all-zero dummy targets.
    Returns the loss computed by L_loss().
    """
    #Xtotal = standardize(Xtotal, False)
    #Y = Xtotal[:, 0]
    train = Xtotal[0:testAndTrainDividingRange, :]
    train = standardize(train, False)
    train = outlierRemoval(train)
    Xtrain = train[:, 1:]
    Y = train[:, 0]
    #Xtrain = standardize(Xtrain, False)
    np.savetxt("10_600_fwdstg_XTrain.csv", Xtrain, delimiter=",", fmt='%s')
    # BUG FIX: the original tested `XtestInput == None`, which is an
    # elementwise comparison on ndarrays in modern NumPy and makes the `if`
    # raise a "truth value is ambiguous" error. Identity test is correct.
    if XtestInput is None:
        Xtest = Xtotal[testAndTrainDividingRange:, 1:]
        Ytest = Xtotal[testAndTrainDividingRange:, 0]
    else:
        Xtest = XtestInput
        Ytest = np.zeros(Xtest.shape[0])
    #print(Xtrain.shape[1])
    W = np.zeros(Xtrain.shape[1])
    grad = np.zeros(Xtrain.shape[1])
    res = Y[:testAndTrainDividingRange]
    #bestfeat = 0
    for iter in range(500):
        #R = res - np.dot(Xtrain, W).flatten()
        #grad = (np.dot(Xtrain.T, R)+ (0.4*W).flatten())/Xtotal.shape[0];
        # Gradient of the residual plus 0.4*W shrinkage, averaged over rows.
        grad = (np.dot(Xtrain.T, res) + (0.4*W).flatten())/Xtrain.shape[0]
        bestfeat = np.argmax(abs(grad))
        W[bestfeat] = W[bestfeat] + grad[bestfeat]
        res = res - Xtrain[:, bestfeat] * grad[bestfeat];
    print(W)
    return L_loss(Ytest, W, Xtest, True)
'''
def polynomialFeatureAddition(Xtotal):
X1=Xtotal[:, 2]
X2=Xtotal[:, 4]
X3=Xtotal[:, 6]
XRemaining = Xtotal[:, 1] + Xtotal[:, 3] + Xtotal[:, 5] + Xtotal[:, 7:]
polyFeatureSet = [X1**2, X2**2, X3**2, X1*X2, X2*X3, X1*X3]
return XRemaining+polyFeatureSet
'''
def polynomialFeatureAddition(X):
    """Append squared terms for columns 5 and 7 plus their interaction term."""
    print(X.shape)
    col5 = X[:, 5]
    col7 = X[:, 7]
    sq5 = np.array([[v ** 2] for v in col5])
    sq7 = np.array([[v ** 2] for v in col7])
    cross = np.array([[a * b] for a, b in zip(col5, col7)])
    return np.concatenate((X, sq5, sq7, cross), axis=1)
# Driver: load the homework data, expand categorical and polynomial features,
# then fit forward-stagewise regression and print the test loss.
Xtotal = np.genfromtxt('regression_hw.csv', delimiter = ',')
#plt.scatter(Xtotal, Xtotal)
#scatter_matrix(pd.DataFrame(Xtotal), alpha=0.2, figsize=(6, 6), diagonal='kde')
Xtotal = morphFeaturesAfterDummyCoding(Xtotal)
Xtotal = polynomialFeatureAddition(Xtotal)
#np.savetxt("10_600_Dum.csv", Xtotal[0:400, :], delimiter=",", fmt='%s')
#Xtotal = outlierRemoval(Xtotal)
#np.savetxt("10_600_Standardize.csv", standardize(Xtotal[0:400, :], False), delimiter=",", fmt='%s')
#print(Xtrain.shape)
#print(linRegAndLossCalc(Xtotal, True, 400))
#print(apply_cv(pipeline,Xtotal[1:], Xtotal[0],4,scorer=mse))
# The held-out test features get the same preprocessing as the training data.
XtestInput = np.genfromtxt('regression_hw_testx.csv', delimiter = ',')
XtestInput = morphFeaturesAfterDummyCoding(XtestInput)
XtestInput = polynomialFeatureAddition(XtestInput)
print(forwardStageWise(Xtotal, Xtotal.shape[0], XtestInput))
| true |
cc7ae15fe8afb147e2b95b34a274be1cd661d8f7 | Python | averagehat/biolearn | /func.py | UTF-8 | 7,116 | 2.53125 | 3 | [] | no_license | from functools import partial, wraps
import itertools as it
import string
import sys
from collections import namedtuple
from operator import itemgetter, attrgetter as attr
from schema import Schema, SchemaError
# Python 2/3 compatibility shim: expose the lazy map/filter/zip under the
# itertools-style names used throughout this module.
PY3 = sys.version[0] == '3'
imap, ifilter, izip = (map, filter, zip) if PY3 else (it.imap, it.ifilter, it.izip)
#notin = compose(_not, operator.methodcaller('__contains__'))
#notin = compose(_not, attr('__contains__'))
#mismatches = pfilter(notin('M='))
def merge_dicts(*dict_args):
    '''
    from http://stackoverflow.com/a/26853961
    Shallow-merge any number of dicts into a new dict; keys in later
    dicts take precedence over the same keys in earlier dicts.
    '''
    merged = {}
    for mapping in dict_args:
        for key, value in mapping.items():
            merged[key] = value
    return merged
def _not(x):
    """Boolean negation as a first-class function (for composition)."""
    return not x
def partial2(method, param):
    """Bind ``param`` as the *second* positional argument of ``method``."""
    def t(x):
        return method(x, param)
    return t
def _id(x): return x  # identity function, used as a default in lookups below
def apply_key_func(k, v, funcdict):
    """Apply funcdict[k] to v; fall back to the identity when k is unregistered."""
    return funcdict.get(k, _id)(v)
def compose_all(*funcs):
    """Fold with compose: compose_all(f, g, h)(x) == f(g(h(x))).
    NOTE(review): relies on the Python 2 builtin ``reduce``."""
    return reduce(compose, funcs)
#def k_compose(outer, **okwargs):
# ''' compose(f, g)(x) == f(g(x)) '''
# def newfunc(*args, **ikwargs):
# _kwargs = dict( (k, apply_key_func(k, v, okwargs)) for k, v in ikwargs.items())
# return outer(*args, **_kwargs)
# return newfunc
def compose(outer, inner):
    """compose(f, g)(x) == f(g(x))."""
    def composed(*args, **kwargs):
        intermediate = inner(*args, **kwargs)
        return outer(intermediate)
    return composed
# Like compose, but the inner result is splatted into the outer call:
# starcompose2(f, g)(x) == f(*g(x)).
starcompose2 = lambda f, g: lambda x: f(*g(x))
#starcompose = partial(reduce, starcompose2) #need splat
def starcompose(*funcs):
    """Fold any number of functions with starcompose2 (Python 2 builtin reduce)."""
    return reduce(starcompose2, funcs)
# NOTE(review): exact duplicate of compose() defined above — this second
# definition silently rebinds the name. Harmless, but one copy could go.
def compose(outer, inner):
    ''' compose(f, g)(x) == f(g(x)) '''
    def newfunc(*args, **kwargs):
        return outer(inner(*args, **kwargs))
    return newfunc
def fzip(funcs, args):
    """Lazily yield func(arg) for each paired function/argument (uses module izip)."""
    for func, arg in izip(funcs, args):
        yield func(arg)
def dictmap(func, _dict):
    """Apply func to every value of _dict, returning a new dict with the same keys."""
    mapped = {}
    for key, value in _dict.items():
        mapped[key] = func(value)
    return mapped
def ilen(iterable):
    """Count the items in an iterable (consumes it)."""
    count = 0
    for _ in iterable:
        count += 1
    return count
def reverse(collection):
    """Return a reversed copy of a sequence (list, tuple, or string)."""
    backwards = slice(None, None, -1)
    return collection[backwards]
# Curried convenience wrappers. NOTE(review): string.strip/split/join, the
# bare ``reduce`` builtin, and ``apply`` below are Python 2 only — these
# bindings fail at import/use time on Python 3.
pifilter = partial(partial, ifilter)
compose_list = partial(reduce, compose)
# compose_all = compose(compose_list, lambda *a: a)
pmap = partial(partial, map)
pfilter = partial(partial, filter)
#TODO: could use partial2 instead
pstrip = lambda x: partial(string.strip, chars=x)
psplit = lambda x: partial(string.split, sep=x)
pjoin = lambda x: partial(string.join, sep=x)
boolint = lambda x: 1 if x else 0
dictzip = compose(dict, zip)
cmp2=lambda f, g: lambda a, b: (f(a), g(b))
#ilen = compose(sum, pmap(boolint))
#Given a list of functons and names, return the result of those functions dictzipped witht the names.
#TODO:
''' dictfilter '''
def apply_each(funcs, arg):
    """Lazily apply every function in funcs to the same single argument."""
    return fzip(funcs, it.repeat(arg))
import inspect
import types
def is_local(object):
    """True when `object` is a plain function defined in this very module."""
    return isinstance(object, types.FunctionType) and object.__module__ == __name__
#use inspect.isfunction
def get_funcs():
    """Return (name, function) pairs for every function defined in this module."""
    return inspect.getmembers(sys.modules[__name__], \
        predicate = lambda f: inspect.isfunction(f) and f.__module__ == __name__)
    #return inspect.getmembers(sys.modules[__name__], predicate=is_local)
    #return dict( ((name, func)) for name, func in locals().items() if is_local(name))
#    for key, value in locals().items():
#        if callable(value) and value.__module__ == __name__:
#            l.append(key)
'''
compose columns + object + getters => dict
 - unzip
have fqframe return a dict of functions, excluding get_row, openframe; instead passing it to a function which
arranges the getters, applies them to a get_object function, and creates an intermediate dictionary.
This function allows for optional extras, like samframe & fastq (rather than fasta
'''
# unzip(pairs): transpose an iterable of pairs into (firsts, seconds).
unzip = starcompose(zip, _id)
def nameddict(Name, _dict):
    ''' dict to named tuple '''
    names, values = unzip(_dict.items())
    return namedtuple(Name, names)(*values)
ppartial = partial(partial)
# NOTE(review): ``apply`` is a Python 2 builtin (removed in Python 3).
apply_to_object = compose(apply, ppartial)
# Keyword-splat variant of compose: kstarcompose2(f, g)(x) == f(**g(x)).
kstarcompose2 = lambda f, g: lambda x: f(**g(x))
def kstarcompose(*funcs):
    """Fold any number of functions with kstarcompose2 (Python 2 builtin reduce)."""
    return reduce(kstarcompose2, funcs)
#kstarcompose = partial(reduce, kstarcompose2)
#use str.endswith( (tuple, of, vals)
# extension("a.b.c") -> "c"; fileext(obj) reads obj.filename's extension.
extension = compose(itemgetter(-1), psplit('.'))
fileext = compose(extension, attr('filename'))
def iter_until_stop(f, *args, **kwargs):
    """Yield f(*args, **kwargs) repeatedly until it raises StopIteration."""
    keep_going = True
    while keep_going:
        try:
            value = f(*args, **kwargs)
        except StopIteration:
            keep_going = False
        else:
            yield value
# "Flatten" one level: a non-list passes through, a list yields its first element.
flatten_list = lambda a: a if type(a) != list else a[0]
def split_list(A, idx):
    """Split sequence A into the pair (A[:idx], A[idx:])."""
    return A[:idx], A[idx:]
'''http://docs.python.org/3.4/library/itertools.html#itertools-recipes'''
def partition(pred, iterable):
    """Use a predicate to partition entries into false entries and true entries
    partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
    http://docs.python.org/3.4/library/itertools.html#itertools-recipes
    """
    # NOTE(review): it.ifilterfalse/it.ifilter exist only in Python 2
    # (itertools.filterfalse / builtin filter in Python 3).
    t1, t2 = it.tee(iterable)
    return it.ifilterfalse(pred, t1), it.ifilter(pred, t2)
# Point-free boolean combinators over predicates.
# NOTE(review): this _not (predicate negator) rebinds the earlier _not(x) above.
_and2 = lambda f, g: lambda x: f(x) and g(x)
_and = lambda *x: reduce(_and2, x)
_not = lambda f: lambda x: not f(x)
_or = lambda f, g: lambda x: f(x) or g(x)
bnot = lambda x: ~x
#def pool_map(func, *args, **kwargs):
# pool = multiprocessing.Pool()
# pool
#
def rpartial(func, *args):
    """Like functools.partial, but binds *args to the right-hand side of func."""
    def bound(*leading):
        return func(*(leading + args))
    return bound
# Pre-bound error-message formatter used by the type-checking helpers below.
message = "in function {0}, arg {1} is not an instance of {2}, but is type {3}.".format
def check_type(func, _type, obj):
    """Assert that obj is an instance of _type, with a descriptive message."""
    assert isinstance(obj, _type), message(func.__name__, obj, _type, type(obj))
#get_type = partial(getattr, __module__)
def dict_intersect(d1, d2):
    """Return the subset of d1 whose keys also appear in d2 (values from d1)."""
    shared = {}
    for key in d1:
        if key in d2:
            shared[key] = d1[key]
    return shared
# Sentinel "type" meaning "any callable" for the typecheck decorator below.
functype = 'FUNCTYPE'
def handle_check(etype, arg, kwargs, func):
    """Validate one argument of func against an expected type spec.

    etype may be: a dict (validated via schema.Schema against matching kwargs),
    None (check skipped), the functype sentinel (arg must be callable), or a
    real type (isinstance-checked via check_type).
    NOTE(review): uses a Python 2 print statement; this file is Python 2 only.
    """
    if type(etype) is dict:
        scheme, matched_args = dict_intersect(etype, kwargs), dict_intersect(kwargs, etype)
        try:
            Schema(scheme).validate(matched_args)#, error=message(func.__name__, arg, etype))
        except SchemaError as e:
            print "scheme {0} did not fit kwargs {1}".format(scheme, kwargs)
            raise e
    elif arg is None or etype is None:
        #TODO: log?
        #print "Warning " + message(func.__name__, arg, etype)
        return
    elif etype == functype:
        assert hasattr(arg, '__call__')
    else:
        check_type(func, etype, arg)
#TODO:
'''
from string import splitfields
if type(etype) is tuple:
    handle *args (how?)
if etype.startswith('has'):
    attrs = etype.split(' ')[1:]
    for _attr in attrs:
        assert hasattr(arg, _attr), "arg {0} did not have attribute {1} in function {2}".format(arg, _attr, func.__name__)
'''
''' could be: *types, **kwtypes '''
def typecheck(*types):
    """Decorator factory: validate positional args against `types` via handle_check."""
    def decorator(func):
        @wraps(func)
        def typechecked(*args, **kwargs):
            # NOTE(review): relies on Python 2's eager map for its side effect;
            # under Python 3's lazy map the checks would never actually run.
            check = partial(handle_check, func=func, kwargs=kwargs)
            map(check, types, args)
            return func(*args, **kwargs)
        return typechecked
    return decorator
| true |
aee7db33e1b809b3f6599c867ff78cb9cd35618c | Python | cardadfar/Object-Illum | /bbox.py | UTF-8 | 1,677 | 3.21875 | 3 | [] | no_license | import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
class BBox:
    ''' Axis-aligned bounding box.
        x: starting x position
        y: starting y position
        wth: width of box
        hgt: height of box
    '''
    def __init__(self, x, y, wth, hgt):
        # Coordinates are coerced to ints; callers may pass floats.
        self.x = int(x)
        self.y = int(y)
        self.wth = int(wth)
        self.hgt = int(hgt)
    def are_simmilar(bbox1, bbox2, img_wth, img_hgt):
        ''' returns true if bboxes are too simmilar, false else
            (called as BBox.are_simmilar(a, b, w, h) — note there is no self)
            bbox1: bounding box 1
            bbox2: bounding box 2
            img_wth: width of image that bboxes lie on
            img_hgt: height of image that bboxes lie on
        '''
        area_threshold = 0.95
        width_threshold = img_wth * 0.05
        height_threshold = img_hgt * 0.05
        x1_min = bbox1.x
        y1_min = bbox1.y
        x1_max = bbox1.x + bbox1.wth
        y1_max = bbox1.y + bbox1.hgt
        x2_min = bbox2.x
        y2_min = bbox2.y
        x2_max = bbox2.x + bbox2.wth
        y2_max = bbox2.y + bbox2.hgt
        # Intersection rectangle of the two boxes.
        x_min = max(x1_min, x2_min)
        x_max = min(x1_max, x2_max)
        y_min = max(y1_min, y2_min)
        y_max = min(y1_max, y2_max)
        bbox1_area = (x1_max - x1_min) * (y1_max - y1_min)
        bbox2_area = (x2_max - x2_min) * (y2_max - y2_min)
        area_min = min(bbox1_area, bbox2_area)
        # BUG FIX: for disjoint boxes both (x_max - x_min) and (y_max - y_min)
        # are negative, so their product was a bogus *positive* overlap and two
        # far-apart same-size boxes compared as similar. Clamp each side to 0.
        shared_area = max(0, x_max - x_min) * max(0, y_max - y_min)
        area_intersection = shared_area / area_min
        width_simmilarity = abs(bbox1.wth - bbox2.wth)
        height_simmilarity = abs(bbox1.hgt - bbox2.hgt)
        if((area_intersection > area_threshold) and (width_simmilarity < width_threshold) and (height_simmilarity < height_threshold)):
            return True
        return False
| true |
941af6872a6ea3b8236869ed25414eb1f3cd99f7 | Python | jjpatel361/machinelearning | /code/knn_classifier.py | UTF-8 | 2,582 | 2.984375 | 3 | [] | no_license | ''''
Nearest Neighbour Classifier
:arg dataset.data
:arg trainlabel.0
:arg eta
'''
import sys;
import os;
'''
Read the data set file and labels file
'''
# Read the whitespace-separated feature matrix (argv[1]) into a list of float rows.
ds_file = sys.argv[1];
fh = open(ds_file, mode='r');
dataset = [];
for line in fh:
    arr = line.split();
    arr = [float(i) for i in arr];
    dataset.append(arr);
# Labels file (argv[2]): each line is "<label> <sample-index>"; build index -> label.
labels_file = sys.argv[2];
labels = {};
fh = open(labels_file, mode= 'r');
for line in fh:
    arr = line.split();
    labels[arr[1]] = float(arr[0]);
fh.close()
#print('Dataset',dataset)
#print('Labels',labels)
rows = len(dataset);
cols = len(dataset[0]);
'''
Split dataset into seperate classes
'''
# Labelled samples are grouped per class; unlabelled ones become the test set.
classes = {};
testing = {};
for index in range(rows):
    xi = dataset[index];
    if str(index) in labels:
        yi = int(labels.get(str(index)));
        if str(yi) not in classes:
            # Add class and then add item
            classes[str(yi)] = [];
        classes[str(yi)].append(xi);
    else:
        # use sample for testing
        #testing.append(xi);
        testing[index] = xi;
#print('Classes', classes)
'''
Find mean of each feature Xi
'''
# Per-class centroid: the mean of every feature column.
mean_c = {};
for i in range(len(classes)):
    ds = classes.get(str(i));
    yi = i;
    mean = [];
    # Find mean of each column
    for k in range(cols):
        # NOTE(review): the comprehension variable i shadows the class-loop index i.
        feature_vec = [i[k] for i in ds];
        mean_k = sum(feature_vec)/len(feature_vec);
        mean.append(mean_k);
    mean_c[yi] = mean;
#print('Mean ',mean_c)
'''
Prediction on test
Those samples whose labels are not found are used for testing.
'''
def eculideanDistance(a, b):
    """Euclidean distance between equal-length vectors; 0.0 on length mismatch."""
    total = 0
    if len(a) == len(b):
        for u, v in zip(a, b):
            total += (u - v) ** 2
    return total ** 0.5
def nearestNeighbour(result):
    """Return the class label whose distance value is smallest."""
    best_label, _ = min(result.items(), key=lambda pair: pair[1])
    return best_label
## Prepare output directory and write result to it.
current_dir = os.getcwd();
result_dir = os.path.join(os.getcwd(),'knn_results');
if not os.path.exists(result_dir):
    os.makedirs(result_dir)
#else:
#    os.removedirs(result_dir)
#    os.makedirs(result_dir)
output_file = os.path.join(result_dir,'knn_result.'+sys.argv[3]+'.txt');
f = open(output_file,'w');
# Nearest-mean classification: each unlabelled sample gets the label of the
# class centroid it is closest to (Euclidean distance).
for index, xtest in testing.items():
    result = {};
    # Find mean for all classes
    for l, mc in mean_c.items():
        #print('Label',l,'MC',mc)
        dist = eculideanDistance(xtest, mc);
        result[l] = dist;
    #print(result)
    # Find class of nearest mean to the sample
    predicted_class = nearestNeighbour(result);
    print(predicted_class,index,file=f);
| true |
a0946ea6490622b82933344f247ee55657696e49 | Python | reymarkus/pyxiv-dl-reborn | /pyxiv-dl/__main__.py | UTF-8 | 3,666 | 3.078125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """pyxiv-dl main script
This is the main script that executes the main pyxiv-dl argument parser.
"""
import argparse, sys, textwrap
from webcrawler import PixivWebCrawler
from pyxivhelpers import *
# constants
"""Script version"""
PYXIVDL_VERSION = "0.5.2"
"""Main function for accepting download args"""
def main():
    """Parse CLI arguments, validate the post id and range, and run the crawler."""
    # load argparse here
    argParser = argparse.ArgumentParser(
        description="pyxiv-dl: Downloads full-sized arts from Pixiv",
        usage="pyxiv-dl.py [options] <id>",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent("""
        ADDITIONAL NOTES
        The -r/--range option lets you to download images in a multi-image post on a
        specified range. This is silently ignored for single and ugoira posts. For the
        -r/--range option, the format it accepts is the following:
        \tx,y
        where 0 > x > y. x denotes the start of the image index to download and y denotes the
        end of the image index. If y exceeds the total number of posts, it will be silently
        ignored and will download up to the last image index.
        These are the valid formats accepted by the -r/--range option:
        \t1,4\tDownloads images from index 1 to 4
        \t4,6\tDownloads images from index 4 to 6
        \t4,\tDownloads images from index 4 up to the last
        \t,5\tDownloads images from the start up to index 5
        Anything not in the valid formats are considered invalid.
        """)
    )
    # -i and -r are mutually exclusive selection modes for multi-image posts.
    argParser.add_argument(
        "-i",
        "--index",
        help="Download a specific image on a multi image post based on its index. Cannot be combined with -r/--range",
        action="store",
        type=int
    )
    argParser.add_argument(
        "-r",
        "--range",
        help="Download images from a specified range using a from,to format. Cannot be combined with -i/--index. "
             "See help for more info",
        action="store"
    )
    # add NSFW confirmation bypass
    argParser.add_argument(
        "-n",
        "--nsfw",
        help="Always allow NSFW image download. If not set, you are asked to confirm the download first",
        action="store_true"
    )
    # add verbose argument
    argParser.add_argument(
        "-v",
        "--verbose",
        help="Show verbose output",
        action="store_true"
    )
    # show script version
    argParser.add_argument(
        "-V",
        "--version",
        help="Show the application's version and exit",
        action="version",
        version="%(prog)s v{}".format(PYXIVDL_VERSION)
    )
    # main argument: pixiv art IDs
    argParser.add_argument(
        "id",
        help="your Pixiv medium ID to get original-sized images or ugoira from",
        action="store"
    )
    # set parsed args variable
    parsedArgs = argParser.parse_args()
    # validate inputs first
    # check first for valid pixiv IDs
    if not validatePostIdRegex(parsedArgs.id):
        print("One or more inputs is not a valid Pixiv post ID. Aborting.")
        sys.exit(1)
    if parsedArgs.range is not None and not validateRange(parsedArgs.range):
        print("Range parameter is incorrect. See help for more info.")
        sys.exit(1)
    # run scraper
    pxCrawl = PixivWebCrawler(
        parsedArgs.id,
        parsedArgs.verbose,
        parsedArgs.nsfw,
        parsedArgs.range,
        parsedArgs.index
    )
    PixivWebCrawler.downloadImages(pxCrawl)
# main call
# Entry point: run the CLI, exiting quietly on Ctrl-C.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\nKeyboard interrupt detected. Aborting.")
d00ce07af2752deebce8d40620f75e6f7ca69300 | Python | himanshu2801/Geeksforgeeks | /Maximum Index.py | UTF-8 | 1,197 | 3.78125 | 4 | [] | no_license | """
Given an array A[] of N positive integers. The task is to find the maximum of j - i subjected to the constraint of A[i] <= A[j].
Example 1:
Input:
N = 2
A[] = {1,10}
Output: 1
Explanation: A[0]<=A[1] so (j-i)
is 1-0 = 1.
Example 2:
Input:
N = 9
A[] = {34,8,10,3,2,80,30,33,1}
Output: 6
Explanation: In the given array
A[1] < A[7] satisfying the required
condition(A[i] <= A[j]) thus giving
the maximum difference of j - i
which is 6(7-1).
Your Task:
The task is to complete the function maxIndexDiff() which finds and returns maximum index difference. Printing the output will be handled by driver code.
Constraints:
1 ≤ N ≤ 107
0 ≤ A[i] ≤ 1018
Expected Time Complexity: O(N).
Expected Auxiliary Space: O(N).
"""
def maxIndexDiff(arr, n):
    """Return the maximum j - i such that arr[i] <= arr[j].

    Uses prefix minima and suffix maxima with a two-pointer sweep: O(n) time,
    O(n) extra space.
    """
    prefix_min = [arr[0]] * n
    for idx in range(1, n):
        prefix_min[idx] = min(arr[idx], prefix_min[idx - 1])
    suffix_max = [arr[n - 1]] * n
    for idx in range(n - 2, -1, -1):
        suffix_max[idx] = max(suffix_max[idx + 1], arr[idx])
    i = j = best = 0
    while i < n and j < n:
        if prefix_min[i] <= suffix_max[j]:
            # Some i' <= i has arr[i'] <= some j' >= j; widen the window.
            best = max(best, j - i)
            j += 1
        else:
            i += 1
    return best
| true |
6b49faf6d977aa2cf6af7608c2ea15e576bd646b | Python | hanbule/telegram-bomber | /tools/proxy_grabber.py | UTF-8 | 2,224 | 2.5625 | 3 | [
"MIT"
] | permissive | import datetime
import random
import time
import requests
from tools import config
from handlers import text
def grab(logger, database, token):
    """Fetch candidate proxies from proxoid.net, merge with stored proxies,
    then probe each one and record it as live or dead in the database.

    Runs only while text.active_grabber is set; clears that flag when done.
    """
    if text.active_grabber:
        logger('ProxyGrabber', 'Getting proxys from Proxoid.net')
    else:
        return
    # One proxy per line in the API response.
    proxys = [proxy for proxy in requests.get(
        "https://proxoid.net/api/getProxy",
        params={
            "key": token,
            "countries": ",".join(["RU", "UA", "UZ", "BY", "DE", "NL", "PH"]),
            "types": ",".join(["https"]),
            "level": ",".join(["transparent", "high", "anonymous"]),
            "speed": 2000,
            "count": 0
        }).text.splitlines()]
    # Also re-check the proxies already stored in the database.
    [proxys.append(proxy) for proxy in database.get_proxys()]
    logger('ProxyGrabber', f'Check {len(proxys)} proxys...')
    random.shuffle(proxys)
    live_count = 0
    dead_count = 0
    for proxy in proxys:
        # Throttle: one probe every 5 seconds.
        time.sleep(5)
        try:
            timestamp = datetime.datetime.now().timestamp()
            try:
                proxy_url = "http://" + proxy
            except TypeError:
                break
            # Probe the proxy with a short-timeout request; failures fall
            # through to the outer except and mark the proxy dead.
            requests.get('https://ramziv.com/ip', timeout=3,
                         proxies=dict(http=proxy_url,
                                      https=proxy_url))
            ping = round((datetime.datetime.now().timestamp() - timestamp) * 1000)
            if ping < config.max_proxy_ping:
                live_count += 1
                if proxy not in database.get_proxys():
                    database.append_proxy(True, proxy)
                logger('ProxyGrabber', f'Find proxy ({live_count}/{dead_count}): {proxy} with ping {ping}ms')
                continue
            else:
                dead_count += 1
                if proxy in database.get_proxys():
                    database.append_proxy(False, proxy)
                continue
        except (Exception, BaseException, requests.exceptions.ConnectionError):
            dead_count += 1
            if proxy in database.get_proxys():
                database.append_proxy(False, proxy)
            continue
    text.active_grabber = False
    logger('ProxyGrabber', 'Good bye!')
| true |
44c4127ffe96eef8847488b387612862d18ab785 | Python | andrely/sublexical-features | /ExperimentSupport/experiment_support/experiment_runner.py | UTF-8 | 21,457 | 2.59375 | 3 | [] | no_license | import logging
import os
import sys
import multiprocessing
import time
from gensim.corpora import TextCorpus
from gensim.corpora.dictionary import Dictionary
from gensim.models import Word2Vec
from gensim.utils import chunkize_serial, InputQueue
from scipy import sparse
from numpy import mean, std, zeros, array
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.cross_validation import cross_val_score, KFold
from sklearn.feature_extraction.text import CountVectorizer, VectorizerMixin
from sklearn.metrics import accuracy_score, f1_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder, LabelBinarizer, MinMaxScaler
from sklearn.svm import LinearSVC
from shared_corpora.newsgroups import ArticleSequence, GroupSequence, newsgroups_corpus_path
from experiment_support.preprocessing import mahoney_clean, sublexicalize
from sublexical_semantics.vectorizers import BrownClusterVectorizer
def inclusive_range(a, b=None):
    """range() variant that includes the end point.

    inclusive_range(n) counts 0..n; inclusive_range(a, b) counts a..b.
    """
    # BUG FIX: the original tested `if not b`, which treated an explicit b=0
    # as "missing" and silently returned range(a + 1) instead of range(a, 1).
    if b is None:
        return range(a + 1)
    return range(a, b + 1)
class TopicPipeline(BaseEstimator):
    """Vectorizer + classifier pipeline for topic classification.

    Labels are encoded with a LabelBinarizer wrapped in one-vs-rest
    (multilabel) or a plain LabelEncoder (single label). An optional
    normalizer step is inserted between the vectorizer and the classifier.
    Scoring uses F1 for multilabel, accuracy otherwise.
    """
    def __init__(self, vectorizer, classifier, multilabel=False, normalizer=None):
        self.vectorizer = vectorizer
        self.classifier = classifier
        self.multilabel = multilabel
        self.normalizer = normalizer
        self.model = None
        self.target_encoder = None

    def fit(self, raw_documents, topics):
        """Encode the topic labels, assemble the sklearn Pipeline, and fit it."""
        if self.multilabel:
            estimator = OneVsRestClassifier(self.classifier)
            self.target_encoder = LabelBinarizer()
        else:
            estimator = self.classifier
            self.target_encoder = LabelEncoder()
        encoded_topics = self.target_encoder.fit_transform(topics)
        steps = [('vect', self.vectorizer)]
        if self.normalizer:
            steps.append(('norm', self.normalizer))
        steps.append(('cls', estimator))
        self.model = Pipeline(steps)
        self.model.fit(raw_documents, encoded_topics)
        return self

    def predict(self, raw_documents):
        """Predict and decode topic labels for raw documents."""
        encoded = self.model.predict(raw_documents)
        return self.target_encoder.inverse_transform(encoded)

    def score(self, raw_documents, topics):
        """F1 score (multilabel) or accuracy (single-label) on the given data."""
        predicted = self.predict(raw_documents)
        if self.multilabel:
            return f1_score(topics, predicted)
        return accuracy_score(topics, predicted)
def chunkize(corpus, chunksize, maxsize=0, as_numpy=False):
    """
    Ripped from gensim.utils.
    Since we could be run in a thread from fex. WikiCorpus, we can't run as a daemon process.
    This means processes will be left hanging as it stands. Run from scripts only.
    ---
    Split a stream of values into smaller chunks.
    Each chunk is of length `chunksize`, except the last one which may be smaller.
    A once-only input stream (`corpus` from a generator) is ok, chunking is done
    efficiently via itertools.
    If `maxsize > 1`, don't wait idly in between successive chunk `yields`, but
    rather keep filling a short queue (of size at most `maxsize`) with forthcoming
    chunks in advance. This is realized by starting a separate process, and is
    meant to reduce I/O delays, which can be significant when `corpus` comes
    from a slow medium (like harddisk).
    If `maxsize==0`, don't fool around with parallelism and simply yield the chunksize
    via `chunkize_serial()` (no I/O optimizations).
    >>> for chunk in chunkize(range(10), 4): print(chunk)
    [0, 1, 2, 3]
    [4, 5, 6, 7]
    [8, 9]
    """
    assert chunksize > 0
    if maxsize > 0:
        # Prefetch chunks in a separate process via a bounded queue.
        q = multiprocessing.Queue(maxsize=maxsize)
        worker = InputQueue(q, corpus, chunksize, maxsize=maxsize, as_numpy=as_numpy)
        worker.start()
        sys.stdout.flush()
        while True:
            # InputQueue signals end-of-stream by putting a single None.
            chunk = [q.get(block=True)]
            if chunk[0] is None:
                break
            yield chunk.pop()
    else:
        # No prefetching: chunk lazily in this process.
        for chunk in chunkize_serial(corpus, chunksize, as_numpy=as_numpy):
            yield chunk
class MultiVectorizer(BaseEstimator):
    """Concatenate the sparse outputs of several vectorizers column-wise."""
    def __init__(self, vectorizers=None):
        if not vectorizers:
            raise ValueError
        self.vectorizers = vectorizers

    def fit_transform(self, raw_documents, y=None):
        """Fit every sub-vectorizer and horizontally stack their outputs."""
        matrices = [vect.fit_transform(raw_documents, y) for vect in self.vectorizers]
        return sparse.hstack(matrices)

    def fit(self, raw_documents, y=None):
        """Fit every sub-vectorizer on the documents."""
        for vect in self.vectorizers:
            vect.fit(raw_documents, y)
        return self

    def transform(self, raw_documents):
        """Transform with every sub-vectorizer and horizontally stack the results."""
        matrices = [vect.transform(raw_documents) for vect in self.vectorizers]
        return sparse.hstack(matrices)
class Word2VecVectorizer(BaseEstimator, TransformerMixin, VectorizerMixin):
    """Vectorizer that sums pre-trained word2vec token vectors per document."""
    def __init__(self, w2v_fn, preprocessor=None):
        # Path to a saved gensim Word2Vec model; loaded lazily in fit().
        self.w2v_fn = w2v_fn
        self.model = None
        # Settings consumed by VectorizerMixin.build_analyzer().
        self.analyzer = 'word'
        self.preprocessor = preprocessor
        self.strip_accents = 'unicode'
        self.lowercase = True
        self.stop_words = None
        self.tokenizer = None
        self.token_pattern = r"(?u)\b\w\w+\b"
        self.input = None
        self.ngram_range = (1, 1)
        self.encoding = 'utf-8'
        self.decode_error = 'strict'
        self.analyzer_func = None

    def fit(self, x, y=None):
        """Build the token analyzer and load the word2vec model from disk."""
        self.analyzer_func = self.build_analyzer()
        self.model = Word2Vec.load(self.w2v_fn)
        return self

    def transform(self, raw_documents, y=None):
        """Return an (n_docs, embedding_dim) matrix of summed token vectors.

        Tokens absent from the model's vocabulary are skipped.
        """
        dims = self.model.layer1_size
        out = zeros((len(raw_documents), dims))
        for doc_idx, doc in enumerate(raw_documents):
            for token in self.analyzer_func(doc):
                if token in self.model:
                    out[doc_idx] += self.model[token]
        return out
def process(args):
    """Pool worker: join a token list, optionally clean it, then sublexicalize.

    args is a (tokens, clean_func, order) tuple; returns the token list
    produced by sublexicalize(..., join=False).
    """
    tokens, clean_func, order = args
    joined = ' '.join(tokens)
    if clean_func:
        joined = clean_func(joined)
    return sublexicalize(joined, order=order, join=False)
class SublexicalizedCorpus(TextCorpus):
    """Corpus wrapper that cleans and sublexicalizes each base text.

    Texts from base_corpus are run through clean_func and process() (which
    applies preprocessing.sublexicalize with the given order) on a pool of
    n_proc worker processes. Optionally builds a gensim Dictionary at
    construction time, which consumes one full pass over the corpus.
    """
    def __init__(self, base_corpus, order=3, word_limit=None, clean_func=mahoney_clean, create_dictionary=True,
                 n_proc=1):
        self.order = order
        self.clean_func = clean_func
        self.base_corpus = base_corpus
        self.word_limit = word_limit
        self.n_proc = n_proc
        super(SublexicalizedCorpus, self).__init__()
        self.dictionary = Dictionary()
        if create_dictionary:
            self.dictionary.add_documents(self.get_texts())
    def get_texts(self):
        """Yield sublexicalized token lists, stopping near word_limit tokens.

        NOTE(review): time.clock() was removed in Python 3.8 — this method
        requires an older interpreter (or a switch to time.perf_counter()).
        """
        a_count = 0
        t_count = 0
        texts = ((text, self.clean_func, self.order) for text in self.base_corpus.get_texts())
        pool = multiprocessing.Pool(self.n_proc)
        start = time.clock()
        prev = start
        for group in chunkize(texts, chunksize=10 * self.n_proc, maxsize=100):
            for tokens in pool.imap_unordered(process, group):
                a_count += 1
                cur = time.clock()
                if cur - prev > 60:
                    # Progress heartbeat at most once a minute.
                    logging.info("Sublexicalized %d in %d seconds, %.0f t/s"
                                 % (t_count, cur - start, t_count*1. / (cur - start)))
                    prev = cur
                t_count += len(tokens)
                yield tokens
                if self.word_limit and t_count > self.word_limit:
                    # NOTE(review): this break exits only the inner loop; the
                    # outer chunk loop continues with the next group.
                    break
        pool.terminate()
        end = time.clock()
        logging.info("Sublexicalizing %d finished in %d seconds, %.0f t/s"
                     % (t_count, end - start, t_count*1. / (end - start)))
        self.length = t_count
class LimitCorpus(TextCorpus):
    """Corpus wrapper that cleans each text and stops after ~word_limit words."""
    def __init__(self, base_corpus, word_limit):
        super(LimitCorpus, self).__init__()
        self.base_corpus = base_corpus
        self.word_limit = word_limit

    def __len__(self):
        return len(self.base_corpus)

    def __iter__(self):
        words_seen = 0
        articles_seen = 0
        for tokens in self.base_corpus.get_texts():
            words_seen += len(tokens)
            articles_seen += 1
            # Progress dots, 80 per line.
            sys.stdout.write('.')
            if articles_seen % 80 == 0:
                sys.stdout.write('\n')
            yield mahoney_clean(' '.join(tokens)).split()
            if self.word_limit and words_seen > self.word_limit:
                break
# Preprocessors: Mahoney-style cleanup followed by sublexicalization with a
# fixed n-gram order (4, 5 or 6); used as CountVectorizer preprocessors below.
def clean_c4(text_str):
    return sublexicalize(mahoney_clean(text_str), order=4)
def clean_c5(text_str):
    return sublexicalize(mahoney_clean(text_str), order=5)
def clean_c6(text_str):
    return sublexicalize(mahoney_clean(text_str), order=6)
def baseline_pipelines(word_repr_path=None):
    """Return a dict mapping experiment names to ready-to-fit TopicPipeline objects.

    Families of pipelines:
    * ``base_*``      -- bag-of-features CountVectorizer + Naive Bayes,
    * ``bcluster_*``  -- Brown-cluster vectorizers + Naive Bayes,
    * ``base_svm_*``  -- bag-of-features + linear SVM,
    * ``sg_*``        -- skip-gram (word2vec) vectorizers + linear SVM.

    :param word_repr_path: directory holding the cluster/embedding model files;
        defaults to the current working directory.
    :return: dict of experiment name -> TopicPipeline.
    """
    if not word_repr_path:
        word_repr_path = os.getcwd()

    def count_vec(preprocessor):
        # Shared CountVectorizer configuration for all bag-of-features baselines.
        return CountVectorizer(max_features=1000,
                               decode_error='ignore',
                               strip_accents='unicode',
                               preprocessor=preprocessor)

    def bcluster_vec(fname, preprocessor=None):
        # Brown-cluster vectorizer backed by a cluster file under word_repr_path.
        path = os.path.join(word_repr_path, fname)
        if preprocessor is None:
            # Preserve the original no-keyword call for the word-level models,
            # in case BrownClusterVectorizer's default preprocessor is not None.
            return BrownClusterVectorizer(path)
        return BrownClusterVectorizer(path, preprocessor=preprocessor)

    def w2v_svm(fname, preprocessor):
        # Skip-gram embedding vectorizer + linear SVM pipeline.
        return TopicPipeline(Word2VecVectorizer(os.path.join(word_repr_path, fname),
                                                preprocessor=preprocessor),
                             LinearSVC())

    return {
        'base_word': TopicPipeline(count_vec(mahoney_clean), MultinomialNB()),
        # NOTE(review): despite the name, this entry used mahoney_clean as its
        # preprocessor in the original code too -- confirm whether it should
        # really run without preprocessing.
        'base_word_nopreproc': TopicPipeline(count_vec(mahoney_clean), MultinomialNB()),
        'base_c4': TopicPipeline(count_vec(clean_c4), MultinomialNB()),
        'base_c5': TopicPipeline(count_vec(clean_c5), MultinomialNB()),
        'base_c6': TopicPipeline(count_vec(clean_c6), MultinomialNB()),
        'base_mixed': TopicPipeline(MultiVectorizer([count_vec(clean_c4),
                                                     count_vec(clean_c5),
                                                     count_vec(clean_c6)]),
                                    MultinomialNB()),
        'bcluster_word_metaoptimize':
            TopicPipeline(bcluster_vec('brown-rcv1.clean.tokenized-CoNLL03.txt-c3200-freq1.txt'),
                          MultinomialNB()),
        'bcluster_word_wiki8_1024':
            TopicPipeline(bcluster_vec('wiki8-c1024'), MultinomialNB()),
        'bcluster_word_wiki8_2048':
            TopicPipeline(bcluster_vec('wiki8-c2048'), MultinomialNB()),
        'bcluster_c4_wiki8_1024':
            TopicPipeline(bcluster_vec('wiki8-c4-c1024', clean_c4), MultinomialNB()),
        'bcluster_c4_wiki8_2048':
            TopicPipeline(bcluster_vec('wiki8-c4-c2048', clean_c4), MultinomialNB()),
        'bcluster_c5_wiki8_1024':
            TopicPipeline(bcluster_vec('wiki8-c5-c1024', clean_c5), MultinomialNB()),
        'bcluster_c5_wiki8_2048':
            TopicPipeline(bcluster_vec('wiki8-c5-c2048', clean_c5), MultinomialNB()),
        'bcluster_c6_wiki8_1024':
            TopicPipeline(bcluster_vec('wiki8-c6-c1024', clean_c6), MultinomialNB()),
        'bcluster_c6_wiki8_2048':
            TopicPipeline(bcluster_vec('wiki8-c6-c2048', clean_c6), MultinomialNB()),
        'bcluster_mixed_wiki8_1024':
            TopicPipeline(MultiVectorizer([bcluster_vec('wiki8-c4-c1024', clean_c4),
                                           bcluster_vec('wiki8-c5-c1024', clean_c5),
                                           bcluster_vec('wiki8-c6-c1024', clean_c6)]),
                          MultinomialNB()),
        'bcluster_mixed_wiki8_2048':
            TopicPipeline(MultiVectorizer([bcluster_vec('wiki8-c4-c2048', clean_c4),
                                           bcluster_vec('wiki8-c5-c2048', clean_c5),
                                           bcluster_vec('wiki8-c6-c2048', clean_c6)]),
                          MultinomialNB()),
        'base_svm_word': TopicPipeline(count_vec(mahoney_clean), LinearSVC()),
        'base_svm_c4': TopicPipeline(count_vec(clean_c4), LinearSVC()),
        'base_svm_c5': TopicPipeline(count_vec(clean_c5), LinearSVC()),
        'base_svm_c6': TopicPipeline(count_vec(clean_c6), LinearSVC()),
        'sg_word_wiki8_5_1000': w2v_svm('fil8-5-1000.w2v', mahoney_clean),
        'sg_word_wiki8_10_1000': w2v_svm('fil8-10-1000.w2v', mahoney_clean),
        'sg_word_wiki8_5_2000': w2v_svm('fil8-5-2000.w2v', mahoney_clean),
        # BUG FIX: the original loaded 'fil8-5-2000.w2v' here -- the same file
        # as 'sg_word_wiki8_5_2000' -- an apparent copy-paste slip. Corrected
        # to the window-10 model; confirm the file exists on disk.
        'sg_word_wiki8_10_2000': w2v_svm('fil8-10-2000.w2v', mahoney_clean),
        'sg_c4_wiki8_25_1000': w2v_svm('fil8-c4-25-1000.w2v', clean_c4),
        'sg_c4_wiki8_50_1000': w2v_svm('fil8-c4-50-1000.w2v', clean_c4),
        'sg_c4_wiki8_25_2000': w2v_svm('fil8-c4-25-2000.w2v', clean_c4),
        'sg_c4_wiki8_50_2000': w2v_svm('fil8-c4-50-2000.w2v', clean_c4),
        'sg_c5_wiki8_25_1000': w2v_svm('fil8-c5-25-1000.w2v', clean_c5),
        'sg_c5_wiki8_50_1000': w2v_svm('fil8-c5-50-1000.w2v', clean_c5),
        'sg_c5_wiki8_25_2000': w2v_svm('fil8-c5-25-2000.w2v', clean_c5),
        'sg_c5_wiki8_50_2000': w2v_svm('fil8-c5-50-2000.w2v', clean_c5),
        'sg_c6_wiki8_25_1000': w2v_svm('fil8-c6-25-1000.w2v', clean_c6),
        'sg_c6_wiki8_50_1000': w2v_svm('fil8-c6-50-1000.w2v', clean_c6),
        'sg_c6_wiki8_25_2000': w2v_svm('fil8-c6-25-2000.w2v', clean_c6),
        'sg_c6_wiki8_50_2000': w2v_svm('fil8-c6-50-2000.w2v', clean_c6),
    }
def run_experiment(corpus_path, topic_pipeline, n_folds=10, n_jobs=1, verbose=1):
    """K-fold cross-validate ``topic_pipeline`` on the corpus at ``corpus_path``.

    :param corpus_path: path to the article/group corpus on disk.
    :param topic_pipeline: estimator to evaluate (e.g. one from baseline_pipelines()).
    :param n_folds: number of shuffled KFold splits.
    :param n_jobs: parallelism passed through to cross_val_score.
    :param verbose: verbosity level passed through to cross_val_score.
    :return: (mean accuracy, std of accuracy, per-fold scores array).
    """
    # Articles are Mahoney-cleaned up front; topics are the class labels.
    articles = array(ArticleSequence(corpus_path, preprocessor=mahoney_clean))
    topics = array(GroupSequence(corpus_path))

    scores = cross_val_score(topic_pipeline, articles, topics, cv=KFold(len(articles), n_folds=n_folds, shuffle=True),
                             n_jobs=n_jobs, verbose=verbose)

    return mean(scores), std(scores), scores
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    # Load the newsgroups corpus: raw article texts and their group labels.
    articles = ArticleSequence(newsgroups_corpus_path)
    topics = GroupSequence(newsgroups_corpus_path)

    # Word2vec features + multinomial NB; MinMaxScaler keeps features
    # non-negative as MultinomialNB requires.
    model = TopicPipeline(Word2VecVectorizer('../../fil8-5-1000.w2v'), MultinomialNB(),
                          normalizer=MinMaxScaler())

    # 10-fold shuffled cross-validation over the whole corpus.
    scores = cross_val_score(model, articles, topics, verbose=2,
                             cv=KFold(len(articles), n_folds=10, shuffle=True))

    print scores
| true |
db5cd7de7bf991e3cbb34ab753f7f6fe9cfba4a0 | Python | educaris/Microbit | /Les-3_2.py | UTF-8 | 564 | 2.734375 | 3 | [] | no_license | from microbit import *
import random

# Digital die for the BBC micro:bit: button A announces the program,
# shaking the board "rolls" and displays a face from 1 to 6.
while True:
    if button_a.is_pressed():
        # Dutch for "dice".
        display.scroll("Dobbelsteen")
    if accelerometer.was_gesture('shake'):
        display.clear()
        # Same random call as before (0..5); show it as the face 1..6.
        choice = random.randint(0, 5)
        display.show(str(choice + 1))
7322ace29e4b7bc3a3864ad98fd0d4552f2ef59c | Python | thalespaiva/sagelib | /sage/categories/examples/infinite_enumerated_sets.py | UTF-8 | 5,492 | 3.46875 | 3 | [] | no_license | """
Examples of infinite enumerated sets
"""
#*****************************************************************************
# Copyright (C) 2009 Florent Hivert <Florent.Hivert@univ-rouen.fr>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.structure.parent import Parent
from sage.categories.infinite_enumerated_sets import InfiniteEnumeratedSets
from sage.structure.unique_representation import UniqueRepresentation
from sage.rings.integer import Integer
class NonNegativeIntegers(UniqueRepresentation, Parent):
    r"""
    An example of infinite enumerated set: the non negative integers

    This class provides a minimal implementation of an infinite enumerated set.

    EXAMPLES::

        sage: NN = InfiniteEnumeratedSets().example()
        sage: NN
        An example of an infinite enumerated set: the non negative integers
        sage: NN.cardinality()
        +Infinity
        sage: NN.list()
        Traceback (most recent call last):
        ...
        NotImplementedError: infinite list
        sage: NN.element_class
        <type 'sage.rings.integer.Integer'>
        sage: it = iter(NN)
        sage: [it.next(), it.next(), it.next(), it.next(), it.next()]
        [0, 1, 2, 3, 4]
        sage: x = it.next(); type(x)
        <type 'sage.rings.integer.Integer'>
        sage: x.parent()
        Integer Ring
        sage: x+3
        8
        sage: NN(15)
        15
        sage: NN.first()
        0

    This checks that the different methods of `NN` return consistent
    results::

        sage: TestSuite(NN).run(verbose = True)
        running ._test_an_element() . . . pass
        running ._test_category() . . . pass
        running ._test_elements() . . .
          Running the test suite of self.an_element()
          running ._test_category() . . . pass
          running ._test_eq() . . . pass
          running ._test_not_implemented_methods() . . . pass
          running ._test_pickling() . . . pass
          pass
        running ._test_elements_eq() . . . pass
        running ._test_enumerated_set_contains() . . . pass
        running ._test_enumerated_set_iter_cardinality() . . . pass
        running ._test_enumerated_set_iter_list() . . . pass
        running ._test_eq() . . . pass
        running ._test_not_implemented_methods() . . . pass
        running ._test_pickling() . . . pass
        running ._test_some_elements() . . . pass
    """

    def __init__(self):
        """
        TESTS::

            sage: NN = InfiniteEnumeratedSets().example()
            sage: NN
            An example of an infinite enumerated set: the non negative integers
            sage: NN.category()
            Category of infinite enumerated sets
            sage: TestSuite(NN).run()
        """
        # Register this parent in the InfiniteEnumeratedSets category.
        Parent.__init__(self, category = InfiniteEnumeratedSets())

    def _repr_(self):
        """
        TESTS::

            sage: InfiniteEnumeratedSets().example() # indirect doctest
            An example of an infinite enumerated set: the non negative integers
        """
        return "An example of an infinite enumerated set: the non negative integers"

    def __contains__(self, elt):
        """
        EXAMPLES::

            sage: NN = InfiniteEnumeratedSets().example()
            sage: 1 in NN
            True
            sage: -1 in NN
            False
        """
        # Membership test: any value coercible to a non-negative Integer.
        return Integer(elt) >= Integer(0)

    def __iter__(self):
        """
        EXAMPLES::

            sage: NN = InfiniteEnumeratedSets().example()
            sage: g = iter(NN)
            sage: g.next(), g.next(), g.next(), g.next()
            (0, 1, 2, 3)
        """
        # Infinite generator: 0, 1, 2, ... wrapped through the element
        # constructor so each yielded value is a Sage Integer.
        i = Integer(0)
        while True:
            yield self._element_constructor_(i)
            i += 1

    def __call__(self, elt):
        """
        EXAMPLES::

            sage: NN = InfiniteEnumeratedSets().example()
            sage: NN(3)         # indirect doctest
            3
            sage: NN(3).parent()
            Integer Ring
            sage: NN(-1)
            Traceback (most recent call last):
            ValueError: Value -1 is not a non negative integer.
        """
        # Validate membership before constructing the element.
        if elt in self:
            return self._element_constructor_(elt)
        else:
            raise ValueError, "Value %s is not a non negative integer."%(elt)

    def an_element(self):
        """
        EXAMPLES::

            sage: InfiniteEnumeratedSets().example().an_element()
            42
        """
        return self._element_constructor_(Integer(42))

    def next(self, o):
        """
        EXAMPLES::

            sage: NN = InfiniteEnumeratedSets().example()
            sage: NN.next(3)
            4
        """
        return self._element_constructor_(o+1)

    def _element_constructor_(self, i):
        """
        The default implementation of _element_constructor_ assumes
        that the constructor of the element class takes the parent as
        parameter. This is not the case for ``Integer``, so we need to
        provide an implementation.

        TESTS::

            sage: NN = InfiniteEnumeratedSets().example()
            sage: x = NN(42); x
            42
            sage: type(x)
            <type 'sage.rings.integer.Integer'>
            sage: x.parent()
            Integer Ring
        """
        return self.element_class(i)

    # Elements of this parent are plain Sage Integers.
    Element = Integer

# Category framework convention: the example class exposed by
# InfiniteEnumeratedSets().example().
Example = NonNegativeIntegers
| true |
b23536697e4b1b977cb888e08c88432c1cf8a15c | Python | MikeGongolidis/rakoczi-aliens | /settings.py | UTF-8 | 801 | 3.046875 | 3 | [] | no_license |
class Settings():
    """Game configuration: fixed screen/entity settings plus the speeds
    that are reset per game and scaled up as the player progresses."""

    def __init__(self):
        # Static settings -- unchanged for the whole session.
        self.screen_width = 1150
        self.screen_height = 864
        #self.bg_color = (230, 233, 233)
        self.caterpie_speed = 5
        self.fleet_drop_speed = 25
        self.dragon_lifes = 2
        self.fireball_width = 3
        self.fireball_height = 15
        self.fireballs_limit = 3
        # Multiplier applied on every speed-up.
        self.speedup_scale = 1.2
        self.initialize_dynamic_settings()

    def initialize_dynamic_settings(self):
        """Reset the settings that change during play back to their start values."""
        self.dragon_speed = 5
        self.fireball_speed_factor = 10
        self.caterpie_speed = 1
        # 1 means the fleet moves right; -1 means left.
        self.fleet_direction = 1

    def increase_speed(self):
        """Scale every movement speed up by ``speedup_scale``."""
        for attr in ('dragon_speed', 'fireball_speed_factor', 'caterpie_speed'):
            setattr(self, attr, getattr(self, attr) * self.speedup_scale)
| true |
041647453abd1751b983d9d60d59ee638d7e3a96 | Python | Sankarb475/Python_Learning | /Learning/adhoc.py | UTF-8 | 620 | 4.25 | 4 | [] | no_license | 1) repr method
================================================
The repr() function returns a printable representation of the given object.
class Node():
def __init__(self, data):
self.data = data
self.next = None
a = Node(3)
print(repr(a))
output: <__main__.Node object at 0x108452460>
------------------------------------------------
class Node():
def __init__(self, data):
self.data = data
self.next = None
def __repr__(self):
return "this is the node class of linkedlist"
a = Node(3)
print(repr(a))
output: this is the node class of linkedlist
| true |
16f3376f5fc0040e72832acdf0c22cca60327de3 | Python | Dragon631/Python_Learning | /Built-in module/subprocess_module.py | UTF-8 | 379 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import subprocess
# cmd = 'netstat -an'
cmd = 'ipconfig /all'
result_call = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE)
# 成功获取输出内容, 但数据类型是bytes,需要进行decode,windows的默认编码为GBK
# 将返回值进行decode
result = result_call.stdout.read().decode('gbk')
print(result)
| true |
14c260cce0d89d37e4788e786c50e723cb743616 | Python | Amanda1223/Finch | /musicexample.py | UTF-8 | 1,672 | 3.71875 | 4 | [
"MIT"
] | permissive | """
Plays a list of songs. Input a number to choose.
1. Michigan fight song
2. Intro to Sweet Child of Mine
3. Mario Theme Song
Uses notes.py, an add-on library that simplifies buzzer song creation.
Thanks to Justas Sadvecius for the library!
The Finch is a robot for computer science education. Its design is the result
of a four year study at Carnegie Mellon's CREATE lab.
http://www.finchrobot.com
"""
from finch import Finch
from time import sleep
import notes
#Main program for the music player example (runs top to bottom as a script).
#Initialize the Finch robot connection.
finch = Finch()

#One entry per song, as a space-separated note string for notes.sing().
#NOTE: adjacent string literals on consecutive lines are concatenated by
#Python, so this list holds exactly three songs (Michigan fight song,
#Sweet Child of Mine intro, Mario theme).
songList = ['E5 C D E C D E F D E F D E F G A GE F C D E G E D C ',
            'D D5 A4 G G5 A4 F5# A4 D D5 A4 G G5 A4 F5# A4 '
            'E D5 A4 G G5 A4 F5# A4 E D5 A4 G G5 A4 F5# A4 '
            'G D5 A4 G G5 A4 F5# A4 G D5 A4 G G5 A4 F5# A4 '
            'D D5 A4 G G5 A4 F5# A4 D D5 A4 G G5 A4 F5# A4 ',
            'E5 E E C E G G4 C5 G4 E A BBb A G '
            'E5 G A F G E C D B4 C5 G4 E A BBb A G '
            'E5 G A F G E C D B4 - G5 Gb F D# E G4# A C5 A4 C5 D '
            'G5 Gb F D# E C6 C6 C6 '
            'G5 Gb F D# E G4# A C5 A4 C5 D Eb D C '
            ' G5 Gb F D# E G4# A C5 A4 C5 D G5 Gb F D# E C6 C C ']
#Per-song note duration in seconds, index-matched with songList.
timeList = [0.18,0.1,0.1]

#Start at 1 so the loop body runs at least once; any choice outside 1..3 exits.
song = 1
while song > 0 and song < 4:
    #get which song to play (the two literals form one prompt string)
    song = int(input("Enter 1 for the Michigan fight song, 2 for Sweet Child of Mine,"
                     "3 for the Mario theme song; any other number to exit."))
    if song >=1 and song <= 3:
        #Play the chosen song on the Finch's buzzer.
        notes.sing(finch, songList[song -1],timeList[song-1])
    else:
        print('Exiting...')
| true |
30f5ac53760ca3f672505eac58da3626a9dd969b | Python | WeiSen0011/ImageProcessing-Python | /blog36-jhbh/blog36-06-xz.py | UTF-8 | 549 | 2.796875 | 3 | [] | no_license | #encoding:utf-8
#By:Eastmount CSDN 2021-02-01
import cv2
import numpy as np
#读取图片
src = cv2.imread('test.bmp')
#源图像的高、宽 以及通道数
rows, cols, channel = src.shape
#绕图像的中心旋转
M = cv2.getRotationMatrix2D((cols/2, rows/2), 30, 1) #旋转中心 旋转度数 scale
rotated = cv2.warpAffine(src, M, (cols, rows)) #原始图像 旋转参数 元素图像宽高
#显示图像
cv2.imshow("src", src)
cv2.imshow("rotated", rotated)
#等待显示
cv2.waitKey(0)
cv2.destroyAllWindows()
| true |
efc70559626421e7a57fe4f5de4b1838d7b383d1 | Python | bestgopher/dsa | /hash_map/chain_hash_map.py | UTF-8 | 1,000 | 3.125 | 3 | [
"MIT"
] | permissive | from hash_map.hash_map_base import HashMapBase
from hash_map.unsorted_table_map import UnsortedTableMap
class ChainHashMap(HashMapBase):
    """Hash map implemented with separate chaining for collision resolution.

    Each occupied slot of ``self._table`` holds a secondary UnsortedTableMap
    ("bucket") containing every (key, value) pair hashed to that slot.
    """

    def _bucket_getitem(self, j, key):
        """Return the value for ``key`` in bucket ``j``; raise KeyError if absent."""
        bucket = self._table[j]
        if bucket is None:
            raise KeyError("Key Error: " + repr(key))
        return bucket[key]

    def _bucket_setitem(self, j, key, value):
        """Insert or overwrite ``key`` in bucket ``j``, creating it on demand."""
        # BUG FIX: was ``self._table(j)`` -- a call on the table list, which
        # raises TypeError on every insert into an empty slot. Index instead.
        if self._table[j] is None:
            self._table[j] = UnsortedTableMap()
        old_size = len(self._table[j])
        self._table[j][key] = value
        if len(self._table[j]) > old_size:
            # The bucket grew, so the key was genuinely new to the map.
            self._n += 1

    def _bucket_delitem(self, j, key):
        """Remove ``key`` from bucket ``j``; raise KeyError if absent."""
        bucket = self._table[j]
        if bucket is None:
            raise KeyError("Key Error: " + repr(key))
        del bucket[key]

    def __iter__(self):
        """Yield every key in the map, bucket by bucket (unspecified order)."""
        for bucket in self._table:
            if bucket:
                for key in bucket:
                    yield key
| true |
1bdedec4678ac7f176958f9a2b211cac2e4ede5f | Python | ElectricR/PyTrainer | /gui.py | UTF-8 | 2,102 | 3.328125 | 3 | [] | no_license | from engine import Engine
import tkinter as tk
class GUI:
    """Tkinter front-end for the radical trainer: shows a character and
    checks typed answers against the Engine."""

    def __init__(self):
        # Quiz logic lives in Engine; the GUI only displays and forwards input.
        self.engine = Engine()
        self.window = tk.Tk()
        self.window.bind('<Escape>', self.close)
        self.window.geometry('900x500')
        self.window.title('Radicals')

        self.reset_button = tk.Button(self.window, text='Reset', command=self.reset)
        self.reset_button.pack(side='left', anchor='n')

        self.quit_button = tk.Button(self.window, text='Quit', command=self.close)
        self.quit_button.pack(side='right', anchor='n')

        # StringVar so the big character label updates when set() is called.
        self.character = tk.StringVar()
        self.character_label = tk.Label(self.window, textvariable=self.character, font=('Arial', '200'))
        self.character_label.pack(side='top')

        self.dialog_label = tk.Label(self.window, text='What\'s this?')
        self.dialog_label.pack(side='top')

        self.entry = tk.Entry(self.window)
        self.entry.pack(side='top')
        self.entry.focus()

        self.check_button = tk.Button(self.window, text='Check', command=self.check)
        self.check_button.pack(side='top', pady=32)

        # Feedback labels are created unpacked; check() packs one as needed.
        self.wrong_answer_label = tk.Label(self.window, text='Wrong answer!')
        self.correct_answer_label = tk.Label(self.window, text='Correct!')

        self.update_character()

        # Blocks until the window is closed.
        self.window.mainloop()

    def close(self, event=None):
        """Destroy the window (bound to Escape and the Quit button)."""
        self.window.destroy()

    def update_character(self):
        """Show the current lesson's character (second item of the lesson tuple)."""
        self.character.set(self.engine.get_lesson()[1])

    def check(self):
        """Validate the typed answer; advance on success, flag on failure."""
        # Hide any previous feedback before showing new feedback.
        self.wrong_answer_label.pack_forget()
        self.correct_answer_label.pack_forget()
        if self.engine.check(self.entry.get()):
            self.character.set(self.engine.get_lesson()[1])
            self.correct_answer_label.pack(side='top')
            self.entry.delete(0, 'end')
        else:
            self.wrong_answer_label.pack(side='top')

    def reset(self):
        """Restart the quiz: clear input and feedback, reset the engine."""
        self.entry.delete(0, 'end')
        self.wrong_answer_label.pack_forget()
        self.correct_answer_label.pack_forget()
        self.engine.reset()
        self.character.set(self.engine.get_lesson()[1])
| true |
72fdcc5d51a07bb5428c7535775af513f07ce22b | Python | fomalhaut88/passstore | /models/commands/Delkey.py | UTF-8 | 435 | 2.859375 | 3 | [] | no_license | from Command import Command
class Delkey(Command):
    """Command that removes a single key from a stored password entry."""

    title = "delkey"

    def __init__(self, pass_storage, title, key):
        super(Delkey, self).__init__(pass_storage)
        self._title = title
        self._key = key

    def execute(self):
        """Delete the key from the entry; report whether anything was removed."""
        removed = self._pass_storage.delkey(self._title, self._key)
        return "The key has been removed." if removed else "The key not found."
| true |
146172840856112d8a34dc42d4b2e2ab1a6a638b | Python | earvingemenez/baymax | /baymax.py | UTF-8 | 2,172 | 2.6875 | 3 | [] | no_license | import string, cgi, time
from os import curdir, sep
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class ServerHandler(BaseHTTPRequestHandler):
    """Simple webserver built to serve static web pages from the current
    directory. (I don't know why i named it baymax so don't ask why. haha!)

    Only whitelisted file extensions are served; anything else gets a 404.
    """

    # Map of servable file extensions to their MIME types.
    EXTENSION_TYPES = {
        '.html': 'text/html',
        '.php': 'text/html',
        '.js': 'text/javascript',
        '.css': 'text/css',
        '.jpg': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        '.png': 'image/png',
    }

    def do_GET(self):
        """Answer a GET request with the requested static file.

        Fixes over the original version:
        * the extension is validated *before* any status is sent, so an
          unknown extension no longer receives a 200 line followed by a
          404 error page;
        * files are opened in binary mode so images are not corrupted on
          platforms that translate line endings.
        """
        content_type = None
        for extension, mime_type in self.EXTENSION_TYPES.items():
            if self.path.endswith(extension):
                content_type = mime_type
                break

        if content_type is None:
            self.send_error(404, "I don't know that page. get lost!")
            return

        try:
            f = open(curdir + sep + self.path, 'rb')
        except IOError:
            self.send_error(404, "I don't know that page. get lost!")
            return

        try:
            self.send_response(200)
            self.send_header('Content-type', content_type)
            self.end_headers()
            self.wfile.write(f.read())
        finally:
            # Close the file even if the client disconnects mid-write.
            f.close()
if __name__ == '__main__':
    try:
        # Bind to all interfaces on port 80 (usually needs admin privileges).
        server = HTTPServer(('', 80), ServerHandler)
        print "Initializing Baymax..."
        print
        print "Hi, I'm Baymax. I'm your personal health care companion."
        print
        print "The server is now running..."
        # Serve requests until interrupted.
        server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C: say goodbye and release the listening socket.
        print
        print "Are you satisfied with your care? :'("
        print
        print "Baymax is going back to sleep now."
        server.socket.close()
4dde0946f339d2845b593c67e9fe6ea071d0365d | Python | jwelker110/store_backend | /store_app/blueprints/item.py | UTF-8 | 4,292 | 2.65625 | 3 | [] | no_license | from flask import Blueprint, request
from json import loads
from string import lower
from store_app.database import Item, CategoryItems, Category
from store_app.extensions import db
from helpers import create_response, convertToInt, decode_jwt
# Blueprint grouping all /api/v1/items* endpoints; registered by the app factory.
item_bp = Blueprint('item_bp', __name__)
@item_bp.route('/api/v1/items.json', methods=['GET', 'POST'])
def items_ep():
    """List items (GET) or create an item for the authenticated user (POST).

    GET query args: ``offset`` (pagination, pages of 25), ``category``
    (optional category-name filter).
    POST body (JSON): item fields plus ``jwt_token`` for authorization.
    """
    # todo this needs to return an array of items, instead of returning a
    # JSON object with an array of items attached
    if request.method == 'GET':
        offset = convertToInt(request.args.get('offset'))
        category = request.args.get('category')

        if category is None:
            items = Item.query.offset(offset).limit(25).all()
        else:
            # Join through the category association table.
            # NOTE(review): filter_by(name=category) presumably applies to the
            # last-joined entity (Category) -- verify against SQLAlchemy docs.
            items = db.session.query(Item)\
                .outerjoin(CategoryItems)\
                .outerjoin(Category)\
                .filter_by(name=category).offset(offset).limit(25).all()

        return create_response({"items": items})

    elif request.method == 'POST':
        data = loads(request.data)

        # item info
        name = data.get('name')
        description = data.get('description')
        price = data.get('price')
        sale_price = data.get('sale_price')
        image_url = data.get('image_url')
        stock = data.get('stock')

        # jwt to ensure user is authorized
        payload = decode_jwt(data.get('jwt_token'))

        # 401 for a missing/invalid token or an unconfirmed account.
        if payload is None:
            return create_response({}, status=401)

        if payload.get('confirmed') is False:
            return create_response({}, status=401)

        # name and price are the only required fields.
        if name is None or price is None:
            return create_response({}, status=400)

        try:
            item = Item(
                owner_name=lower(payload.get('username')),
                name=name,
                description=description,
                price=price,
                image_url=image_url,
                sale_price=sale_price,
                stock=stock
            )
            db.session.add(item)
            db.session.commit()
            return create_response({})
        except:
            # NOTE(review): bare except hides the real failure (e.g. duplicate
            # name vs. DB outage); consider catching SQLAlchemyError and logging.
            db.session.rollback()
            return create_response({}, status=500)
@item_bp.route('/api/v1/items/details.json', methods=['GET', 'PUT'])
def item_details_ep():
    """Fetch one item by name (GET) or update an owned item's fields (PUT).

    GET query args: ``name`` (item lookup key).
    PUT body (JSON): ``name`` plus any fields to change, and ``jwt_token``;
    only the item's owner may update it.
    """
    # todo this needs to return a JSON object representing the item with the
    # item meta as an attribute of the object
    if request.method == 'GET':
        name = request.args.get('name')
        if name is None:
            item = []
        else:
            item = Item.query.filter_by(name=name).first()
        # tuples are returned from the query so must be accessed via index
        return create_response({"item": item})

    elif request.method == 'PUT':
        data = loads(request.data)

        # item info
        name = data.get('name')
        description = data.get('description')
        image_url = data.get('image_url')
        price = data.get('price')
        sale_price = data.get('sale_price')
        stock = data.get('stock')

        payload = decode_jwt(data.get('jwt_token'))

        if payload is None:
            return create_response({}, status=401)

        # get the item
        item = Item.query.filter_by(name=name).first()

        # does the item exist? how about the item meta?
        if item is None:
            return create_response({}, status=400)

        # does the user actually own the item?
        if item.owner_name != lower(payload.get('username')):
            # can't change someone else' item
            return create_response({}, status=401)

        try:
            # everything checks out, update the item; None means "keep current".
            # NOTE(review): name is also the lookup key, so item.name can never
            # actually change here -- a rename would need a separate id/key.
            item.name = item.name if name is None else name
            item.description = item.description if description is None else description
            item.image_url = item.image_url if image_url is None else image_url
            item.price = item.price if price is None else price
            item.sale_price = item.sale_price if sale_price is None else sale_price
            item.stock = item.stock if stock is None else stock
            db.session.commit()
            return create_response({})
        except:
            # NOTE(review): bare except -- see items_ep; narrow and log.
            db.session.rollback()
            return create_response({}, status=500)
| true |
30a44d2873bd38bdac462ec164d5823b9305334b | Python | mbenitezm/taskr | /bin/lib/taskr/Taskr.py | UTF-8 | 6,693 | 2.515625 | 3 | [] | no_license | import yaml, sys, logging
from termcolor import colored
from prettytable import PrettyTable
from os.path import expanduser
from os.path import isdir
from os import mkdir
from Utils import Utils
from Exceptions import *
from Task import Task
from WorkSession import WorkSession
class Taskr():
    """Task/time tracker (Python 2). Persists the task list as YAML in
    ~/.taskr/task_log and prints colored tables via Utils/prettytable.

    Class-level state: ``Taskr.tasks`` holds all loaded Task objects and is
    shared by the static helpers below.
    """

    # File and directory names under the user's home directory.
    taskslog_name = "task_log"
    errorlog = "error.log"
    taskr_path = ".taskr/"
    root = "."

    def __init__(self):
        # Ensure ~/.taskr exists, wire up error logging, then load the tasks.
        home = expanduser("~") + "/"
        if not isdir(home+".taskr"):
            mkdir(home + ".taskr")
        Taskr.root = home + self.taskr_path
        logging.basicConfig(filename=Taskr.root + self.errorlog, level=logging.DEBUG)
        self.taskslog_path = Taskr.root + self.taskslog_name
        try:
            self.log = open(self.taskslog_path,"r+")
        except IOError as ioe:
            # First run: create an empty log file.
            self.log = open(self.taskslog_path,"w+")
        except Exception as e:
            print "Unexpected error ocurred"
            logging.error(e)
            sys.exit(1)
        self.__loadtasks()
        self.log.close()

    def __loadtasks(self):
        """Populate Taskr.tasks from the open log file (exits on failure)."""
        try:
            Taskr.load_all(self.log) or []
        except Exception as e:
            print "Error loading tasks"
            logging.error(e)
            sys.exit(1)

    def saveTasks(self):
        """Serialize Taskr.tasks back to the log file as YAML."""
        try:
            self.log = open(self.taskslog_path,"w+")
            self.log.write(yaml.dump(Taskr.tasks))
            self.log.close()
        except IOError as ioe:
            print "Error while saving"
            # NOTE(review): 'e' is undefined here (the handler binds 'ioe'),
            # so a save failure raises NameError instead of being logged.
            logging.error(e)
            sys.exit(1)

    @staticmethod
    def load_all(tasklog):
        """Load and return the task list from a YAML file object (exits on failure)."""
        try:
            Taskr.tasks = yaml.load(tasklog) or []
            return Taskr.tasks
        except Exception as e:
            print e
            print "Error loading tasks"
            logging.error(e)
            sys.exit(1)

    @staticmethod
    def find(taskid):
        """Return the task whose id starts with ``taskid`` (8-char prefix match).

        Raises NoTasksException when the list is empty, TaskNotFoundException
        when no id matches.
        """
        if len(Taskr.tasks) == 0:
            raise NoTasksException()
        else:
            for element in Taskr.tasks:
                if element.id[0:8] == str(taskid):
                    return element
            exm = "Task "+str(taskid)+" not found"
            raise TaskNotFoundException(exm)

    def orderData(self):
        """Build ``self.weeklog`` (sessions sorted by start time) and
        ``self.weeklog_total`` (summed session duration)."""
        weeklog = {}
        for task in Taskr.tasks:
            for session in task.worklog:
                # Keyed by start time; a later session with the same start
                # time silently overwrites the earlier one.
                k = session.start_time
                weeklog[k] = (task,session)
        self.weeklog = sorted(weeklog.iteritems(), key=lambda key_value: key_value[0])
        self.weeklog_total = 0
        for (time,(task,session)) in self.weeklog:
            self.weeklog_total += session.duration

    def printWeeklog(self):
        """Print all work sessions in chronological order as a table."""
        if len(Taskr.tasks) > 0:
            print "This weeks weeklog"
            output = Utils.weeklogHeader()
            output.align["Date"]
            Utils.tags["-"] = Utils.colorTags("-")
            self.orderData()
            for (time,(task,session)) in self.weeklog:
                r = [Utils.dateonlyfmt(time),Utils.houronlyfmt(time),Utils.hourstohuman(session.duration),session.location,task.name,Utils.colorTags(task.tag)]
                output.add_row(r)
            print output.get_string(border=False)
        else:
            print "You currently don't have any registered tasks"

    def printTask(self,task=None):
        """Print one task's summary row followed by its work-session table."""
        if task is None:
            return False
        output = Utils.tableHeader(True)
        output.align["Task"]
        Utils.tags["-"] = Utils.colorTags("-")
        output.add_row(task.to_row(True))
        print output.get_string(border=False)
        print ""
        print "Task worklog:"
        wsess = Utils.workSessionsTableHeader()
        wsess.align["Start"]
        wsess.align["Related Commits"] = "l"
        for ws in task.worklog:
            wsess.add_row(ws.to_row())
        print wsess.get_string(border=False)

    def printTasks(self,all=False,detailed=False):
        """Print the task table (last 5 tasks, or every task when ``all``)."""
        if len(Taskr.tasks) > 0:
            print "Your current task log:"
            output = Utils.tableHeader(detailed)
            output.align["Task"]
            Utils.tags["-"] = Utils.colorTags("-")
            for task in Taskr.tasks[-5:] if not all else Taskr.tasks:
                output.add_row(task.to_row(detailed))
            print output.get_string(border=False)
        else:
            print "You currently don't have any registered tasks"

    def taskInfo(self,specific_task = False):
        """Show one task by id, or the full detailed list when no id is given."""
        try:
            if specific_task:
                task = Taskr.find(specific_task)
                self.printTask(task)
            else:
                self.printTasks(True,True)
        except Exception as e:
            print e

    def closeCurrentTask(self):
        """Close the most recent task and re-insert it after the closed ones.

        Keeps closed (status 0) tasks grouped at the front of the list so the
        tail stays the 'active' region.
        """
        try:
            last_task = Taskr.tasks[-1]
            if last_task.close():
                task_count = len(Taskr.tasks)
                if task_count > 1:
                    Taskr.tasks.pop()
                    i = -1
                    # Scan backwards past still-open tasks.
                    # NOTE(review): '-task_count < i is not None' is a chained
                    # comparison ((-task_count < i) and (i is not None));
                    # the 'is not None' part is always True for an int.
                    while -task_count < i is not None and Taskr.tasks[i].status != 0:
                        i = i - 1
                    Taskr.tasks.insert(i+1,last_task) if i != -1 else Taskr.tasks.append(last_task)
                self.printTask(last_task)
            else:
                raise TaskNotFoundException("")
        except TaskNotFoundException as nte:
            raise TaskNotFoundException("")
        except IndexError as ie:
            # Empty task list: nothing to close.
            pass
        except Exception as e:
            print e

    def pauseCurrentTask(self):
        """Pause the most recent task; raise TaskNotFoundException if none."""
        try:
            last_task = Taskr.tasks[-1]
            last_task.pause()
        except IndexError as ie:
            raise TaskNotFoundException("")

    def renewTaskAt(self,when):
        """Restart the current task's session clock at ``when``."""
        try:
            last_task = Taskr.tasks[-1]
            last_task.renewAt(when)
        except IndexError as ie:
            raise TaskNotFoundException("")

    # TODO
    def openTask(self,task_id=None):
        """Reopen a closed task by id and move it to the active (tail) position."""
        try:
            self.pauseCurrentTask()
        except TaskNotFoundException as nte:
            pass
        try:
            task = Taskr.find(task_id)
            if task.status == 0:
                task.status = 1
                task.start()
                # Move to the end of the list: the 'current' slot.
                Taskr.tasks.remove(task)
                Taskr.tasks.append(task)
                print "Reopened task: "+task_id
        except Exception as e:
            print colored("No task found by id: " + str(task_id),"cyan")
            self.printTasks()

    # TODO
    def resumeCurrentTask(self,task_id=True):
        """Resume a task (by id, or the most recent when no id is given)."""
        try:
            last_task = Taskr.tasks[-1]
            last_task.pause()
            task_to_resume = Taskr.find(task_id) if task_id != True else Taskr.tasks[-1]
            task_to_resume.resume()
            self.__upriseTask(task_to_resume)
        except Exception as e:
            print e
            print colored("No paused task","cyan")
            self.printTasks()

    def __upriseTask(self,task):
        """Move ``task`` to the end of the list (the 'current task' slot)."""
        Taskr.tasks.remove(task)
        Taskr.tasks.append(task)

    # TODO
    def deleteTask(self,task_id=True):
        """Remove the task with the given id; an explicit id is required."""
        try:
            if task_id != True:
                last_task = Taskr.find(task_id)
            else:
                raise TaskNotFoundException("")
            Taskr.tasks.remove(last_task)
        except Exception as e:
            print colored("Couldn't delete task","cyan")
            self.printTasks()

    # Complete
    def newTask(self, name=None, estimated=None, tag=""):
        """Create and start a new task, pausing the current one first."""
        name = colored("Untitled","red") if name == None else name
        estimated = 0.0 if estimated == None else estimated
        try:
            self.pauseCurrentTask()
        except TaskNotFoundException as nte:
            # No current task to pause: fine for the very first task.
            pass
        t = Task({ "name": name, "tag": tag, "estimated": estimated, })
        t.start()
        Taskr.tasks.append(t)
| true |
7c518120cab98a92cce5fe2aef1987dda080dc14 | Python | Lethons/PythonExercises | /PythonProgram/chapter_04/4-10.py | UTF-8 | 225 | 4.1875 | 4 | [] | no_license | ls = [x for x in range(1, 11)]
print("The first three items in the list are:" + str(ls[0:3]))
print("Three items from the middle of the list are:" + str(ls[2:5]))
print("The last three items in the list are:" + str(ls[-3:]))
| true |
a5632cd84b73263fc253fe1ebb8d98351bef689e | Python | chalitgubkb/python | /ฝึกทำในหนังสือ/1.7.py | UTF-8 | 598 | 4 | 4 | [] | no_license | #วิธีการเขียนแบบ ธรรมดา
n1 = int(input('Enter Your Num 1: '))
n2 = int(input('Enter Your Num 2: '))
print(n1,'+',n2,'= %d' %(n1+n2))
print(n1,'-',n2,'= %d' %(n1-n2))
print(n1,'*',n2,'= %d' %(n1*n2))
print(n1,'/',n2,'= %d' %(n1/n2))
#วิธีการเขียนแบบ while
n = 1
o = []
while n<=2:
i = int(input('Enter Your Number : '))
o.append(i)
n = n+1
print(o[0],'+',+o[1],'= %d' %(o[0]+o[1]))
print(o[0],'-',+o[1],'= %d' %(o[0]-o[1]))
print(o[0],'*',+o[1],'= %d' %(o[0]*o[1]))
print(o[0],'/',+o[1],'= %d' %(o[0]/o[1]))
| true |
c362d7e5bb852959cedd3fe32f70a2481ef460ac | Python | wgcn96/HPSVD | /draw/performance/demo.py | UTF-8 | 1,347 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
note something here
"""
__author__ = 'Wang Chen'
__time__ = '2019/7/24'
# if __name__ == '__main__':
# import matplotlib.pyplot as plt
#
# fig, ax = plt.subplots()
# ax.set_xscale('log', basex=5)
# ax.set_yscale('log', basey=2)
#
# ax.plot(range(1024))
# plt.show()
if __name__ == '__main__':
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoLocator, ScalarFormatter
Temp=[10,12.5,15,17.5,20,22.5,25,27.5,30,32.5,35,37.5,40,42.5,45,47.5,50]
I_threshold = [22.376331312083646, 22.773439481450737, 23.440242034972115,
23.969920199339803, 24.80014584753161, 25.275728442307503,
26.291852943772966, 26.969268640398795, 28.09683889698702,
28.952552190706545, 30.325961112054102, 31.488435380923281,
33.176033568454699, 34.613872631424236, 36.710165595581906,
38.567151879424728, 41.245216030694756]
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.yaxis.set_major_locator(AutoLocator())
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.minorticks_off()
ax.scatter(Temp,I_threshold)
plt.xlabel('$ T_c \ (^\circ C)$')
plt.ylabel('$ I_t \ (mA) $')
plt.grid(True,which="major",color='black',ls="-",linewidth=0.5)
plt.show()
| true |
dd9cefff6d2b17fe514ec17753c97e229aca5094 | Python | stevenjwheeler/AntiScamAI | /record_engine.py | UTF-8 | 2,896 | 2.578125 | 3 | [] | no_license | from threading import Thread
from queue import Queue, Empty
import speech_recognition as sr
import os
import time
import random
import wit_response_engine
import gvoice_response_engine
import error_reporter
import logger
def setMicrophone(indexnumber):
    """Return the device index to use as the audio input."""
    return indexnumber
def setRecognitionSettings():
    """Build and return a speech recognizer tuned for short phrases."""
    recognizer = sr.Recognizer()
    # One second of silence marks the end of the current phrase.
    recognizer.pause_threshold = 1
    # Keep 0.2 seconds of padding on each side of every recording.
    recognizer.non_speaking_duration = 0.2
    return recognizer
def listen_loop(queue, microphone, recognition):
    """Background worker: capture phrases from the microphone forever.

    Every captured phrase is pushed onto ``queue`` for the main loop to
    consume.  Runs until the process exits (started as a daemon thread).
    """
    with sr.Microphone(device_index=microphone) as source:
        print("[ ] Starting listening thread")
        logger.log("Starting listening thread.")
        logger.log("Boot completed.")
        while True:
            print("[ ] Listening for new sentence")
            # Block until a phrase is heard, then hand it to the consumer.
            queue.put(recognition.listen(source))
def listen(microphone, witapikey, recognition, random_filler):
    """Main voice loop: consume captured audio, or speak filler while idle.

    microphone:    device index handed to the background listening thread.
    witapikey:     API key for Wit.ai speech recognition.
    recognition:   a configured speech_recognition.Recognizer.
    random_filler: list of filler phrases spoken when nobody is talking.
    Runs forever; never returns.
    """
    audio_queue = Queue() #initialize queue for audio data
    print("[ ] Starting voice engine")
    logger.log("Starting voice engine.")
    # Capture happens on a daemon thread so this loop only consumes the queue.
    listen_thread = Thread(target=listen_loop, args=[audio_queue, microphone, recognition], daemon=True)
    listen_thread.start()
    while True:
        try:
            # Wait a random 3-7 seconds for audio so the filler cadence varies.
            audio = audio_queue.get(block=True,timeout=random.randrange(3,8))
        except Empty:
            #send filler content if no speaking is occuring
            try:
                print("[ ] Speaking filler")
                gvoice_response_engine.synthesize_text(random.choice(random_filler))
            except:
                # NOTE(review): bare except — any failure (not just a missing
                # random_filler) lands here and is only logged.
                print("[!!!] SPEAKING FILLER FAILED - IS random_filler NOT LOADED?")
                error_reporter.reportError("App could not speak filler")
                logger.log("App could not speak filler.")
        else:
            try:
                #recognise mic audio using wit.ai
                # The elapsed time is passed on so the responder can account
                # for recognition latency.
                start = time.time()
                wit_response = recognition.recognize_wit(audio, key=witapikey, show_all=True)
                end = time.time() - start
                wit_response_engine.respond(wit_response, end)
            #errors
            except sr.UnknownValueError:
                print("[!!!] WIT.AI COULD NOT UNDERSTAND THE AUDIO")
                error_reporter.reportError("Wit.ai could not understand the provided audio")
                logger.log("Wit.ai could not understand the provided audio.")
            except sr.RequestError as e:
                print("[!!!] COULD NOT REQUEST RESULTS FROM WIT.AI; {0}".format(e))
                error_reporter.reportError("App was unable to connect to Wit.ai: {0}".format(e))
                logger.log("App was unable to connect to Wit.ai: {0}".format(e))
9cb83b34d51cf6b178af922227199e046d97d4f9 | Python | MikaMahaputra/Binus | /Mr Jude Project/ATM/Interface.py | UTF-8 | 3,697 | 3.484375 | 3 | [] | no_license | #Importing preivous files
import Account
import Atm
from pygame import mixer
def music():
    """Start looping background music (wii.mp3) via pygame's mixer."""
    mixer.init()
    mixer.music.load("wii.mp3")
    # loops=-1 repeats the track indefinitely.
    mixer.music.play(loops=-1)
def nope():
    """Stop the background music and play the goodbye sound (nope.mp3) once."""
    mixer.music.stop()
    mixer.music.load("nope.mp3")
    mixer.music.play()
#Call To Play The Music
music()
#Variables In Account Class
# Interactive setup: the account is configured from console input at startup.
bankNumber= int(input("Please enter your bank number: "))
pin = int(input("Please enter your PIN: "))
balance= int(input("Please enter your balance: "))
myAccount= Account.Account(balance, pin, bankNumber)
#Variables In Atm Class
bankName= (input("Please enter your bank name: "))
atmLocation= (input("Please enter your atm location: "))
myAtm = Atm.ATM(bankName,atmLocation)
#Variables for interface
# Working variables for the menu loop below; the two flags keep the
# outer (PIN) and inner (menu) loops running until the user exits.
inputPin= 0
inputChoice= 0
ammountTaken= 0
accountTransfer = 0
ammountTransfer = 0
checkLoop = True
checkLoop2 = True
#Function To Confirm An Action
def ConfirmationMenu():
    """Ask whether to run another transaction.

    Returns True to continue with the menu loop, False to end the session
    (after playing the goodbye sound).

    Bug fix: the original returned None (falsy) for any input other than 1
    or 2, which silently terminated the session without the goodbye message.
    Invalid choices now re-prompt until the user picks a listed option.
    """
    while True:
        print("Do You Want To Do Another Transaction ?")
        print("1. Yes")
        print("2. No")
        inputChoice= int(input("Please Choose An Option\n "))
        if (inputChoice == 1):
            print("\033[H\033[J")
            return True
        elif(inputChoice ==2):
            print ("\033[H\033[J")
            print("Thank You For Using This ATM")
            nope()
            return False
        # Anything else: tell the user and ask again.
        print("Invalid option, please choose 1 or 2.")
#Main interactive ATM loop.
#
#Review fixes applied to the original:
#  * Two stray duplicated lines (`checkloop = ConfirmationMenu()` /
#    `checkLoop2= checkLoop`) sat at the wrong indentation directly before an
#    `elif` clause — that is a SyntaxError, so the script could not run at
#    all.  The duplicate pair has been removed.
#  * `checkloop` (lower-case L) was a typo for `checkLoop`: it bound a brand
#    new variable, so answering "No" after a transfer never ended the loop.
#  * The bare expression `("\033[H\033[J")` had no effect; it is now printed
#    so the console is actually cleared before the menu is shown.
print("\033[H\033[J") #Used To Clear Console
print("Welcome To", myAtm.getbankName())
while checkLoop2:
    inputPin= int(input("Please Enter Your Pin\n"))
    if(myAtm.CheckPin(inputPin,myAccount.getPin()) == True):
        while (checkLoop):
            # Clear the console, then show the main menu.
            print("\033[H\033[J")
            print("1. Check Balance")
            print("2. Withdraw Money")
            print("3. Transfer")
            print("4. Exit")
            inputChoice = int(input("Please Select An Option "))
            if(inputChoice == 1):
                # Show the current balance.
                print("\033[H\033[J")
                print("Your Current Balance Is\n", myAtm.CheckBalance(myAccount.getBalance()))
                checkLoop = ConfirmationMenu()
                checkLoop2= checkLoop
            elif(inputChoice == 2):
                # Withdraw money if the balance allows it.
                print("\033[H\033[J")
                ammountTaken = int(input("Please Enter The Amount To Withdraw \n"))
                if(myAtm.Withdraw(myAccount.getBalance(), ammountTaken) == True):
                    myAccount.setBalance(myAtm.SetNewBalance(myAccount.getBalance(), ammountTaken))
                    print("Withdrawal Successfull")
                else:
                    print("Insufficient Balance")
                checkLoop = ConfirmationMenu()
                checkLoop2= checkLoop
            elif(inputChoice == 3):
                # Transfer money to another account.
                print("\033[H\033[J")
                accountTransfer= int(input("Please Enter An Account Number \n"))
                ammountTransfer= int(input("Please Enter How Much To Transfer \n"))
                if(myAtm.Transfer(myAccount.getBalance(), accountTransfer, ammountTransfer) == True):
                    myAccount.setBalance(myAtm.SetNewBalance(myAccount.getBalance(), ammountTransfer))
                    print("Transfer Successfull")
                else:
                    print("Please Check Your Transaction Again")
                checkLoop = ConfirmationMenu()
                checkLoop2= checkLoop
            elif(inputChoice == 4):
                # Explicit exit: say goodbye and stop both loops.
                print ("\033[H\033[J")
                print ("Thank You For Using This ATM")
                nope()
                checkLoop = False
                checkLoop2 = False
    else:
        print("\033[H\033[J")
        print("Invalid PIN ")
e154e227ec68a36f7b1f3792bbf51a8defb63cb3 | Python | ccqpein/Arithmetic-Exercises | /Buy-and-Sell-Stock/BaSS.py | UTF-8 | 963 | 3.28125 | 3 | [
"Apache-2.0"
] | permissive | test1 = [] # return 0
# Manual fixtures for Solution.maxProfit below; the trailing comment on each
# line is the expected return value.
test2 = [2, 1, 2, 1, 0, 1, 2] # return 2
test3 = [1] # return 0
test4 = [7, 1, 5, 3, 6, 4] # return 5
test5 = [2, 4, 1] # return 2
class Solution(object):
    """LeetCode 121 "Best Time to Buy and Sell Stock".

    Review fixes applied to the original implementation:
      * removed the debug ``print`` statements that polluted stdout,
      * removed the ``try``/``except`` used only to probe ``prices[1]`` —
        empty and single-element inputs are handled by the loop itself,
      * replaced the convoluted min/max bookkeeping with a single O(n) pass.
    """

    def maxProfit(self, prices):
        """Return the maximum profit from one buy followed by one later sell.

        :type prices: List[int]
        :rtype: int -- 0 when no profitable transaction exists (including
                empty or single-element input).
        """
        best_profit = 0
        lowest = None  # cheapest price seen so far
        for price in prices:
            if lowest is None or price < lowest:
                # New minimum: can't sell profitably here, but remember it.
                lowest = price
            elif price - lowest > best_profit:
                best_profit = price - lowest
        return best_profit
| true |
555e3bf24e9cf164415defc80c2797b85ffbe26c | Python | sharepusher/leetcode-lintcode | /data_structures/string/string_permutation.py | UTF-8 | 333 | 3.578125 | 4 | [
"Apache-2.0"
] | permissive | ## Reference
# https://www.lintcode.com/problem/string-permutation/description
## Easy - Permutation/String
## Description
# Given two strings, write a function to decide if one is a permutation of the other.
# Example
# Example 1:
# Input: "abcd", "bcad"
# Output: True
# Example 2:
# Input: "aac", "abc"
# Output: False
| true |
b6609437d1f819a494ac06d6c52225ca7e6ef622 | Python | thomasyp/project | /mpfs.py | UTF-8 | 8,260 | 2.828125 | 3 | [] | no_license | #!/home/yangpu/bin/anaconda3/bin/python
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 26 10:15:47 2018
Main program for calculating maximum power peak factor of
rod withdrawal accident and rod drop accident.
Description of control rod:
Two shim rod: tr3 for 3# rod; tr5 for 5# rod.
One regulating rod: tr6 for 11# rod.
One Scram Rod: tr1 for 1# rod.
Notice:
    The rodInsertpostion used in this program is not the real insert
    position; the variable is actually equal to 180 minus the insert position.
@author: Thomas Yang
"""
from tool.handle_mcnpinp import McnpinpHandler
from control_rod import ControlRod
import os
from powerdistribution import computePowerDesityDistribution
import argparse
import re
def getMeshFilename(outputfile):
    """Scan an MCNP output file for the name of the mesh tally file.

    outputfile: path to the MCNP output file.
    Returns the file name that follows 'Mesh tallies written to file',
    or None when no such line exists.
    """
    with open(outputfile, 'r') as handle:
        for row in handle:
            if 'Mesh tallies written to file' in row:
                # The file name is the last whitespace-separated token.
                return row.strip().split()[-1]
    return None
def readKeff(inp):
    """Read keff, its deviation, and the average neutrons per fission
    from an MCNP output file.

    Parameters:
        inp: path to the MCNP output file.
    Returns:
        (kef, dev, averageNumNperFission); any value whose marker line is
        absent from the file is returned as None.

    Bug fix: the original left the locals unbound when a marker line never
    appeared, raising a confusing UnboundLocalError at the return statement.
    They are now initialised to None.
    """
    kef = dev = averageNumNperFission = None
    with open(inp, 'r') as f:
        for line in f:
            if 'final estimated' in line:
                # Token 8 is keff, token 15 its standard deviation.
                kef = float(line.split()[8])
                dev = float(line.split()[15])
            if 'the average number of neutrons produced per fission' in line:
                # Second-to-last token holds the value.
                averageNumNperFission = float(line.strip().split()[-2])
    return kef, dev, averageNumNperFission
def maxPowerPeakFactorSearch(mcnpinp, node, ppn, trforrod, mode, rodstep=10):
    """Sweep each control rod through its range, run MCNP5 at every step,
    and record the power peak factors; write all results plus the maxima
    to ``mcnpinp + 'results.out'``.

    Parameters:
        mcnpinp:  name of the MCNP input file (modified in place per step).
        node:     number of nodes for parallel computing.
        ppn:      cores per node for parallel computing.
        trforrod: dict mapping a 'tr' card name to its rod label.
        mode:     'w' for a rod-withdrawal accident, 'd' for rod drop
                  (case-insensitive); anything else aborts the program.
        rodstep:  rod movement per step (default 10).
    Return: none (results are written to disk).
    """
    cr = {}
    rodlists = {}
    powerfactorresults = {}
    mh = McnpinpHandler()
    # read initial rod position of burnup mcnp input
    for key, value in trforrod.items():
        rodmessage = mh.readContent(mcnpinp, key, 'data')
        lists = rodmessage.strip().split()
        # Tokens 1-3 of the tr card: x, y, and insert position.
        rodxcoordinate = float(lists[1])
        rodycoordinate = float(lists[2])
        rodinsertpostion = float(lists[3])
        cr[value] = ControlRod(rod=value, trCard=key, rodRange=180.0, rodXCoordinate=rodxcoordinate, rodYCoordinate=rodycoordinate)
        cr[value].setInsertPosition(rodinsertpostion)
        rodlists[value] = key
        powerfactorresults[value] = []
    print(rodlists)
    print(powerfactorresults)
    mh.cleanup(mcnpinp)
    # Withdrawal moves the rod toward 180; drop moves it toward 0.
    if re.match('w', mode, re.I) is not None:
        limit = 180.
        factor = 1
    elif re.match('d', mode, re.I) is not None:
        limit = 0
        factor = -1
    else:
        print("Mode set error! Should be w or d!")
        exit(0)
    for rod in rodlists:
        ii = 0
        initinsertposition = cr[rod].getInsertPosition()
        while(cr[rod].getInsertPosition()*factor < limit):
            # NOTE: 'instertposition' is a long-standing typo kept for
            # consistency within this function.
            instertposition = initinsertposition + rodstep*ii*factor
            if instertposition*factor > limit:
                instertposition = limit
            cr[rod].setInsertPosition(instertposition)
            ### modify mcnp inp
            mh.modifyinp(mcnpinp, cr[rod].getTrCardNo(), cr[rod].ouputforMcnpinp(), 'data')
            ii = ii + 1
            ### run mcnp
            print(' mpirun -r ssh -np '+str(int(node*ppn))+' /home/daiye/bin/mcnp5.mpi n='+mcnpinp)
            os.system(' mpirun -r ssh -np '+str(int(node*ppn))+' /home/daiye/bin/mcnp5.mpi n='+mcnpinp)
            # The presence of the 'o' output file is the success check.
            if os.path.isfile(mcnpinp+'o'):
                print('MCNP5 run finished!')
            else:
                print('error!!!,MCNP5 run failed!')
                exit(0)
            ### read results and write to results file
            keff = readKeff(mcnpinp+'o')
            # Rename the mesh tally so each rod/step keeps its own copy.
            meshfilename = mcnpinp + '_mesh_' + rod + '_' + str(instertposition)
            original_meshfilename = getMeshFilename(mcnpinp+'o')
            if os.path.isfile(original_meshfilename):
                mh.deleteFiles(meshfilename)
                os.rename(original_meshfilename, meshfilename)
                print("Rename meshtal to {:}\n".format(meshfilename))
            resultsfilename = mcnpinp + rod + '_' + str(instertposition) + '.csv'
            # Uncertainty factor: 1.1 in each of two directions — TODO confirm.
            uncertainty = 1.1 * 1.1
            radialPowerPeakFactor, axialPowerPeakFactor, totPowerPeakFactor = computePowerDesityDistribution(
                meshfilename, resultsfilename, uncertainty)
            powerfactorresults[rod].append((instertposition, keff[0], radialPowerPeakFactor,
                                            axialPowerPeakFactor, totPowerPeakFactor))
            mh.cleanup(mcnpinp)
        ## set rod insertposition to inital
        cr[rod].setInsertPosition(initinsertposition)
        mh.modifyinp(mcnpinp, cr[rod].getTrCardNo(), cr[rod].ouputforMcnpinp(), 'data')
    # Track the rod and value of each maximum peak factor across all runs.
    maxradialPowerPeakFactor = 0
    maxaxialPowerPeakFactor = 0
    maxtotPowerPeakFactor = 0
    maxrod1 = ''
    maxrod2 = ''
    maxrod3 = ''
    #print(powerfactorresults)
    with open(mcnpinp+'results.out', 'w') as fid:
        fid.write('{:^5}{:^20}{:^8}{:^20}{:^20}{:^20}\n'.format\
                  ('Rod', 'Insert position', 'Keff', 'Radial peak factor', 'Axial peak factor', 'Tot peak factor'))
        for rod in powerfactorresults:
            for ii in range(len(powerfactorresults[rod])):
                radialpowerfactor = powerfactorresults[rod][ii][2]
                axialpowerfactor = powerfactorresults[rod][ii][3]
                totpowerfactor = powerfactorresults[rod][ii][4]
                instertposition = powerfactorresults[rod][ii][0]
                keff = powerfactorresults[rod][ii][1]
                if maxradialPowerPeakFactor < radialpowerfactor:
                    maxrod1 = rod
                    maxradialPowerPeakFactor = radialpowerfactor
                if maxaxialPowerPeakFactor < axialpowerfactor:
                    maxrod2 = rod
                    maxaxialPowerPeakFactor = axialpowerfactor
                if maxtotPowerPeakFactor < totpowerfactor:
                    maxrod3 = rod
                    maxtotPowerPeakFactor = totpowerfactor
                fid.write('{:^5}{:^20.3f}{:^8.5f}{:^20.4f}{:^20.4f}{:^20.4f}\n'.format\
                          (rod, instertposition, keff, radialpowerfactor, axialpowerfactor, totpowerfactor))
        fid.write('{:}: {:}\n'.format('Rod', maxrod1))
        fid.write('{:}: {:.4}\n'.format('Max radial power peak factor', maxradialPowerPeakFactor))
        fid.write('{:}: {:}\n'.format('Rod', maxrod2))
        fid.write('{:}: {:.4}\n'.format('Max axial power peak factor', maxaxialPowerPeakFactor))
        fid.write('{:}: {:}\n'.format('Rod', maxrod3))
        fid.write('{:}: {:.4}\n'.format('Max total power peak factor', maxtotPowerPeakFactor))
# Command-line entry point: -n nodes, -p cores per node, -m mode (w/d),
# plus the MCNP input file name as the positional argument.
parser=argparse.ArgumentParser(description='input file name, node and ppn')
parser.add_argument('-n',action="store",dest="node",type=int,default=1)
parser.add_argument('-p',action="store",dest="ppn",type=int,default=1)
parser.add_argument('-m',action="store",dest="mode",type=str)
parser.add_argument('inp',action="store",type=str)
args=parser.parse_args()
print('inputfile=%s' %args.inp,'node=%s' %args.node,'ppn=%s' %args.ppn, 'mode=%s' %args.mode)
inp = args.inp
node = args.node
ppn = args.ppn
mode = args.mode
# set rod move step
rodstep = 10
# set mcnp input name
mcnpinp = inp
# Keep a pristine copy of the original input as 'init<name>' before the
# search starts rewriting the file in place.
with open(mcnpinp, 'r', encoding="utf-8") as fread, open('init' + mcnpinp, 'w', encoding="utf-8") as fwrite:
    for eachline in fread:
        fwrite.write(eachline)
# Map each tr card to its rod label, then run the sweep.
trforrod = {'tr1': '1#', 'tr3': '3#', 'tr5': '5#', 'tr6': '11#'}
maxPowerPeakFactorSearch(mcnpinp, node, ppn, trforrod, mode, rodstep)
| true |
dbe471ed6f26f9dc919eb98979e36a3abdb9cf82 | Python | ptparty/NLU-BERT | /BERT/vocab.py | UTF-8 | 2,337 | 2.921875 | 3 | [] | no_license | import pickle
import tqdm
import sys
from collections import Counter
class TorchVocab(object):
    """Bidirectional token <-> index vocabulary.

    ``itos`` maps index -> token (specials first, then the given vocab);
    ``stoi`` is the inverse mapping token -> index.
    """

    def __init__(self, vocab, specials=['<pad>', '<oov>']):
        # Special tokens occupy the lowest indices.
        self.itos = list(specials)
        self.itos.extend(vocab)
        self.stoi = {token: index for index, token in enumerate(self.itos)}

    def __eq__(self, other):
        return self.stoi == other.stoi and self.itos == other.itos

    def __len__(self):
        return len(self.itos)

    def vocab_rerank(self):
        """Rebuild ``stoi`` from the current ``itos`` ordering."""
        self.stoi = {token: index for index, token in enumerate(self.itos)}

    def extend(self, v, sort=False):
        """Append every token of vocabulary ``v`` not already present.

        When ``sort`` is True the incoming tokens are added in sorted order.
        """
        incoming = sorted(v.itos) if sort else v.itos
        for token in incoming:
            if token not in self.stoi:
                self.stoi[token] = len(self.itos)
                self.itos.append(token)
class WordVocab(TorchVocab):
    """Vocabulary with BERT-style special tokens and sequence (de)serialisation."""

    def __init__(self, vocab):
        # Fixed indices of the special tokens registered below.
        self.pad_index = 0
        self.cls_index = 1
        self.unk_index = 2
        self.mask_index = 3
        super().__init__(vocab, specials=["[PAD]", "[CLS]", "[UNK]", "[MASK]"])

    def to_seq(self, sentence, tokenizer, seq_len=None, with_cls=False, with_len=False):
        """Encode a sentence into token indices.

        Strings are tokenised with ``tokenizer.EncodeAsPieces`` first; unknown
        pieces map to the [UNK] index.  ``with_cls`` prepends [CLS]; ``seq_len``
        pads with [PAD] or truncates.  ``with_len`` also returns the length
        before padding/truncation.
        """
        if isinstance(sentence, str):
            sentence = tokenizer.EncodeAsPieces(sentence)
        seq = [self.stoi.get(piece, self.unk_index) for piece in sentence]
        if with_cls:
            seq = [self.cls_index] + seq
        origin_seq_len = len(seq)
        if seq_len is not None:
            if len(seq) <= seq_len:
                seq = seq + [self.pad_index] * (seq_len - len(seq))
            else:
                seq = seq[:seq_len]
        return (seq, origin_seq_len) if with_len else seq

    def from_seq(self, seq, join=False, with_pad=False):
        """Decode indices back to tokens; out-of-range indices render as "<idx>"."""
        tokens = []
        for idx in seq:
            if not with_pad or idx != self.pad_index:
                tokens.append(self.itos[idx] if idx < len(self.itos) else "<%d>" % idx)
        return " ".join(tokens) if join else tokens

    @staticmethod
    def load_vocab(vocab_path):
        """Unpickle a previously saved WordVocab."""
        with open(vocab_path, "rb") as handle:
            return pickle.load(handle)

    def save_vocab(self, vocab_path):
        """Pickle this vocabulary to disk."""
        with open(vocab_path, "wb") as handle:
            pickle.dump(self, handle)
| true |
c513ebf98e79f56f3c28baff470ccf042dfcd6af | Python | shubhamguptaiitd/bitcoin | /Message.py | UTF-8 | 295 | 2.96875 | 3 | [] | no_license | class Message():
def __init__(self,type,msg,src,dst):
self.type = type ##### Add to block,
self.msg = msg
self.src = src
self.dst = dst
def __str__(self):
return self.type + ":" + str(self.msg)+ "--" + str(self.src) + "->" + str(self.dst)
| true |
e6b4a735ff53bd5c7f3a39f4dde23758d845bcfa | Python | JohanEddeland/advent_of_code | /2017/04/test_aoc_04.py | UTF-8 | 445 | 3.125 | 3 | [] | no_license | """ test_aoc_04.py
Test for Advent of Code 2017 day 04
"""
import aoc_04
def test_valid_password():
    """A passphrase with all-distinct words is valid."""
    assert aoc_04.valid('aa bb cc dd ee') == True

def test_invalid_password_repeated_word():
    """A repeated word invalidates the passphrase."""
    assert aoc_04.valid('aa bb cc dd aa') == False

def test_valid_password_similar_word():
    """Similar but non-identical words ('aa' vs 'aaa') are allowed."""
    assert aoc_04.valid('aa bb cc dd aaa') == True

def test_invalid_password_anagram():
    """Part 2: anagrams of another word invalidate the passphrase."""
    assert aoc_04.valid_part_2('abcde xyz ecdab') == False
| true |
c447189719f862b2970d7d3abc1aafefba3060c4 | Python | heineman/algorithms-nutshell-2ed | /PythonCode/adk/region.py | UTF-8 | 3,700 | 3.6875 | 4 | [
"MIT"
] | permissive | """
Defined rectangular region
"""
# 32-bit signed integer extremes, used to build the default maximal region.
maxValue = 2147483647
minValue = -2147483648
# Tuple indices for (x, y) point pairs.
X = 0
Y = 1
class Region:
    """Axis-aligned rectangular region in Cartesian space."""

    def __init__(self, xmin, ymin, xmax, ymax):
        """Create a region from two corner points.

        The corners may be given in any order; the constructor normalises
        them so (x_min, y_min) is bottom-left and (x_max, y_max) top-right.
        """
        self.x_min = min(xmin, xmax)
        self.y_min = min(ymin, ymax)
        self.x_max = max(xmin, xmax)
        self.y_max = max(ymin, ymax)

    def copy(self):
        """Return an independent copy of this region."""
        return Region(self.x_min, self.y_min, self.x_max, self.y_max)

    def area(self):
        """Return the area of the rectangle."""
        width = self.x_max - self.x_min
        height = self.y_max - self.y_min
        return width * height

    def unionRect(self, other):
        """Return the smallest region covering both rectangles."""
        return Region(min(self.x_min, other.x_min),
                      min(self.y_min, other.y_min),
                      max(self.x_max, other.x_max),
                      max(self.y_max, other.y_max))

    def unionPoint(self, pt):
        """Return the smallest region covering this rectangle and the point."""
        return Region(min(self.x_min, pt[X]),
                      min(self.y_min, pt[Y]),
                      max(self.x_max, pt[X]),
                      max(self.y_max, pt[Y]))

    def overlap(self, other):
        """Return the rectangle of intersection.

        May degenerate to a line or point; for disjoint rectangles the
        constructor re-normalises the clamped coordinates.
        """
        left = max(self.x_min, other.x_min)
        bottom = max(self.y_min, other.y_min)
        right = min(self.x_max, other.x_max)
        top = min(self.y_max, other.y_max)
        return Region(left, bottom, right, top)

    def overlaps(self, other):
        """Return True if the rectangles overlap in any way (edges included)."""
        return (self.x_max >= other.x_min and self.x_min <= other.x_max
                and self.y_max >= other.y_min and self.y_min <= other.y_max)

    def containsPoint(self, point):
        """Return True if the point lies inside or on the boundary."""
        return (self.x_min <= point[X] <= self.x_max
                and self.y_min <= point[Y] <= self.y_max)

    def containsRegion(self, region):
        """Return True if the given region lies entirely inside this one."""
        return (region.x_min >= self.x_min and region.x_max <= self.x_max
                and region.y_min >= self.y_min and region.y_max <= self.y_max)

    def __str__(self):
        """Return string representation."""
        return "({},{} , {},{})".format(self.x_min, self.y_min, self.x_max, self.y_max)

    def __eq__(self, other):
        """Equality by coordinates; any non-Region compares unequal."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Standard not-equality check."""
        return not self.__eq__(other)
# default maximum region
maxRegion = Region(minValue, minValue, maxValue, maxValue)
| true |
d6a98b00b7ea330fcdd4245d781546b617b74755 | Python | cconvey/tool-configs | /my-home-dir/bin/find-similar-siblings.py | UTF-8 | 1,212 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env python3
import collections
import hashlib
import os
import os.path
import sys
def main( argv ):
    """Walk the tree rooted at argv[1] and report basenames that are
    prefixes of other basenames, along with every directory containing them.
    """
    search_root = argv[1]
    # Map each file/dir basename to the list of ('file'|'dir', parent) pairs.
    basename_to_dirnames = collections.defaultdict(list)
    for root, dirs, files in os.walk( search_root ):
        for filename in files:
            basename_to_dirnames[ filename ].append( ('file', root,) )
        for dirname in dirs:
            basename_to_dirnames[ dirname ].append( ('dir', root,) )

    # Identify basenames that are prefixes of others...
    # NOTE(review): only adjacent names in sorted order are compared, so a
    # name that prefixes several others is only matched against its
    # immediate successor.
    sorted_basenames = sorted( basename_to_dirnames.keys() )
    for i in range(0, len(sorted_basenames)-1):
        n1 = sorted_basenames[i]
        n2 = sorted_basenames[i+1]
        if n2.startswith(n1):
            sys.stdout.write('\n')
            sys.stdout.write( "n1 = {}\n".format(n1))
            sys.stdout.write( "n2 = {}\n".format(n2))
            sys.stdout.write( "n1 paths:\n")
            for d in basename_to_dirnames[n1]:
                sys.stdout.write("    '{}'\n".format( d ))
            sys.stdout.write( "n2 paths:\n")
            for d in basename_to_dirnames[n2]:
                sys.stdout.write("    '{}'\n".format( d ))
if __name__ == '__main__':
main( sys.argv )
| true |
4fa388613fec14bdf749b19ede701de856290c98 | Python | CognitionTree/Deep-ASL-Translator | /Python-Implementation/video_dataset.py | UTF-8 | 5,224 | 2.53125 | 3 | [] | no_license | from video import *
import glob
from random import shuffle
import numpy as np
class Video_Dataset(object):
    """Collection of ASL sign videos grouped by gloss (the sign's label).

    Videos are read from ``path/<gloss>/<view_point>/<version>`` directories
    (one ``Video`` per version — the Video class comes from the project's
    ``video`` module).  ``gloss_to_number`` assigns each distinct gloss an
    integer class id in discovery order.
    """
    # Supported camera view points (sub-directory names on disk).
    FRONT_VIEW = 'Front'
    FACE_VIEW = 'Face'
    SIDE_VIEW = 'Side'

    def __init__(self, path='/home/andy/Datasets/ASL/Pair_Optical_flow', view_point=FRONT_VIEW):
        self.path = path
        self.view_point = view_point
        self.gloss = []
        self.signs = []
        self.gloss_to_number = {}
        self.read_signs()
        # Shuffling also (re)builds self.glosses to match the new order.
        self.shuffle_dataset()

    def read_signs(self):
        """Scan the dataset directory and load every sign video."""
        i = 0
        sings_paths = glob.glob(self.path + '/*')
        for sign_path in sings_paths:
            sign_path = sign_path + '/' + self.view_point
            sign_versions_paths = glob.glob(sign_path + '/*')
            for sign_version_path in sign_versions_paths:
                sign = Video(sign_version_path)
                self.signs.append(sign)
                self.gloss.append(sign.get_gloss())
                # First occurrence of a gloss defines its class number.
                if sign.get_gloss() not in self.gloss_to_number:
                    self.gloss_to_number[sign.get_gloss()] = i
                    i += 1

    def get_path(self):
        return self.path

    def get_gloss_to_numb(self):
        return self.gloss_to_number

    def get_view_point(self):
        return self.view_point

    def get_glosses(self):
        # self.glosses is created by shuffle_dataset() (called in __init__).
        return self.glosses

    def get_gloss_at(self, pos):
        return self.gloss[pos]

    def get_signs(self):
        return self.signs

    def get_sign_at(self, pos):
        return self.signs[pos]

    def get_signs_matrix(self, numb_groups=36, is_m1=True):
        """Return one reduced frame matrix per sign.

        is_m1 selects between the Video class's two reduction methods.
        NOTE(review): ``array`` here presumably comes from ``video``'s star
        import (numpy) — TODO confirm; the print is debug output.
        """
        matrix = []
        for sign in self.signs:
            sign_matrix = None
            if is_m1:
                sign_matrix = sign.get_reduced_frames_matrix(numb_groups)
            else:
                sign_matrix = sign.get_reduced_frames_matrix2(numb_groups)
            matrix.append(sign_matrix)
        print(array(matrix).shape)
        return matrix

    def shuffle_dataset(self):
        """Shuffle the sign list in place and rebuild the gloss list to match."""
        shuffle(self.signs)
        self.glosses = []
        for sign in self.signs:
            self.glosses.append(sign.get_gloss())

    def organize_signs_by_gloss(self):
        """Return a dict mapping each gloss to the list of its sign videos."""
        map_gloss_sign = {}
        for i in range(len(self.gloss)):
            cur_gloss = self.gloss[i]
            cur_sign = self.signs[i]
            if cur_gloss in map_gloss_sign:
                map_gloss_sign[cur_gloss].append(cur_sign)
            else:
                map_gloss_sign[cur_gloss] = [cur_sign]
        return map_gloss_sign

    def get_data_split(self, train_frac=0.75, val_frac=0.05, test_frac=0.2, numb_groups=36, is_videos=True):
        """Split the dataset into (train, validation, test) sets.

        The split is stratified per gloss: the first ``train_frac`` of each
        gloss's samples go to training, the next ``val_frac`` to validation,
        and the remainder to test (``test_frac`` itself is not used directly).
        When ``is_videos`` is False, each video is collapsed to a single
        temporally-weighted image.
        """
        X_train = []
        y_train = []
        X_test = []
        y_test = []
        X_val = []
        y_val = []
        signs_matrix = None
        if is_videos:
            signs_matrix = self.get_signs_matrix(numb_groups)
        else:
            signs_matrix = self.get_signs_matrix(numb_groups, False)
        # organize dataset by gloss
        map_gloss_sign = self.organize_signs_by_gloss()
        # Setting up initial train fractions to 0
        train_count = {}
        val_count = {}
        for gloss in map_gloss_sign:
            train_count[gloss] = 0.0
            val_count[gloss] = 0.0
        for i in range(len(self.gloss)):
            cur_gloss = self.gloss[i]
            cur_sign = signs_matrix[i]
            # Training
            if (train_count[cur_gloss] / (1.0 * len(map_gloss_sign[cur_gloss]))) < train_frac:
                X_train.append(cur_sign)
                y_train.append(self.gloss_to_number[cur_gloss])
                train_count[cur_gloss] += 1.0
            elif (val_count[cur_gloss] / (1.0 * len(map_gloss_sign[cur_gloss]))) < val_frac:
                X_val.append(cur_sign)
                y_val.append(self.gloss_to_number[cur_gloss])
                val_count[cur_gloss] += 1.0
            else:
                X_test.append(cur_sign)
                y_test.append(self.gloss_to_number[cur_gloss])
        X_train = np.array(X_train)
        y_train = np.array(y_train)
        X_test = np.array(X_test)
        y_test = np.array(y_test)
        X_val = np.array(X_val)
        y_val = np.array(y_val)
        if not is_videos:
            X_train = self.reduce_videos_to_images_with_temp(X_train)
            X_val = self.reduce_videos_to_images_with_temp(X_val)
            X_test = self.reduce_videos_to_images_with_temp(X_test)
            return (X_train, y_train), (X_val, y_val), (X_test, y_test)
        else:
            return (X_train, y_train), (X_val, y_val), (X_test, y_test)

    def reduce_videos_to_images_with_temp(self, Vs):
        """Collapse each video in Vs to one temporally-weighted image."""
        Is = []
        for V in Vs:
            Is.append(self.reduce_video_to_image_with_temp(V))
        return array(Is)

    def reduce_video_to_image_with_temp(self, V):
        """Average the frames of V weighted by frame position (later = heavier)."""
        I = zeros(V[0].shape)
        for i in range(len(V)):
            I += (i + 1) * V[i]
        I /= (1.0 * len(V))
        return I

    def get_numb_classes(self):
        """Return the number of distinct glosses (classes)."""
        return len(self.gloss_to_number)

    def __str__(self):
        seigns_str = ''
        for sign in self.signs:
            seigns_str += str(sign)
            seigns_str += '\n'
        return self.path + '\n' + str(len(self.signs)) + seigns_str
| true |
74c84cd481197be0a974a67baf82c1a3e11e4652 | Python | psorus/state | /test1.py | UTF-8 | 104 | 2.59375 | 3 | [
"MIT"
] | permissive |
class t:
    # Scratch class exploring __setitem__ behaviour.  Note that Python
    # discards the return value of __setitem__, so the `return 1` below is
    # unreachable for any caller using `obj[key] = value` syntax.
    # (The first parameter is conventionally named `self`; `s` works too.)
    def __setitem__(s,a,v):
        print("setting item",a,v)
        return 1
tt=t()
# BUG FIX: the original line was `k=(tt["a"]=1)`, which is a SyntaxError —
# assignment is a statement in Python, not an expression, and the return
# value of __setitem__ is always discarded.  The subscript assignment is
# performed on its own instead.
tt["a"] = 1
| true |
ca161dcb05b1cc12db978dd275406bc8d5e33044 | Python | RaviKim/PythonParse | /1.jsonMake/fiveTest.py | UTF-8 | 4,018 | 2.515625 | 3 | [] | no_license | """forth Test"""
"""
Author : HSKIM
Date : 190618
Target : lotteimall
Difficult : Easy
ver 0.0.5
comment : 1. img url 가져오는 것 구현.
2. json 파일로 만드는 것 구현
3. 환경변수 최대한 이용할 것
"""
from selenium import webdriver
from bs4 import BeautifulSoup as BS
import json
import csv
import requests
import urllib.request
# Path & ENV params
chromedriver_path = '/Users/ravikim/Documents/chromedriver'
url = 'http://www.lotteimall.com/display/sellRank100GoodsList.lotte'
f = open('./parseTest.txt', 'w')
n = open('./testText.txt', 'a')
ilottemall_crawlText = 'ilottemall_crawlText.txt'
hyundaihmall_crawlText = 'hyungdaihmall_crawlText.txt'
lottedotcom_crawlText = 'lottedotcom_crawlText.txt'
ilottemall_url = 'http://www.lotteimall.com/display/sellRank100GoodsList.lotte'
hyungdaihmall_url = 'http://www.hyundaihmall.com/front/dpd/wkBestTypeTot.do?dispCsfGrpGbcd=01&mode=sell§Id=168445&type=hmall&ajaxYn=Y&depth=3'
lottedotcom_url = 'http://www.lotte.com/display/viewRankingZoneMain.lotte?disp_no=5543628&disp_grp_nm=%ED%8C%A8%EC%85%98%EC%9D%98%EB%A5%98&upr_disp_no=&spick_disp_no=0&goods_sum_sct_cd=P0&goods_rnk_sct_cd=S&gen_sct_cd=A&age_sct_cd=A&dept_yn=&type=pc&tracking=RANKING_Sort_A_A#rankMiddle'
#Crawling Function
def ilottemall_crawling(html):
    """
    Crawl a Lotte i-mall ranking page (parsed BeautifulSoup tree) and collect
    per-item rank, image URL, title and prices.  Other sites are meant to get
    their own dedicated functions.

    NOTE(review): several issues in this body —
      * ``int(tr.find(...))`` passes a Tag (or None when absent) to int(),
        which raises TypeError; the rank text was presumably intended.
      * The first ``img = ...['src']`` assignment is immediately overwritten
        by the ``.get('src')`` line, so it is dead code.
      * The results are returned but the module-level caller ignores them,
        leaving the global ``ilotte_list``/``ilotte_dict`` empty.
    """
    temp_list = []
    temp_dict = {}
    # Fetch the information for every list item on the page.
    # tr_list = html.select('body > li > p > a')
    tr_list = html.select('body li')
    for tr in tr_list:
        ranktop = int(tr.find('div', {'class':'rank top'}))
        rank = int(tr.find('div', {'class':'rank'}))
        """body > li:nth-child(1) > div.thumb > a > img"""
        img = tr.find('div', {'class':'thumb'}).find('a').find('img')['src']
        img = tr.find('div', {'class':'thumb'}).find('a').find('img').get('src')
        title = tr.find('p', {'class':'txt_name'}).find('a').find('strong').text
        price1 = tr.find('p', {'class':'txt_price'}).find('span', {'class':'price1'}).find('strong').text
        price2 = tr.find('p', {'class':'txt_price'}).find('span', {'class':'price2'}).find('strong').text
        temp_list.append([ranktop, rank, img, title, price1, price2])
        temp_dict[str(rank)] = {'img':img, 'title':title, 'price1':price1, 'price2':price2}
    return temp_list, temp_dict
#CSV Function
def toCSV(article_list):
with open('article_table.csv', 'w', encoding='utf-8', newline='') as file:
csvfile = csv.writer(file)
for row in article_list:
csvfile.writerow(row)
#Json Function
def toJson(article_dict):
with open('article_dict.json', 'w', encoding='utf-8') as file:
json.dump(article_dict, file, ensure_ascii=False, indent='\t')
# Module-level crawl run (executes a network request at import time).
# NOTE(review): ilottemall_crawling returns its results, but the return value
# is discarded here, so the empty ilotte_list / ilotte_dict are what actually
# get written to disk.
ilotte_list = []
ilotte_dict = {}
req = requests.get(url)
html = req.text
soup = BS(html, 'html.parser')
ilottemall_crawling(soup)
toCSV(ilotte_list)
toJson(ilotte_dict)
def get_text(URL):
    """
    Fetch the page at URL and return the concatenated text of all <li>
    elements (each as the str() of its text-node list).  Experimental helper.
    """
    source_code_from_URL = urllib.request.urlopen(URL)
    soup = BS(source_code_from_URL, 'html.parser', from_encoding='utf-8')
    text = ''
    # NOTE(review): repeated string concatenation is quadratic; fine for the
    # small pages this is used on.
    for item in soup.find_all('li'):
        text = text + str(item.find_all(text=True))
    return text
def main():
    """Crawl the three mall ranking pages and dump each page's <li> text
    into its corresponding *_crawlText.txt file.
    """
    open_ilotte_file = open(ilottemall_crawlText, 'w')
    open_hyungdai_file = open(hyundaihmall_crawlText, 'w')
    open_lottedotcom_file = open(lottedotcom_crawlText, 'w')
    # Network I/O: one HTTP fetch per site.
    result_ilotte_file = get_text(ilottemall_url)
    result_hyungdai_file = get_text(hyungdaihmall_url)
    result_lottedotcom_file = get_text(lottedotcom_url)
    open_ilotte_file.write(result_ilotte_file)
    open_hyungdai_file.write(result_hyungdai_file)
    open_lottedotcom_file.write(result_lottedotcom_file)
    open_ilotte_file.close()
    open_hyungdai_file.close()
    open_lottedotcom_file.close()
main()
| true |
64fe530bdc00124955dcc00b08dbe4b4754ce8d8 | Python | leventarican/cookbook | /python/dojo/kata12.py | UTF-8 | 276 | 3.25 | 3 | [] | no_license | # https://www.codewars.com/kata/5656b6906de340bd1b0000ac/train/python
def longest(s1, s2):
a = set(s1)
b = set(s2)
c = a.union(b)
d = sorted(c)
return "".join(d)
if __name__ == "__main__":
assert(longest("aretheyhere", "yestheyarehere") == "aehrsty") | true |
1d0a4e14e40acfe94007513e7e8d4c65c1e50e95 | Python | yangyuxue2333/NAMEABILITY | /calculate/get_word2vec_similarities.py | UTF-8 | 1,151 | 2.65625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import sys
from gensim.models import word2vec
from itertools import combinations
def word2VecSimilarity(model, w1, w2):
    """Return the model's similarity between words w1 and w2, or 0 on failure.

    Out-of-vocabulary words (KeyError) or any other model-side error yield 0,
    so a single bad pair cannot abort a batch computation.

    Review fixes: removed the leftover debug ``print('in method 1')`` and the
    redundant bare ``except`` clause (which also swallowed SystemExit and
    KeyboardInterrupt); a single ``except Exception`` covers both cases.
    """
    try:
        return model.similarity(w1, w2)
    except Exception:
        # Covers KeyError for OOV words plus any other model failure.
        return 0
def get_words(words_file, model):
    """Compute the mean word2vec similarity of the word pairs in each trial.

    words_file: CSV with a two-level header — level 0 is the trial id, level 1
                holds 'word1'/'word2' columns.
    model:      object exposing ``similarity(w1, w2)`` (e.g. gensim vectors).
    Returns a dict mapping trial id -> average similarity over its complete
    (non-NaN) pairs.

    NOTE(review): a trial whose rows are all dropped by ``dropna`` leaves an
    empty list and raises ZeroDivisionError at the average.
    """
    df = pd.read_csv(words_file, header=[0,1])
    trials = [str(i) for i in df.columns.levels[0]]
    w2vSimilarity = {}
    for trial in trials:
        # one trial df 'word1' 'word2'
        dt = df[trial]
        dt = dt.dropna(how='any')
        pairs = list(zip(dt['word1'], dt['word2']))
        similarity = []
        for p in pairs:
            similarity.append(word2VecSimilarity(model, p[0], p[1]))
        w2vSimilarity[trial] = sum(similarity)/float(len(similarity))
    return w2vSimilarity
# main #
# Load the pretrained Google News vectors and score the word-pair file.
# NOTE(review): get_words returns a plain dict, which has no ``to_csv``
# method, so the last line raises AttributeError (and the keyword is also
# misspelled: ``colums`` instead of ``columns``).  Converting the dict to a
# pandas Series/DataFrame first would be needed for this to work.
path = '/Users/Cher/Documents/UW/2017LAB/Lupyan/NAMEABILITY/name/output/GoogleNews-vectors-negative300.bin'
words_file = '../output/word_pairs.csv'
model = word2vec.KeyedVectors.load_word2vec_format(path,binary=True)
get_words(words_file, model).to_csv('w2v_similarity.csv', colums=['w2v_sim'])
| true |
132d6df83b3e5e2a22deb337699c2cce7e9d33cd | Python | JatinTiwaricodes/expmath | /plots/einfache_funktionen.py | UTF-8 | 21,328 | 3.203125 | 3 | [] | no_license | import queue
import numpy as np
from bokeh.layouts import Row, WidgetBox
from bokeh.io import curdoc
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import Slider, Dropdown, Toggle
from bokeh.plotting import Figure
from extensions.Latex import LatexLabel
# Some functions are not defined for negative values or zero. Numpy will give
# out an warning. However, we simply don't want to draw this value. Therefore,
# we are going to ignore all values related to errors.
np.seterr(all="ignore")
"""
This plot will introduce the user to basic functions and let them interactively
change parameter to them. The user can select between all major function groups:
Polynomials
Trigonometrics
Exponentials
Roots
Logarithmns
Hyperbolics
Inverse Trigonometrics
Inverse Hyperbolics
Specials (Absolute, Heaviside ...)
"""
# Constants for the geometry of the plot, similar to other plots
HEIGHT = 400
WIDTH_PLOT = 600
WIDTH_TOTAL = 800
# Left-most and right-most points to still contain value pairs. They form the
# interval over which the function is drawn.
X_LEFT = -10
X_RIGHT = 10
# Initial vieport of the plot. The plain X by Y that the user sees intially.
X_RANGE_INITIAL = [-5, 5]
Y_RANGE_INTIAL = [-4, 4]
# Thickness of the function line
LINE_WIDTH = 2
# The way the function will appear in the dropdown menu. The key in the
# second element of every tuple is also used to address the dictionaries
# containing the implementation functions and the latex layout.
# `None` entries render as separator lines between function groups.
dropdown_menu = [
    ("Konstantes Polynom", "constant"),
    ("Lineares Polynom", "linear"),
    ("Quadratisches Polynom", "quadratic"),
    ("Kubisches Polynom", "cubic"),
    None,
    ("Sinus", "sine"),
    ("Kosinus", "cosine"),
    ("Tangens", "tangent"),
    None,
    ("Allgemeine Exponentialfunktion", "exponential_general"),
    ("e-Funktion", "exponential"),
    None,
    ("Wurzel", "root"),
    ("Logarithmus", "logarithmn"),
    None,
    ("Sinus Hyperbolicus", "hyperbolic_sine"),
    ("Kosinus Hyperbolicus", "hyperbolic_cosine"),
    ("Tangens Hyperbolicus", "hyperbolic_tangent"),
    None,
    ("Arcus-Sinus", "arc_sine"),
    ("Arcus-Kosinus", "arc_cosine"),
    ("Arcus-Tangens", "arc_tangent"),
    None,
    ("Area Sinus Hyperbolicus", "area_hyperbolic_sine"),
    ("Area Kosinus Hyperbolicus", "area_hyperbolic_cosine"),
    ("Area Tangens Hyperbolicus", "area_hyperbolic_tangent"),
    None,
    ("Betragsfunktion", "absolute"),
    ("Heaviside Funktion", "heaviside"),
    ]
# Defaults are the values for what the sliders are set once this function gets
# selected
# Each entry provides:
#   "definition": vectorized callable (a, b, c, d, x) -> y
#   "latex":      KaTeX template; its %1.2f placeholders are filled with the
#                 first `number_of_placeholders` of (a, b, c, d), in order.
functions = {
    "constant": {
        "definition": (lambda a, b, c, d, x: a * np.ones(x.shape)),
        "latex": "f(x) = a = %1.2f",
        "number_of_placeholders": 1,
        "defaults": {"a": 1, "b": 0, "c": 0, "d": 0},
    },
    "linear": {
        "definition": (lambda a, b, c, d, x: a * x + b),
        "latex": "f(x) = ax + b = %1.2f \cdot x + %1.2f",
        "number_of_placeholders": 2,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "quadratic": {
        "definition": (lambda a, b, c, d, x: a * x**2 + b * x + c),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                ax^2 + bx + c
                \\\\=&
                %1.2f \cdot x^2 + %1.2f \cdot x + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 3,
        "defaults": {"a": 1, "b": 0, "c": -1, "d": 0},
    },
    "cubic": {
        "definition": (lambda a, b, c, d, x:
                a * x**3 + b * x**2 + c * x + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                ax^3 + bx^2 + cx + d
                \\\\=&
                %1.2f \cdot x^3 + %1.2f \cdot x^2 + %1.2f \cdot x + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 0, "c": 0, "d": 0},
    },
    "sine": {
        "definition": (lambda a, b, c, d, x: a * np.sin(b*x + c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \sin(bx + c) + d
                \\\\=&
                %1.2f \cdot \sin(%1.2f x + %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "cosine": {
        "definition": (lambda a, b, c, d, x: a * np.cos(b*x + c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \cos(bx + c) + d
                \\\\=&
                %1.2f \cdot \cos(%1.2f x + %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "tangent": {
        "definition": (lambda a, b, c, d, x: a * np.tan(b*x + c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \\tan(bx + c) + d
                \\\\=&
                %1.2f \cdot \\tan(%1.2f x + %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "exponential_general": {
        "definition": (lambda a, b, c, d, x: a * b**(x+c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot b^{x+c} + d
                \\\\=&
                %1.2f \cdot %1.2f ^ {x + %1.2f} + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 2, "c": 0, "d": 0},
    },
    "exponential": {
        "definition": (lambda a, b, c, d, x: a * np.exp(b*(x+c)) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot e^{b(x+c)} + d
                \\\\=&
                %1.2f \cdot e^{%1.2f (x + %1.2f)} + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "root": {
        "definition": (lambda a, b, c, d, x: a * (x + c)**(1/b) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \sqrt[b]{x + c} + d
                \\\\=&
                %1.2f \cdot \sqrt[ %1.2f ]{x + %1.2f} + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 2, "c": 0, "d": 0},
    },
    "logarithmn": {
        "definition": (lambda a, b, c, d, x: a * np.log(b*(x+c)) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \ln(b\cdot(x+c)) + d
                \\\\=&
                %1.2f \cdot \ln(%1.2f\cdot (x + %1.2f)) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "hyperbolic_sine": {
        "definition": (lambda a, b, c, d, x: a * np.sinh(b*x + c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \sinh(bx + c) + d
                \\\\=&
                %1.2f \cdot \sinh(%1.2f x + %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "hyperbolic_cosine": {
        "definition": (lambda a, b, c, d, x: a * np.cosh(b*x + c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \cosh(bx + c) + d
                \\\\=&
                %1.2f \cdot \cosh(%1.2f x + %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "hyperbolic_tangent": {
        "definition": (lambda a, b, c, d, x: a * np.tanh(b*x + c) + d),
        # Fixed: the symbolic line was missing the backslash in \cdot
        # ("a cdot\tanh"), which KaTeX rendered as literal text.
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \\tanh(bx + c) + d
                \\\\=&
                %1.2f \cdot \\tanh(%1.2f x + %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "arc_sine": {
        "definition": (lambda a, b, c, d, x:
                a * np.arcsin(b*x + c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \\arcsin(bx + c) + d
                \\\\=&
                %1.2f \cdot \\arcsin(%1.2f x + %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "arc_cosine": {
        "definition": (lambda a, b, c, d, x:
                a * np.arccos(b*x + c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \\arccos(bx + c) + d
                \\\\=&
                %1.2f \cdot \\arccos(%1.2f x + %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "arc_tangent": {
        "definition": (lambda a, b, c, d, x:
                a * np.arctan(b*x + c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \\arctan(bx + c) + d
                \\\\=&
                %1.2f \cdot \\arctan(%1.2f x + %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "area_hyperbolic_sine": {
        "definition": (lambda a, b, c, d, x:
                a * np.arcsinh(b*x + c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot\\text{ arsinh}(bx + c) + d
                \\\\=&
                %1.2f \cdot \\text{ arsinh}(%1.2f x + %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "area_hyperbolic_cosine": {
        "definition": (lambda a, b, c, d, x:
                a * np.arccosh(b*x + c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \\text{ arcosh}(bx + c) + d
                \\\\=&
                %1.2f \cdot \\text{ arcosh}(%1.2f x + %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "area_hyperbolic_tangent": {
        "definition": (lambda a, b, c, d, x:
                a * np.arctanh(b*x + c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \\text{ artanh}(bx + c) + d
                \\\\=&
                %1.2f \cdot \\text{ artanh}(%1.2f x + %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "absolute": {
        "definition": (lambda a, b, c, d, x: a * np.absolute(b*x + c) + d),
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot |bx + c | + d
                \\\\=&
                %1.2f \cdot |%1.2f x + %1.2f | + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 4,
        "defaults": {"a": 1, "b": 1, "c": 0, "d": 0},
    },
    "heaviside": {
        "definition": (lambda a, b, c, d, x:
                a * np.piecewise(x, [x < b, b <= x], [0, 1]) + c),
        # Fixed: the old template displayed "a + b*step(x - c)" although the
        # definition computes a*step(x - b) + c, so the rendered formula
        # contradicted the plotted curve.  H denotes the Heaviside step.
        "latex": """
                \\begin{aligned}
                f(x)
                =&
                a \cdot \\mathrm{H}(x - b) + c
                \\\\=&
                %1.2f \cdot \\mathrm{H}(x - %1.2f) + %1.2f
                \end{aligned}
                """,
        "number_of_placeholders": 3,
        "defaults": {"a": 1, "b": 0, "c": 0, "d": 0}
    },
}
def update_plot(function_active, a, b, c, d):
    """Sample the selected function over [X_LEFT, X_RIGHT].

    Functions with a kink (absolute value) or a jump (Heaviside) are
    sampled on two sub-intervals around the critical abscissa so the
    plotted line does not smear across the discontinuity.
    """
    if function_active == "absolute":
        left_half = np.linspace(X_LEFT, c - 0.0001, 200)
        right_half = np.linspace(c + 0.0001, X_RIGHT, 200)
        x = np.concatenate([left_half, np.array([c, ]), right_half])
    elif function_active == "heaviside":
        left_half = np.linspace(X_LEFT, b - 0.0001, 200)
        right_half = np.linspace(b + 0.0001, X_RIGHT, 200)
        x = np.concatenate([left_half, right_half])
    else:
        x = np.linspace(X_LEFT, X_RIGHT, 400)
    return x, functions[function_active]["definition"](a, b, c, d, x)
def update_latex(function_active, a, b, c, d):
    """Return the selected function's KaTeX template with the current
    coefficient values substituted for its %1.2f placeholders."""
    template = functions[function_active]["latex"]
    slot_count = functions[function_active]["number_of_placeholders"]
    if not 1 <= slot_count <= 4:
        return ""
    # The placeholders always consume the leading coefficients in order.
    return template % tuple((a, b, c, d)[:slot_count])
# The ColumnDataSource abstracts the sending of value pairs to the client.
# Whenever the data attribute changes the new information is sent over the open
# WebSocket.
data_source = ColumnDataSource()
data_source_second = ColumnDataSource(data={"x": [], "y": []})
plot = Figure(plot_height=HEIGHT, plot_width=WIDTH_PLOT,
        x_range=X_RANGE_INITIAL, y_range=Y_RANGE_INTIAL)
plot.toolbar.active_drag = None
# Get the same tick (grid lines) depth for both axes
plot.xaxis[0].ticker.desired_num_ticks =\
    2*plot.yaxis[0].ticker.desired_num_ticks
# Indicate the x-axis and y_axis by thin black lines
plot.line(x=[X_LEFT, X_RIGHT], y=[0, 0], color="black")
plot.line(x=[0, 0], y=[X_LEFT, X_RIGHT], color="black")
# The first function is plotted right away, the second will be visible, once the
# toggle is activated
plot.line(x="x", y="y", source=data_source, color="blue", line_width=LINE_WIDTH)
plot.line(x="x", y="y", source=data_source_second, color="red",
        line_width=LINE_WIDTH)
function_selector = Dropdown(label="Funktion auswählen", button_type="warning",
        menu = dropdown_menu, value="constant")
# All four coefficient sliders share the same range; their meaning depends on
# the selected function (see the `functions` dictionary).
parameter_a = Slider(title="Parameter a", start=-2, end=2, step=0.1, value=1)
parameter_b = Slider(title="Parameter b", start=-2, end=2, step=0.1, value=1)
parameter_c = Slider(title="Parameter c", start=-2, end=2, step=0.1, value=1)
parameter_d = Slider(title="Parameter d", start=-2, end=2, step=0.1, value=1)
parameter_sliders = (parameter_a, parameter_b, parameter_c, parameter_d)
# This toggle is used to activate a second function and once it is activated to
# switch between controlling the first and the second function
second_toggle = Toggle(label="Zweite Funktion g (de-)aktivieren")
# Label extension rendered by the help of KaTeX on the client-side
function_latex = LatexLabel(text = "", x = WIDTH_PLOT - 20, y = 80,
        x_units="screen", y_units="screen", render_mode="css",
        text_font_size="12pt", background_fill_alpha=0)
plot.add_layout(function_latex)
function_latex_second = LatexLabel(text = "", x = WIDTH_PLOT - 20, y = 30,
        x_units="screen", y_units="screen", render_mode="css",
        text_font_size="12pt", background_fill_alpha=0)
plot.add_layout(function_latex_second)
def update_all():
    """
    General update routine: read the widget values, recompute the sampled
    curve and its KaTeX label, and write both to whichever function
    (first or second) is currently being edited.
    """
    selection = function_selector.value
    coefficients = (parameter_a.value, parameter_b.value,
                    parameter_c.value, parameter_d.value)
    x, y = update_plot(selection, *coefficients)
    label_text = update_latex(selection, *coefficients)
    if second_toggle.active:
        data_source_second.data = {"x": x, "y": y}
        # The second function is called g in the UI.
        function_latex_second.text = label_text.replace("f(x)", "g(x)")
    else:
        data_source.data = {"x": x, "y": y}
        function_latex.text = label_text
def update_slider(attr, old, new):
    # Bokeh on_change callback signature; the arguments are intentionally
    # unused because update_all() re-reads every widget value itself.
    update_all()
def update_dropdown(attr, old, new):
    """
    Similar to the slider callback, but additionally resets the sliders
    to the default coefficients stored for the newly selected function.
    """
    defaults = functions[function_selector.value]["defaults"]
    sliders = (parameter_a, parameter_b, parameter_c, parameter_d)
    for slider, key in zip(sliders, ("a", "b", "c", "d")):
        slider.value = defaults[key]
    update_all()
# When the user switches between the two functions remember the values of the
# widgets.  A two-slot FIFO queue holds the parameter set of the function that
# is currently *not* being edited: toggle_callback() pushes the active state
# and pops the previously saved one.
saved_parameters_from_other_function = queue.Queue(2)
# Standard parameters for the second function are similar to those for the first
saved_parameters_from_other_function.put({
    "function_active": "constant",
    "a": 2,
    "b": 0,
    "c": 0,
    "d": 0,
})
def toggle_callback(source):
    """Switch between editing the first and the second function.

    The widget state of the function being left is pushed into the
    two-slot queue, and the previously saved state of the other function
    is popped and restored into the widgets.
    """
    global saved_parameters_from_other_function
    saved_parameters_from_other_function.put({
        "function_active": function_selector.value,
        "a": parameter_a.value,
        "b": parameter_b.value,
        "c": parameter_c.value,
        "d": parameter_d.value,
    })
    restored = saved_parameters_from_other_function.get()
    function_selector.value = restored["function_active"]
    sliders = (parameter_a, parameter_b, parameter_c, parameter_d)
    for slider, key in zip(sliders, ("a", "b", "c", "d")):
        slider.value = restored[key]
    update_all()
# Call the callback function in advance to populate the plot
update_all()
# Connect the callbacks with the corresponding widgets
for slider in parameter_sliders:
    slider.on_change("value", update_slider)
function_selector.on_change("value", update_dropdown)
second_toggle.on_click(toggle_callback)
# Assemble the plot: curve on the left, all controls stacked on the right.
inputs = WidgetBox(function_selector, *parameter_sliders, second_toggle)
curdoc().add_root(Row(plot, inputs, width=WIDTH_TOTAL))
| true |
f240a0410f2a40313842bcab5dec6a2bd8f88e19 | Python | realnow/R_script | /R call python scripts/splitstr.py | UTF-8 | 262 | 3.71875 | 4 | [] | no_license | # splitstr.py
import sys

# Get the arguments passed in
string = sys.argv[1]
pattern = sys.argv[2]
# Perform the splitting
ans = string.split(pattern)
# Join the resulting list of elements into a single newline
# delimited string and print.  Bug fix: the join separator must be the
# newline escape '\n' -- the original used the literal letter 'n'.
print('\n'.join(ans))
4bb98ea34ab1c8ac93448dcadbae214d11bec424 | Python | mateegojra/python | /mysql/connection.py | UTF-8 | 1,828 | 3.421875 | 3 | [] | no_license | import mysql.connector
import os
# Clears the console (Windows-only: 'cls').
clear = lambda: os.system('cls')
# NOTE(review): credentials are hard-coded and the password is empty; the
# connection is opened at import time -- confirm this is intended.
mydb = mysql.connector.connect(host= "localhost", user="root", password="", database="python_practice")
handler = mydb.cursor()
#handler.execute("CREATE TABLE myFriends(f_id INT(6) UNSIGNED AUTO_INCREMENT PRIMARY KEY, full_name VARCHAR(30), city VARCHAR(20), added_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, updated_at TIMESTAMP ON UPDATE CURRENT_TIMESTAMP)")
def AddNew():
    """Prompt for a friend's name and city and insert a new row."""
    name = input("Enter Friend's Name: ")
    city = input("Enter City: ")
    # Parameterized query: the driver handles quoting/escaping.
    handler.execute(
        "INSERT INTO myfriends(full_name, city) VALUES(%s, %s)",
        (name, city),
    )
    mydb.commit()
def show():
    """Print every friend as 'name------city', one per line."""
    handler.execute("SELECT * FROM myfriends")
    for record in handler.fetchall():
        # Columns: f_id, full_name, city, ...
        print(record[1]+"------"+record[2])
def delFriend(fid):
    """Delete the friend with primary key ``fid`` and reprint the list."""
    handler.execute("DELETE FROM myfriends WHERE f_id = %s", (fid,))
    mydb.commit()
    show()
def update():
    """Prompt for an existing friend's name and overwrite name and city."""
    old_name = input("Enter Old Name: ")
    new_name = input("Enter New Name: ")
    new_city = input("Enter His/Her City: ")
    handler.execute(
        "UPDATE myfriends SET full_name = %s, city = %s WHERE full_name = %s",
        (new_name, new_city, old_name),
    )
    mydb.commit()
    show()
def mainMenue():
    """Interactive menu loop dispatching to the CRUD helpers.

    The original version re-entered the menu via unbounded recursion
    (growing the call stack on every choice) and compared against the
    redundant ``str('1')``; a plain ``while`` loop with string literals
    is equivalent and bounded.  The loop only ends when the process is
    interrupted.
    """
    while True:
        clear()
        print("******Main Menue******")
        print("1- Add New Record\n2- Show Record\n3- Del a Friend\n4- Update Friend's List")
        what = input("Enter Your Choice: ")
        if what == '1':
            AddNew()
        elif what == '2':
            show()
        elif what == '3':
            fid = input("Enter Friend's ID to Delete? ")
            delFriend(fid)
        elif what == '4':
            update()
        else:
            print("Invalid Option")
# Entry point.  The menu never returns under normal use, so the closing
# banner below is effectively unreachable.
mainMenue()
print("***********END************")
| true |
247ee4dd43ff58d12fe814c2a2dc9a487d324007 | Python | 191820061/CS61A | /chapter2/abstractData.py | UTF-8 | 3,507 | 3.265625 | 3 | [] | no_license | def mobile(left, right):
"""Construct a mobile from a left arm and a right arm."""
assert is_arm(left), "left must be a arm"
assert is_arm(right), "right must be a arm"
return ['mobile', left, right]
def is_mobile(m):
    """Return whether m is a mobile: a list of the shape ['mobile', L, R]."""
    if type(m) != list or len(m) != 3:
        return False
    return m[0] == 'mobile'
def left(m):
    """Select the left arm of a mobile."""
    assert is_mobile(m), "must call left on a mobile"
    # A mobile is represented as ['mobile', left_arm, right_arm].
    return m[1]
def right(m):
    """Select the right arm of a mobile."""
    assert is_mobile(m), "must call right on a mobile"
    # A mobile is represented as ['mobile', left_arm, right_arm].
    return m[2]
def arm(length, mobile_or_planet):
    """Construct an arm: a length of rod with a mobile or planet at the end."""
    assert is_mobile(mobile_or_planet) or is_planet(mobile_or_planet)
    # An arm is represented as ['arm', length, hanging_structure].
    return ['arm', length, mobile_or_planet]
def is_arm(s):
    """Return whether s is an arm: a list of the shape ['arm', length, end]."""
    if type(s) != list or len(s) != 3:
        return False
    return s[0] == 'arm'
def length(s):
    """Select the length of an arm."""
    assert is_arm(s), "must call length on a arm"
    # An arm is represented as ['arm', length, hanging_structure].
    return s[1]
def end(s):
    """Select the mobile or planet hanging at the end of an arm."""
    assert is_arm(s), "must call end on a arm"
    # An arm is represented as ['arm', length, hanging_structure].
    return s[2]
def planet(size):
    """Construct a planet of the given (strictly positive) size."""
    assert size > 0
    return ['planet'] + [size]
def size(w):
    """Select the size of a planet."""
    assert is_planet(w), 'must call size on a planet'
    # A planet is represented as ['planet', size].
    return w[1]
def is_planet(w):
    """Return whether w is a planet: a list of the shape ['planet', size]."""
    if type(w) != list or len(w) != 2:
        return False
    return w[0] == 'planet'
def examples():
    """Return three sample mobiles (t, u, v) used by the exercises;
    v hangs t and u from a common rod."""
    t = mobile(arm(1, planet(2)),
               arm(2, planet(1)))
    u = mobile(arm(5, planet(1)),
               arm(1, mobile(arm(2, planet(3)),
                             arm(3, planet(2)))))
    v = mobile(arm(4, t), arm(2, u))
    return t, u, v
def total_weight(m):
    """Sum of the sizes of every planet hanging in m (a planet or mobile)."""
    if is_planet(m):
        return size(m)
    assert is_mobile(m), "must get total weight of a mobile or a planet"
    left_weight = total_weight(end(left(m)))
    right_weight = total_weight(end(right(m)))
    return left_weight + right_weight
def balanced(m):
    """True iff m exerts equal torque on both arms and every mobile
    hanging anywhere below it is itself balanced."""
    assert is_mobile(m)
    for side in (left(m), right(m)):
        hanging = end(side)
        if is_mobile(hanging) and not balanced(hanging):
            return False
    torque_left = total_weight(end(left(m))) * length(left(m))
    torque_right = total_weight(end(right(m))) * length(right(m))
    return torque_left == torque_right
from ADT import tree, label, branches, is_leaf, print_tree
def totals_tree(m):
    """Mirror mobile m as a tree in which every label is a total weight
    (a planet becomes a leaf labelled with its size)."""
    assert is_mobile(m) or is_planet(m)
    if is_planet(m):
        return tree(size(m))
    subtrees = [totals_tree(end(left(m))), totals_tree(end(right(m)))]
    return tree(total_weight(m), subtrees)
def preorder(t):
    """Return the flat list of labels of t in preorder: each root appears
    before the labels of its subtrees, left to right."""
    labels = [label(t)]
    for subtree in branches(t):
        labels.extend(preorder(subtree))
    return labels
def insert_items(lst, entry, elem):
    """Insert elem after each occurrence of entry in lst, in place.

    Mutates and returns lst.  Freshly inserted elements are jumped over,
    so the loop terminates even when elem == entry.
    """
    index = 0
    while index < len(lst):
        if lst[index] == entry:
            lst.insert(index + 1, elem)
            index += 1  # skip the element we just inserted
        index += 1
    return lst
def has_path(t, word):
    """Whether word can be spelled by the labels along some path that
    starts at t's root and descends through consecutive branches."""
    assert len(word) > 0, 'no path for empty word.'
    if label(t) != word[0]:
        return False
    if len(word) == 1:
        return True
    return any(has_path(b, word[1:]) for b in branches(t))
# Demo tree whose root-to-descendant paths spell 'hi', 'hello' and 'hey'.
greetings = tree('h', [tree('i'), tree('e', [tree('l', [tree('l', [tree('o')])]), tree('y')])])
print(has_path(greetings, 'hell'))
| true |
bf703a22c1e5d8b8921950bd9352e52da819379b | Python | AdvancedNetworkingSystems/IFloodS | /tandem_queue.py | UTF-8 | 2,853 | 3.171875 | 3 | [] | no_license | import random
import heapq
class EventScheduler(object):
    """Minimal discrete-event scheduler backed by a binary heap.

    Events are stored as (absolute_time, payload) pairs; pop_event
    always yields the payload with the smallest scheduled time and
    advances the simulation clock to it.
    """

    def __init__(self):
        self.queue = []   # heap of (absolute_time, event) tuples
        self.time = 0     # current simulation clock
        self.last = 0     # latest time any event was ever scheduled for

    def schedule_event(self, interval, e):
        """Schedule event e to fire `interval` time units from now."""
        when = self.time + interval
        self.last = max(self.last, when)
        heapq.heappush(self.queue, (when, e))

    def pop_event(self):
        """Advance the clock to the earliest pending event, return it."""
        when, event = heapq.heappop(self.queue)
        self.time = when
        return event

    def elapsed_time(self):
        """Current simulation time."""
        return self.time

    def last_event_time(self):
        """Largest timestamp ever scheduled (not necessarily fired)."""
        return self.last
def simulate_multi_queue(time, feedback_prob=0):
    """Simulate a 4-queue tandem network until the clock passes `time`.

    Event id q (0..4) means "a customer arrives at queue q", which for
    q >= 1 doubles as "a customer departs queue q-1"; id 0 is an external
    Poisson arrival.  A CSV trace of the four queue lengths is written to
    queue_length_fprob<feedback_prob>.csv.  With feedback_prob > 0, a
    customer leaving the last queue re-enters queue 2 with that
    probability.
    """
    l = 1 # arrival rate
    customers = {0: 0, 1: 0, 2: 0, 3: 0}
    mean_service_time = {0: 0.5, 1: 0.7, 2: 0.8, 3: 0.95}
    # Service times are uniform on [mean/2, 3*mean/2] for each queue.
    intervals = {q: (mean_service_time[q]/2, mean_service_time[q]*3/2)
                 for q in mean_service_time}
    sched = EventScheduler()
    sched.schedule_event(random.expovariate(l), 0)
    filename = "queue_length_fprob" + str(feedback_prob) + ".csv"
    fp = open(filename, "w")
    fp.write("time,q0len,q1len,q2len,q3len\n")
    while(sched.elapsed_time() < time):
        queue = sched.pop_event()
        # we have an arrival in queue number "queue"
        # update of queue-1
        if queue - 1 < 0:
            # External arrival: immediately draw the next Poisson arrival.
            sched.schedule_event(random.expovariate(l), queue)
        else:
            # A customer just left queue-1; if someone is still waiting
            # there, start the next service (completion = event `queue`).
            customers[queue-1] -= 1
            if customers[queue-1] > 0:
                sched.schedule_event(random.uniform(intervals[queue-1][0],
                                                    intervals[queue-1][1]),
                                     queue)
        # update of queue
        if queue <= 3:
            customers[queue] += 1
            # Queue was empty: the arriving customer enters service now.
            if customers[queue] == 1:
                sched.schedule_event(random.uniform(intervals[queue][0],
                                                    intervals[queue][1]),
                                     queue+1)
        else:
            # Event id 4 = departure from the final queue.
            if feedback_prob > 0:
                # we feedback to queue 2
                feedback_queue = 2
                if random.uniform(0, 1) < feedback_prob:
                    customers[feedback_queue] += 1
                    if customers[feedback_queue] == 1:
                        sched.schedule_event(random.uniform(
                            intervals[feedback_queue][0],
                            intervals[feedback_queue][1]),
                            feedback_queue+1)
        fp.write(str(sched.elapsed_time()) + "," +
                 ",".join([str(el) for el in customers.values()]) + "\n")
    fp.close()
if __name__ == '__main__':
    # One baseline run and two runs with increasing feedback probability;
    # each writes its own queue_length_fprob<p>.csv trace file.
    simulate_multi_queue(time=10000)
    simulate_multi_queue(time=10000, feedback_prob=0.04)
    simulate_multi_queue(time=10000, feedback_prob=0.06)
| true |
0113bb6085f6e4762104f841b0df1783052fc83a | Python | vpalex999/my_grokking_algorithms | /02_selection_sort.py/01_selection_sort.py | UTF-8 | 828 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive | """
Алгоритмы сортировки
"""
def selection_sort(arr):
    """Selection sort, ascending, O(n^2).

    Repeatedly moves the smallest remaining element of ``arr`` into a
    fresh result list.  Note: ``arr`` is consumed (it is empty when the
    function returns), matching the book's pedagogical implementation.
    """
    ordered = []
    while arr:
        # Index of the first occurrence of the minimum.
        smallest = min(range(len(arr)), key=arr.__getitem__)
        ordered.append(arr.pop(smallest))
    return ordered
# Demo run; note that selection_sort empties my_arr as a side effect.
my_arr = [5, 3, 6, 2, 10]
print(selection_sort(my_arr))
| true |
1353442305d442530fb23b880525b2f8e1d6f74e | Python | richruizv/school_scrapper | /fusiona_archivos.py | UTF-8 | 717 | 2.59375 | 3 | [] | no_license | import os
import glob
import pandas as pd
def run():
    """Merge every CSV under csv/prod/ into csv/final/combined_csv.csv.

    Each output row is the original row prefixed with the quoted source
    file name (the base name without the .csv extension).

    Side effects kept from the original: the working directory changes to
    csv/prod/ and the combined file is opened in append mode, so repeated
    runs keep accumulating rows.
    """
    extension = 'csv'
    os.chdir("csv/prod/")
    all_filenames = [os.path.splitext(i)[0] for i in glob.glob('*.{}'.format(extension))]
    # `with` guarantees both handles are closed even on error; the original
    # leaked one input file object per CSV and never closed them.
    with open("../final/combined_csv.csv", "a", encoding="utf-8",
              errors="ignore") as fout:
        for filename in all_filenames:
            with open(filename + '.csv', encoding="utf-8") as fin:
                for line in fin:
                    fout.write('"' + filename + '",' + line)


if __name__ == "__main__":
    run()
f65c578e933b3f1928ee1a4b9db22fb05ac10e81 | Python | ikramulkayes/Python-practice-codewars.com- | /untitled79.py | UTF-8 | 1,917 | 3.421875 | 3 | [] | no_license | seconds = 7755
if seconds < 60:
print(seconds)
elif seconds < 3600:
minutes = seconds//60
seconds = seconds - 60*minutes
print(f"{minutes} minutes and {seconds} seconds")
elif seconds < 86400:
hours = seconds//3600
print(hours)
seconds = seconds - hours * 3600
print(seconds)
if seconds > 60:
minutes = seconds//60
print(minutes)
seconds = seconds - 60*minutes
print(seconds)
if hours > 1 and minutes > 1 and seconds > 1:
print(f"{hours} hours {minutes} minutes and {seconds} seconds")
elif hours > 1 and minutes > 1 and seconds == 1:
print(f"{hours} hours {minutes} minutes and {seconds} second")
elif hours >1 and minutes == 1 and seconds > 1:
print(f"{hours} hours {minutes} minute and {seconds} seconds")
elif hours == 1 and minutes > 1 and seconds >1:
print(f"{hours} hour {minutes} minutes and {seconds} seconds")
elif hours > 1 and minutes == 1 and seconds == 1:
print(f"{hours} hours {minutes} minute and {seconds} second")
elif hours == 1 and minutes == 1 and seconds == 1:
print(f"{hours} hour {minutes} minute and {seconds} second")
elif seconds < 60:
if hours > 1 and seconds > 1:
print(f"{hours} hour and {seconds} seconds")
elif hours > 1 and seconds == 1:
print(f"{hours} hour 74 {seconds} second")
elif hours == 1 and seconds == 1:
print(f"{hours} hour and {seconds} second")
else:
if hours > 1:
print(f"{hours} hours")
else:
print(f"{hours} hours")
elif seconds < 31536000 :
days = seconds //3600
seconds = seconds - days*3600
if seconds > 3600:
hours = seconds // 3600
seconds = seconds - hours*3600
| true |
773be4ccdd4cdab2be178df85f7fbf3cced80ce0 | Python | markcheno/clever_algorithms | /genetic_algorithm.py | UTF-8 | 2,482 | 3.484375 | 3 | [] | no_license | # Genetic Algorithm in the Python Programming Language
# Based on: The Clever Algorithms Project: http://www.CleverAlgorithms.com
# (c) Copyright 2012 Mark Chenoweth.
# This work is licensed under a Creative Commons Attribution-Noncommercial-Share License.
import random,operator
def fitness(bitstring): # OneMax problem. Seeking binary string of all 1's
    """Number of '1' characters in the candidate bitstring."""
    return sum(int(bit) for bit in bitstring)
def random_bitstring(num_bits):
    """Uniformly random string of '0'/'1' characters of the given length."""
    return "".join(random.choice("01") for _ in range(num_bits))
def binary_tournament(pop):
    """Draw two distinct random candidates and return the fitter one."""
    first, second = random.sample(range(len(pop)), 2)
    # < = minimize, > = maximize
    if pop[first]['fitness'] > pop[second]['fitness']:
        return pop[first]
    return pop[second]
def point_mutation(bs, rate):
    """Flip each bit of bs independently with probability `rate`."""
    mutated = []
    for bit in bs:
        if random.random() < rate:
            mutated.append("0" if bit == "1" else "1")
        else:
            mutated.append(bit)
    return "".join(mutated)
def crossover(parent1, parent2, p_crossover):
    """One-point crossover: with probability p_crossover splice the two
    parents at a random cut point, otherwise return parent1 unchanged."""
    if random.random() >= p_crossover:
        return parent1
    cut = random.randint(1, len(parent1) - 1)
    return parent1[:cut] + parent2[cut:]
def reproduce(selected,p_crossover,p_mutation):
    """Create one child per selected parent via crossover then mutation.

    Parent i is paired with its tournament neighbour (i+1 for even i,
    i-1 for odd i; the last parent pairs with the first), so the
    returned generation keeps the population size.  The previous version
    iterated len(selected)-1 times and therefore silently shrank the
    population by one candidate every generation.
    """
    children = []
    for i, parent1 in enumerate(selected):
        if i == len(selected) - 1:
            partner = selected[0]
        elif i % 2 == 0:
            partner = selected[i + 1]
        else:
            partner = selected[i - 1]
        child = {}
        child["bitstring"] = crossover(parent1["bitstring"],
                                       partner["bitstring"], p_crossover)
        child["bitstring"] = point_mutation(child["bitstring"], p_mutation)
        children.append(child)
    return children
def search(max_gens,num_bits,pop_size,p_crossover,p_mutation):
    """Run the generational GA and return the best candidate ever seen.

    `best` tracks the best-ever individual but is not reinserted into the
    population (weak elitism).  Stops early once the OneMax optimum (all
    ones, fitness == num_bits) is reached.
    """
    pop = [{'bitstring':random_bitstring(num_bits)} for i in xrange(pop_size)]
    for c in pop: c["fitness"] = fitness(c["bitstring"])
    best = sorted(pop,key=operator.itemgetter("fitness"))[-1] # [0] = minimize, [-1] = maximize
    for gen in xrange(max_gens):
        selected = [binary_tournament(pop) for i in xrange(pop_size)]
        children = reproduce(selected,p_crossover,p_mutation)
        for c in children: c["fitness"] = fitness(c["bitstring"])
        children = sorted(children,key=operator.itemgetter("fitness"))
        if children[-1]["fitness"] >= best["fitness"]: best=children[-1] # [0] = minimize, [-1] = maximize
        pop = children
        print ">%d: %d, %s" % (gen,best["fitness"],best["bitstring"])
        if best["fitness"]==num_bits: break
    return best
if __name__ == '__main__':
    num_bits = 64 # problem configuration
    max_gens = 50 # algorithm configuration
    pop_size = 100
    p_crossover = 0.98
    # One expected bit flip per candidate bitstring.
    p_mutation = 1.0/num_bits
    best = search(max_gens,num_bits,pop_size,p_crossover,p_mutation)
    print "best: %d, %s" % (best["fitness"],best["bitstring"])
| true |
359b65ddb22bf728a9543b841a82344f5fb7ef63 | Python | peiyic2/dl_codebase | /modules/dataset/scannet_25k.py | UTF-8 | 2,557 | 2.5625 | 3 | [] | no_license | import sys
import os
import json
import numpy as np
import torch
import torchvision
from torchvision import datasets, transforms
from PIL import Image
from .baseset import base_set
class ScanNet25K(datasets.vision.VisionDataset):
    '''
    Semantic segmentation of ScanNet 25K downsampled data.
    Data available at http://kaldir.vc.in.tum.de/scannet/v2/tasks/scannet_frames_25k.zip
    '''
    def __init__(self, root):
        # `root` is the directory *containing* scannet_frames_25k/.
        super(ScanNet25K, self).__init__(root, None, None, None)
        root = os.path.join(root, "scannet_frames_25k")
        scene_list = sorted(os.listdir(root))
        self.img_paths = []       # RGB frames (.jpg), flat across all scenes
        self.target_paths = []    # label maps (.png), aligned with img_paths
        self.to_tensor_func = torchvision.transforms.ToTensor()
        for scene_name in scene_list:
            color_dir = os.path.join(root, scene_name, "color")
            label_dir = os.path.join(root, scene_name, "label")
            color_img_list = sorted(os.listdir(color_dir))
            label_img_list = sorted(os.listdir(label_dir))
            # color and label len should be equal
            assert len(color_img_list) == len(label_img_list)
            # assert frame numbers are the same. We can't use == directly because
            # RGB images are stored as .jpg while labels are stored as .png
            assert [i.split('.')[0] for i in color_img_list] == [i.split('.')[0] for i in label_img_list]
            color_img_paths = [os.path.join(root, scene_name, "color", fn) for fn in color_img_list]
            label_img_paths = [os.path.join(root, scene_name, "label", fn) for fn in label_img_list]
            self.img_paths = self.img_paths + color_img_paths
            self.target_paths = self.target_paths + label_img_paths
    def __getitem__(self, idx):
        """
        Args:
            index (int): Index
        Returns:
            tuple: Tuple (image, target), where target is a tensor of shape (H, W) and type torch.uint64.
                Each element is ranged between (0, num_classes - 1).
        """
        # Get image name and paths
        img = Image.open(self.img_paths[idx]).convert("RGB")
        img = self.to_tensor_func(img)
        target = Image.open(self.target_paths[idx])
        # NOTE(review): np.long was removed in NumPy >= 1.24; this needs
        # np.int64 on current NumPy -- confirm the pinned version.
        target_np = np.array(target, dtype=np.long)
        return img, torch.tensor(target_np)
    def __len__(self):
        return len(self.img_paths)
def get_train_set(cfg):
    """Wrap the ScanNet-25K dataset as the training split."""
    dataset = ScanNet25K("/data")  # NOTE(review): dataset root is hard-coded
    return base_set(dataset, "train", cfg)
def get_val_set(cfg):
    """Wrap the ScanNet-25K dataset as the validation ("test") split."""
    dataset = ScanNet25K("/data")  # NOTE(review): dataset root is hard-coded
    return base_set(dataset, "test", cfg)
48e3279f03b7d5b9fbbf59966af979f0fb4d7963 | Python | brooksandrew/postman_problems | /postman_problems/tests/utils.py | UTF-8 | 668 | 3.4375 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import csv
from io import StringIO
def create_mock_csv_from_dataframe(df):
    """
    Converts a pandas DataFrame to a StringIO object.
    Used to mock tests of functions that read from the filesystem, so we only
    need to keep track of one source of truth.

    Args:
        df (pandas dataframe): to be converted into a StringIO object

    Returns: io.StringIO representation of `df`, rewound to the start
    """
    buffer = StringIO()
    writer = csv.DictWriter(buffer, fieldnames=df.columns)
    writer.writeheader()
    for _, record in df.iterrows():
        writer.writerow(record.to_dict())
    buffer.seek(0)  # rewind so callers can read from the beginning
    return buffer
49e1802cf256529694a4498d0092469e3d1ecbe0 | Python | HaoYun519/Python | /彭彭Python入門/backup/function-args.py | UTF-8 | 346 | 3.71875 | 4 | [] | no_license | # 參數的預設資料
# def power(base,exp=0):
# print(base**exp)
# power(3,2)
# power(4)
# 使用參數名稱對應
# def divide(n1,n2):
# print(n1/n2)
# divide(2,4)
# divide(n2=2,n1=4)
# 無限/不定 參數資料
def avg(*ns):
sum = 0
for n in ns:
sum += n
print(sum/len(ns))
avg(3,4)
avg(3,5,10)
avg(1,4,-1,-8) | true |
76205ea66a486f8e493bc919a3fdf76c983f993d | Python | Dbof/adventcode-15 | /Day 17/day17-1.py | UTF-8 | 739 | 3.515625 | 4 | [] | no_license | # this could be made faster with sorted list
def find_combinations():
con = list(containers)
print(con)
return find(liters, con)
# very simple backtracking algorithm
def find(curr_value, con):
copy = con[:] # create new copy
count = 0
for c in con:
copy.remove(c)
remaining = curr_value - c
if remaining < 0:
# no solution
pass
elif remaining == 0:
# found solution
count += 1
else:
# might have multiple solutions
count += find(remaining, copy)
return count
liters = 150
containers = []
with open('input.txt') as f:
for line in f:
containers += [int(line)]
print(find_combinations())
| true |
04358e55923585ddc2957fe38183cbe1f2142ba9 | Python | SrickySu/myPython | /com/surichard/spider/straightFlush/OutputManager.py | UTF-8 | 645 | 2.96875 | 3 | [] | no_license | #coding=utf-8
'''
Created on 2019年1月20日
@author: 74518
'''
class OutputManager(object):
    """Accumulates scraped (title, value) items and writes them out as CSV lines."""

    def __init__(self):
        self.data = []

    def collectData(self, data):
        """Add one item, or every item of a list; None is ignored."""
        if data is None:
            return
        if isinstance(data, list):
            self.data.extend(data)
        else:
            self.data.append(data)

    def output(self, fileName):
        """Write each collected pair as 'first,second' on its own line."""
        with open(fileName, 'w') as out:
            for item in self.data:
                out.write('%s,%s' % (item[0].encode('utf-8'), item[1].encode('utf-8')))
                out.write('\n')
| true |
32e7a4529cbbc9457e0bbd8f8576068c220092f4 | Python | keyber/RP | /projet/plne.py | UTF-8 | 5,405 | 2.6875 | 3 | [] | no_license | from gurobipy import *
import utils
import solution
from time import time
import numpy as np
# noinspection PyArgumentList
def _ins_to_plne(first, ins: utils.Instance, relaxation_lin, verbose=False):
    """Build and solve the Gurobi ordering model for `ins`.

    A fictitious vertex with zero outgoing cost is appended; the optimal chain
    is then recovered by simply dropping it. Returns the solved x-variable
    values as a list of rows (floats; binary unless `relaxation_lin`).
    """
    # add a fictitious vertex whose incoming/outgoing costs are all zero;
    # the optimal chain is then obtained by removing that vertex
    n = len(ins.data) + 1
    V = range(n)
    m = Model()
    m.setParam('OutputFlag', False)
    t = time()
    # objective-function coefficients (symmetric transition scores)
    c = np.empty((n, n), dtype=int)
    for i in V[:-1]:
        for j in V[i + 1 : -1]:
            s = utils.score_transition_data(ins.data[i], ins.data[j])
            c[i, j] = s
            c[j, i] = s
    # coefficients of the fictitious vertex (entering cost from `first`, leaving cost 0)
    for i in V[:-1]:
        c[n - 1, i] = utils.score_transition_data(first, ins.data[i])
        c[i, n - 1] = 0
    if verbose:
        print("score", round(time()-t, 1))
        t = time()
    # decision variables: x[i][j] == 1 iff j directly follows i
    x = np.empty((n,n), dtype=object)
    for i in V:
        for j in V:
            if relaxation_lin:
                x[i][j] = m.addVar(vtype=GRB.CONTINUOUS, lb=0, ub=1)
            else:
                x[i][j] = m.addVar(vtype=GRB.BINARY)
    for i in V:
        m.addConstr(x[i][i]==0)
    # subtour-elimination flow variables
    z = np.empty((n,n), dtype=object)
    for i in V:
        for j in V:
            z[i][j] = m.addVar(vtype=GRB.CONTINUOUS, lb=0)
            if i == j or j == 0:
                m.addConstr(z[i][j]==0)
    if verbose:
        m.update()
        print("vars", round(time()-t, 1))
        t = time()
    # objective: maximize the total transition score of the chain
    m.setObjective(quicksum(c[i][j] * x[i][j] for i in V for j in V), GRB.MAXIMIZE)
    if verbose:
        m.update()
        print("obj", round(time()-t, 1))
        t = time()
    # degree constraints: exactly one successor and one predecessor per vertex
    for i in V:
        m.addConstr(quicksum(x[i][j] for j in V), GRB.EQUAL, 1, "ContrainteA%d" % i)
    for j in V:
        m.addConstr(quicksum(x[i][j] for i in V) == 1)
    # unnecessary:
    # for i in V:
    #     for j in V:
    #         if i!=j:
    #             m.addConstr(x[i][j] + x[j][i] <= 1)
    # subtour-elimination constraints (single-commodity flow formulation)
    m.addConstr(quicksum(z[0][j] for j in V) == n - 1)
    for i in V[1:]:
        m.addConstr(quicksum(z[i][j] for j in V) + 1 == quicksum(z[j][i] for j in V))
    for i in V:
        for j in V[1:]:
            if i!=j:
                m.addConstr(z[i][j] + z[j][i] <= (n - 1) * (x[i][j] + x[j][i]))
    if verbose:
        m.update()
        print("constr", round(time()-t, 1))
        t = time()
    # update the model to integrate the variables, then solve
    m.optimize()
    if verbose:
        print("optimize", round(time()-t, 1))
    # TODO: keep a reference to m if we ever want to return the variables themselves
    return [[j.x for j in i] for i in x]
def _plne_to_sol(ins, instanciations):
    """Decode the binary assignment matrix into an ordering of the items.

    Starting from the fictitious node (index len(ins.data)), repeatedly follow
    the single successor whose entry is 1 in the current row.
    """
    n_items = len(ins.data)
    node = n_items  # fictitious start node
    order = []
    for _ in range(n_items):
        node = instanciations[node].index(1)
        order.append(node)
    return order
def _plne_to_sol_relaxed(ins, instanciations):
    """Decode a fractional (LP-relaxed) assignment matrix into an ordering.

    From each node, greedily move to the unvisited node with the largest
    fractional value; ties keep the lowest index.
    """
    n_items = len(ins.data)
    node = n_items  # fictitious start node
    visited = {node}
    order = []
    for _ in range(n_items):
        best_val, best_node = -1, None
        for candidate, val in enumerate(instanciations[node]):
            # strict '>' keeps the earliest (lowest-index) maximum
            if val > best_val and candidate not in visited:
                best_val, best_node = val, candidate
        node = best_node
        visited.add(node)
        order.append(node)
    return order
def _ins_to_sol(previous_tags, ins, relaxation_lin, verbose):
    """Solve the (relaxed or exact) model for `ins` and decode it into an ordering.

    `previous_tags` seeds the entering costs of the fictitious start vertex.
    """
    instanciations = _ins_to_plne(previous_tags, ins, relaxation_lin, verbose=verbose)
    t = time()
    # the relaxed matrix is fractional, so it needs the greedy decoder
    if relaxation_lin:
        sol = _plne_to_sol_relaxed(ins, instanciations)
    else:
        sol = _plne_to_sol(ins, instanciations)
    if verbose:
        print("traduction", round(time() - t, 1))
    return sol
def plne_h(pb: utils.Instance, n, relaxation_lin, verbose=False):
    """Order the whole instance by solving the model on chunks of `n` items.

    Each chunk is chained to the previous one through the tags of the last
    item placed, and the resulting positions are written into the solution.
    """
    sol = solution.Solution(pb)
    previous_tags = set()
    for i in range(0, len(pb.data), n):
        sub_pb = utils.Instance.create_instance_h(pb.data[i: i+n])
        sub_res = _ins_to_sol(previous_tags, sub_pb, relaxation_lin, verbose=verbose)
        # sub_res holds chunk-local indices; offset them by the chunk start i
        for j, x in enumerate(sub_res, i):
            sol.setH(j, i+x)
        previous_tags = pb.data[sub_res[-1]]
    return sol
def _test_plne():
    """Benchmark plne_h over several chunk sizes and plot time vs. score."""
    import matplotlib.pyplot as plt
    x = [1, 10, 15, 20]
    times = []
    scores=[]
    for n in x:
        ins = utils.read(1, 5000)
        t = time()
        sol = plne_h(ins, n=n, relaxation_lin=True, verbose=False)
        # sanity checks: the ordering is a permutation covering V and H
        assert len(set(sol.ordre)) == len(sol.ordre)
        assert len(sol.V) + len(sol.H) == len(sol.ordre)
        print("taille", len(ins.data), "temps", round(time()-t, 1), "score glouton_v2", sol.score())
        times.append(time()-t)
        scores.append(sol.score())
    # twin y-axes: runtime on the left, score on the right
    ax1 = plt.gca()
    ax2 = ax1.twinx()
    ax1.plot(x, times)
    ax2.plot(x, scores, color="green")
    plt.show()
if __name__ == '__main__':
_test_plne()
| true |
51a0b673ea6e4e8d367eb08707c3db7783d37806 | Python | ipiyushbhoi/Data-Structures-and-Algorithm-Problems | /binary_search_trees/replace.py | UTF-8 | 476 | 3.453125 | 3 | [] | no_license | '''
Given a string, compute recursively a new string where all appearances of "pi" have been replaced by "3.14".
Sample Input 1 :
xpix
Sample Output :
x3.14x
Sample Input 2 :
pipi
Sample Output :
3.143.14
Sample Input 3 :
pip
Sample Output :
3.14p
'''
def replace(s):
    """Recursively replace every occurrence of "pi" in `s` with "3.14".

    Fixes the original bug where the scanning loop returned
    '3.14' + replace(rest) from the *first* match, silently dropping every
    character before it (e.g. "xpix" produced "3.14x" instead of "x3.14x").
    Only the head of the string is inspected at each step.
    """
    if len(s) < 2:
        return s
    if s[:2] == 'pi':
        # consume the match and keep scanning after it
        return '3.14' + replace(s[2:])
    # keep the first character and scan the remainder
    return s[0] + replace(s[1:])
# Read one line from stdin and print it with every "pi" replaced by "3.14".
n=str(input())
print(replace(n))
| true |
bf86aa6a243bbbb7a729014f90d775279f303917 | Python | mv-raman/Notebooks_repo | /spark_training/training_1/friends_by_age_key_value.py | UTF-8 | 700 | 3.046875 | 3 | [] | no_license | from pyspark import SparkContext,SparkConf
import collections
conf=SparkConf().setMaster("local").setAppName("FriendsByAge")
sc=SparkContext(conf=conf)
def parseLine(line):
    """Parse one CSV record and return (age, numFriends) as ints (fields 2 and 3)."""
    fields = line.split(',')
    return int(fields[2]), int(fields[3])
# Spark pipeline: average number of friends per age.
lines=sc.textFile("/home/venkat/Documents/Repo_Download/spark_course_1/fakefriends.csv")
rdd=lines.map(parseLine)
# (age, friends) -> (age, (sum_of_friends, count)) via map-side pairs + reduce
totalsByAge=rdd.mapValues(lambda x:(x,1)).reduceByKey(lambda x,y:(x[0]+y[0],x[1]+y[1]))
averagesByAge=totalsByAge.mapValues(lambda x:x[0]/x[1])
results=averagesByAge.collect()
# sort the collected (age, avg) pairs by age for stable output
sortedResults=collections.OrderedDict(sorted(results))
for key,value in sortedResults.items():
print("%s %i"%(key,value)) | true |
d2f02a87add735510e73d71a1ac078e11ab25fe1 | Python | limo1996/ETH-DataScience | /src/contour/ContourDrawer.py | UTF-8 | 3,033 | 3.53125 | 4 | [] | no_license | '''
File name: ContourDrawer.py
Author: Jakub Lichman
Date created: 4/10/2018
Python Version: 3.6.3
'''
import os
from gmplot import gmplot
from .ContourGradients import getGradient, HeatType
class Coordinate(object):
    """A (latitude, longitude) pair."""
    lat: float
    lon: float

    def __init__(self, lat: float, lon: float):
        """Creates new instance of Coordinate."""
        self.lat = lat
        self.lon = lon

    @staticmethod
    def from_string(line):
        """Parse a 'lat lon' (space-separated) line into a Coordinate.

        Declared @staticmethod: the original bare function only worked when
        accessed through the class, not through an instance.
        """
        split = line.split(' ')
        return Coordinate(float(split[0]), float(split[1]))

    def to_tuple(self):
        """Return the coordinates as a (lat, lon) tuple."""
        return (self.lat, self.lon)

    def to_string(self):
        """Return the 'lat lon' string form (inverse of from_string)."""
        return '{0} {1}'.format(self.lat, self.lon)
class Settings(object):
    """ Class that holds settings for contour map drawing """
    center: Coordinate
    radius: int
    zoom: int
    heat_type: HeatType

    @staticmethod
    def get_default():
        """Return default settings (Zurich center, green-to-red gradient).

        Declared @staticmethod: the original bare function only worked when
        accessed through the class, not through an instance.
        """
        return Settings(Coordinate(47.376197, 8.545886), 40, 12,
                        HeatType.GREEN_TO_EVERYTHING_RED)

    def __init__(self, center: Coordinate, radius: int, zoom: int, heat_type: HeatType):
        """ Creates new instance of Settings """
        self.center = center
        self.radius = radius
        self.zoom = zoom
        self.heat_type = heat_type
class ContourDrawer(object):
    """Loads coordinate rows from a file and renders them as a Google Maps heatmap."""

    def load_data(self, path, separator, columns):
        """Load coordinates from `path`.

        The first line is a header of the form 'title|...'; every following
        non-blank line is parsed into a Coordinate via parse_line().
        """
        with open(path) as source:
            lines = source.read().split('\n')
        self.title = lines[0].split('|')[0]
        self.data = [
            self.parse_line(line, separator, columns)
            for line in lines[1:]
            if line and not line.isspace()
        ]

    def get_data(self):
        """Return the coordinates loaded by load_data()."""
        return self.data

    def parse_line(self, line, separator, columns):
        """Split `line` on `separator` and build a Coordinate from the two column indices."""
        parts = line.split(separator)
        assert len(columns) == 2
        return Coordinate(float(parts[columns[0]]), float(parts[columns[1]]))

    def draw_contour_google_map(self, settings: Settings, out_file):
        """Render the loaded data as a heatmap and save it to `out_file` (.html)."""
        latitudes = [coord.lat for coord in self.data]
        longitudes = [coord.lon for coord in self.data]
        # google map plotter centered per the settings
        plotter = gmplot.GoogleMapPlotter(
            settings.center.lat,
            settings.center.lon,
            settings.zoom)
        # heat-map layer with the configured radius and gradient
        plotter.heatmap(
            latitudes,
            longitudes,
            radius=settings.radius,
            gradient=getGradient(settings.heat_type))
        # plot and save to an html file
        assert out_file.endswith('.html')
        plotter.draw(out_file)
1c0b02bcca34ee6ff68894739bb05cc10a0a64f8 | Python | nischalshrestha/PyMonkey | /src/monkey/tokens/token.py | UTF-8 | 967 | 3.15625 | 3 | [
"MIT"
] | permissive | from typing import NamedTuple
# Constants
ILLEGAL = "ILLEGAL"
EOF = "EOF"
# Identifiers + literals
IDENT = "IDENT" # add, foobar, x, y, ...
INT = "INT" # 1343456
STRING = "STRING"
# Operators
ASSIGN = "="
PLUS = "+"
MINUS = "-"
BANG = "!"
ASTERISK = "*"
SLASH = "/"
LT = "<"
GT = ">"
EQ = "=="
NOT_EQ = "!="
# Delimiters
COMMA = ","
SEMICOLON = ";"
LPAREN = "("
RPAREN = ")"
LBRACE = "{"
RBRACE = "}"
LBRACKET = "["
RBRACKET = "]"
COLON = ":"
# Keywords
FUNCTION = "FUNCTION"
LET = "LET"
TRUE = "TRUE"
FALSE = "FALSE"
IF = "IF"
ELSE = "ELSE"
RETURN = "RETURN"
MACRO = "MACRO"
# immutable 'struct'
class Token(NamedTuple):
    """A single lexical token: a type constant (one of the module constants above) and the literal source text."""
    Type: str
    Literal: str
keywords = {
'fn': FUNCTION,
'let': LET,
'true': TRUE,
'false': FALSE,
'if': IF,
'else': ELSE,
'return': RETURN,
'macro': MACRO
}
def lookup_ident(ident):
    """Return the keyword token type for `ident`, or IDENT when it is not a keyword."""
    # Single dict lookup instead of the membership-test-then-index pattern.
    return keywords.get(ident, IDENT)
94710c4a889c01c52e5c231f66781419e4590597 | Python | Adrriii/ia-capture-the-flag | /src/game/ai/behaviorTree/NodeTree.py | UTF-8 | 1,816 | 3.40625 | 3 | [] | no_license | from abc import (ABCMeta, abstractmethod)
import copy
class NodeTree(metaclass=ABCMeta):
    """
    Base class for nodes of a behavior tree.

    Attributes:
        _nodes (list): direct children of this node.
        _currentlyProcessing (NodeTree): the node currently being processed;
            cached because a tick can span several frames and looking it up
            here is faster than searching the whole tree.

    Enumeration:
        RUNNING: returned by tick() while the computation is not finished.
        SUCCESS: returned by tick() on success.
        FAILURE: returned by tick() on failure.
    """

    RUNNING = 0
    SUCCESS = 1
    FAILURE = 2

    def __init__(self):
        self._nodes = list()
        self._currentlyProcessing = None

    @abstractmethod
    def tick(self, dt):
        """
        Process this node for one frame; core method of the behavior tree.

        Parameters:
            dt (int): delta time, forwarded to time-aware leaf behaviors.

        Returns:
            int: NodeTree.RUNNING, NodeTree.SUCCESS or NodeTree.FAILURE.
        """
        ...

    def appendNode(self, node):
        """Add `node` at the end of the child list."""
        self._nodes.append(node)

    def insert_node(self, node, index=0):
        """Insert `node` at position `index` (default: the front) of the child list."""
        self._nodes.insert(index, node)

    def getNodes(self):
        """Return a shallow copy of the direct children."""
        return copy.copy(self._nodes)
54cb16805945696299f657dbed08777322b8004c | Python | zerosum99/python_basic | /myPython/class/mixin.py | UTF-8 | 906 | 3.5 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 29 11:22:03 2016
@author: 06411
"""
class Person:
    """A person identified by first name, surname and a registration number."""

    def __init__(self, name, surname, number):
        self.name = name
        self.surname = surname
        self.number = number
def __init__(self):
self.classes = []
def enrol(self, course):
self.classes.append(course)
class TeacherMixin:
def __init__(self):
self.courses_taught = []
def assign_teaching(self, course):
self.courses_taught.append(course)
class Tutor(Person, LearnerMixin, TeacherMixin):
def __init__(self, *args, **kwargs):
Person.__init__(self,*args, **kwargs)
LearnerMixin.__init__(self)
TeacherMixin.__init__(self)
jane = Tutor("Jane", "Smith", "SMTJNX045")
jane.enrol('a_postgrad_course')
jane.assign_teaching('an_undergrad_course')
print jane.classes
print jane.courses_taught
| true |
4c25b065998a7ef71a392e5e82c1186c2fef761b | Python | AmrHRAbdeen/Python | /EGY_COVID_19_tracker.py | UTF-8 | 678 | 3.234375 | 3 | [] | no_license | ######################################################
# Developing a script to GET EGYPT COVID_19 Stats
######################################################
import pandas
import requests
# URL for COVID_19 Stats
URL = "https://www.worldometers.info/coronavirus/"
# GET request to URL
requestRes = requests.get(URL)
pageContent = requestRes.text
#print(pageContent)
# Pandas used to convert HTML tables into dataFrames
dataFrames = pandas.read_html(pageContent)
#print(dataFrames)
#access table of interest
dataFrame = dataFrames[0]
# Length of Rows
rowsLen = dataFrame.index
print(rowsLen)
# Search for Egypt in DataFrame
# NOTE(review): rowsLen is a pandas index, so rowsLen[1] is a single label
# (likely an int) and len() on it would raise TypeError -- this loop looks
# unfinished; confirm the intended lookup before relying on it.
for cnt in range(0, len(rowsLen[1])):
    pass
| true |
8faa138df7939b39dc4421150b44f36c0312987c | Python | AvinashAnad/HackerRank | /wordorder.py | UTF-8 | 329 | 2.8125 | 3 | [] | no_license | n=int(input())
# HackerRank "Word Order": read n words, then print the number of distinct
# words followed by the occurrence count of each word in order of first
# appearance. (The original line
#   [l[i],l.count(l[i]) for i in range(len(l))]
# was a SyntaxError -- an unparenthesized tuple inside a list comprehension --
# and the script never produced output.)
n = int(input())
words = []
if 1 <= n <= 10 ** 5:
    words = [input() for _ in range(n)]

# dicts preserve insertion order (Python 3.7+), which keeps first-appearance order
counts = {}
for word in words:
    counts[word] = counts.get(word, 0) + 1

print(len(counts))
print(*counts.values())
e957eb98900e9a48a1739742892f2c1f9d0f62be | Python | konng88/My_Algorithms | /LeetCode/1.py | UTF-8 | 267 | 3.453125 | 3 | [] | no_license | def twoSum(nums,target):
def twoSum(nums, target):
    """Return [i, j] (i < j) for the first pair with nums[i] + nums[j] == target.

    Scans pairs in lexicographic (i, j) order, matching the classic brute
    force; returns None when no pair sums to target.
    """
    count = len(nums)
    for first in range(count):
        for second in range(first + 1, count):
            if nums[first] + nums[second] == target:
                return [first, second]
# Demo: 15 + 18 = 33, found at indices 3 and 4.
solution = twoSum(nums = [2, 7, 11, 15, 18 ,21], target = 33)
print(solution)
| true |
39f65d09051d63288cbac2405418cf527b40a138 | Python | 27Saidou/cours_python | /MetaClasseCompteur.py | UTF-8 | 576 | 3.671875 | 4 | [] | no_license | class MetaclasseCompteur(type):
"""Une méta-classe pour aider à compter les instances créées."""
def __init__(cls, *args, **kwargs):
super().__init__(*args, **kwargs)
cls._nb_instances = 0
@property
def nb_instances(cls):
return cls._nb_instances
def plus_une_instance(cls):
cls._nb_instances += 1
class MaClasse(metaclass=MetaclasseCompteur):
    """Example class whose instances are counted by MetaclasseCompteur."""
    def __init__(self):
        MaClasse.plus_une_instance()  # bump the class-level instance counter
# Demo: three instances -> the counter reads 3; the MRO shows the metaclass-built class.
m1 = MaClasse()
m2 = MaClasse()
m3 = MaClasse()
print(MaClasse.nb_instances)
print(type(m1).mro())
| true |
48b6c161dfd062684491efe8b90bc418e45008a9 | Python | LiLi-scripts/python_basic-1 | /hw2/numbers.py | UTF-8 | 1,191 | 4.46875 | 4 | [] | no_license | """
Given a number from 1 to 999:
1. Find the sum of its digits (for the 2-digit case see lesson1/3_practice_operators.py).
2. Print the order of the digits (increasing / decreasing / mixed).
"""
# One possible solution to the problem is described below.
number = int(input())
if 1 <= number < 10:  # number >= 1 and number < 10
    print('sum =', number)
elif 10 <= number < 100:
    # tens and units digits
    digit_1 = number // 10
    digit_2 = number % 10
    print('sum =', digit_1 + digit_2)
    if digit_1 < digit_2:
        print('increase')
    elif digit_1 > digit_2:
        print('decrease')
    else:
        print('equal')
elif 100 <= number < 1000:
    # hundreds, tens and units digits
    digit_1 = number // 100
    digit_2 = number // 10 % 10
    digit_3 = number % 10
    print('sum =', digit_1 + digit_2 + digit_3)
    if digit_1 < digit_2 < digit_3:
        print('increase')
    elif digit_1 > digit_2 > digit_3:
        print('decrease')
    elif digit_1 == digit_2 == digit_3:
        print('equal')
    else:
        print('random')
else:
    print('number must be from 1 to 999')
| true |
e65775c71a8b6c8c5b469568997766b31aed5cc1 | Python | sanjaybv/advent-of-code | /2016/day03/one.py | UTF-8 | 316 | 3.234375 | 3 | [] | no_license | count = 0
# Python 2 code: `print` statements below, and map() returning a subscriptable list.
with open('input.txt') as input_file:
    for line in input_file:
        # each line holds the three side lengths of a candidate triangle
        nums = map(int, line.strip().split())
        print nums
        # triangle inequality: every side must be shorter than the sum of the other two
        if nums[0] < nums[1] + nums[2] and\
            nums[1] < nums[0] + nums[2] and\
            nums[2] < nums[0] + nums[1]:
            count += 1
print count
| true |
ba3d339eaf78fdff358646f457e7f82cfe9e7ee5 | Python | jiyabing/learning | /开班笔记/python基础部分/day16/code/assert.py | UTF-8 | 351 | 4.125 | 4 | [] | no_license | def get_age():
def get_age():
    """Prompt for an age and validate it is within [0, 140).

    Raises AssertionError explicitly (same type and messages as before, so the
    caller's ``except AssertionError`` handler keeps working) instead of using
    ``assert`` statements, which are silently stripped when Python runs with -O.
    """
    a = int(input('输入年龄:'))
    if not a < 140:
        raise AssertionError('年龄不可能大于140!')
    if not a >= 0:
        raise AssertionError('年龄不能为负数!')
    return a
try:
    age = get_age()
except AssertionError as err:
    print('发生了断言错误,错误对象是:', err)
    age = 0  # fall back to a safe default
print('输入的年龄是:', age)
| true |
1645a4d0e45c68b7f4e67c930b2afc5d647b3d3e | Python | marleentheyoung/team_SEB | /project_code/algorithms/hillclimber.py | UTF-8 | 10,410 | 3.28125 | 3 | [] | no_license | # Team SEB
# Minor Programmeren (Programmeertheorie)
# hillclimber.py
#
# - HillClimber algorithm.
from project_code.classes.land import Land
from project_code.classes.house import House
from copy import deepcopy
from shapely.geometry import Polygon
from project_code.visualisations.visualise import visualise
import matplotlib.pyplot as plt
class HillClimber():
    """
    Hill climber for the housing map: in every pass each house is tentatively
    shifted in the eight compass directions and the single best improving move
    (if any) is committed. The step size shrinks from 8 to 1 over 55 passes.

    This replaces the original eight near-identical 13-line direction stanzas
    with one loop over a direction table plus a helper, preserving the
    evaluation order, the strict '>' tie-breaking, and all bookkeeping.
    """

    # The eight candidate moves as (dx, dy) unit offsets; order matters because
    # ties between equal scores keep the earliest direction.
    _DIRECTIONS = (
        (1, 0),    # right
        (-1, 0),   # left
        (0, 1),    # up
        (0, -1),   # down
        (1, 1),    # top-right corner
        (-1, 1),   # top-left corner
        (1, -1),   # bottom-right corner
        (-1, -1),  # bottom-left corner
    )

    def __init__(self, housing_map):
        # the optimized map produced by the climb
        self.winner = self.run(housing_map)

    def valid_move(self, house, waters):
        '''
        checks if the movement of a house is valid
        '''
        # a house may never overlap a body of water
        for water in waters:
            if water.polygon.intersects(house.polygon):
                return False
        # the house plus its mandatory free space must stay inside the 180x160 map
        if house.bottom_left[0] < house.free_space or house.bottom_left[0] > 180 - house.width_with_freespace or house.bottom_left[1] < house.free_space or house.bottom_left[1] > 160 - house.depth_with_freespace or house.top_right[0] < house.width_with_freespace or house.top_right[0] > 180 - house.free_space or house.top_right[1] < house.depth_with_freespace or house.top_right[1] > 160 - house.free_space:
            return False
        return True

    def _step_size(self, i):
        """Step length for pass `i` of 55 (8 shrinking to 1).

        The original schedule left passes 28-31 and 36-39 unassigned, so they
        kept the previous step (4 and 3 respectively); this mapping reproduces
        that behaviour explicitly.
        """
        if i < 7:
            return 8
        if i < 14:
            return 7
        if i < 21:
            return 6
        if i < 24:
            return 5
        if i < 32:
            return 4
        if i < 40:
            return 3
        if i < 44:
            return 2
        return 1

    def _evaluate_move(self, copy_map, house, waters, dx, dy):
        """Shift `house` by (dx, dy), score the map, then undo the shift.

        Returns the map value of the shifted layout, or 0 when the shifted
        position is invalid. Always restores the house's position and resets
        copy_map.total to 0, matching the original per-direction bookkeeping.
        """
        value = 0
        house.move(dx, dy)
        if self.valid_move(house, waters):
            copy_map.calculate_distance(copy_map.all_land_objects)
            value = copy_map.calculate_price(copy_map.all_land_objects)
        house.move(-dx, -dy)
        copy_map.total = 0
        return value

    def run(self, housing_map):
        '''
        moves all houses 55 times with steps beginning from 8 till 1
        '''
        print("housing map value:")
        print(housing_map.total)
        copy_map = deepcopy(housing_map)
        current_best_value = copy_map.total
        counter = 0
        iterations = 55
        # data for the iteration graph
        iteration_counter = 0
        results = []
        total_iterations = []
        for i in range(iterations):
            steps = self._step_size(i)
            for house in copy_map.all_land_objects:
                copy_map.total = 0
                if house.name != "water":
                    # try all eight directions and remember the best improving one
                    best_move = None
                    for unit_dx, unit_dy in self._DIRECTIONS:
                        dx, dy = unit_dx * steps, unit_dy * steps
                        value = self._evaluate_move(copy_map, house,
                                                    housing_map.water, dx, dy)
                        # strict '>' keeps the earliest direction on ties
                        if value > current_best_value:
                            current_best_value = value
                            best_move = (dx, dy)
                    # commit the decisive move, if any direction improved the value
                    if best_move is not None:
                        house.move(*best_move)
                counter += 1
                print(f"counter: {counter}")
                # iteration-graph data (recorded per house, as in the original)
                iteration_counter += 1
                total_iterations.append(iteration_counter)
                results.append(current_best_value)
        # calculate the value of the returned map
        copy_map.calculate_distance(copy_map.all_land_objects)
        copy_map.calculate_price(copy_map.all_land_objects)
        copy_map.calculate_price_real(copy_map.all_land_objects)
        print(f"total: {copy_map.total}")
        print(f"total_real: {copy_map.total_real}")
        plt.plot(total_iterations, results)
        plt.xlabel('x - axis')
        plt.ylabel('y - axis')
        plt.title('Iteration graph')
        plt.savefig('output/iteration_graph.png')
        return copy_map
f415bcffc93d7548694c1efe9ea100aa497abcdd | Python | iPERDance/iPERCore | /iPERCore/tools/utils/filesio/persistence.py | UTF-8 | 1,426 | 2.53125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | # Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import os
import pickle
import json
import toml
def mkdirs(paths):
    """Create one directory or a list of directories (parents included).

    The original also guarded ``not isinstance(paths, str)``, which is dead
    code: a str is never a list instance.

    Args:
        paths: a single path string, or a list of path strings.

    Returns:
        The same `paths` argument, for call chaining.
    """
    if isinstance(paths, list):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)
    return paths


def mkdir(path):
    """Create `path` (including parents) if it does not already exist; return it."""
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
    return path
def clear_dir(path):
    """Ensure `path` exists and is an empty directory; return it.

    A non-empty directory is deleted first, then (re)created. The trailing
    call to the sibling mkdir() helper is inlined as os.makedirs(...,
    exist_ok=True) -- identical behaviour, one less hidden dependency.
    """
    import shutil
    if os.path.exists(path) and os.path.isdir(path) and len(os.listdir(path)) > 0:
        shutil.rmtree(path)
    os.makedirs(path, exist_ok=True)
    return path
def load_pickle_file(pkl_path):
    """Deserialize and return the object stored in `pkl_path` (latin1 fallback for py2 pickles)."""
    with open(pkl_path, "rb") as fp:
        return pickle.load(fp, encoding="latin1")


def write_pickle_file(pkl_path, data_dict):
    """Serialize `data_dict` to `pkl_path` using pickle protocol 2 (py2-readable)."""
    with open(pkl_path, "wb") as fp:
        pickle.dump(data_dict, fp, protocol=2)
def load_json_file(json_file):
    """Parse the JSON file at `json_file` and return the decoded object."""
    with open(json_file, "r") as fp:
        return json.load(fp)


def write_json_file(json_file, data_dict):
    """Serialize `data_dict` as JSON into `json_file`."""
    # json.dump writes the same bytes as writelines(json.dumps(...)) did.
    with open(json_file, "w") as fp:
        json.dump(data_dict, fp)
def load_toml_file(toml_file):
    """Parse the TOML file at `toml_file` (UTF-8) and return its contents as a dict."""
    with open(toml_file, "r", encoding="utf-8") as f:
        data = toml.load(f)
    return data
def write_toml_file(toml_file, data_dict):
    """Serialize `data_dict` as TOML into `toml_file` (UTF-8)."""
    with open(toml_file, "w", encoding="utf-8") as fp:
        toml.dump(data_dict, fp)
| true |
693e20bbe87f9f358aa5d85f90d54ed8aae91b1a | Python | Zigmuntovich/python_training | /test_login_danfoss_itp.py | UTF-8 | 5,469 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest
from data_class import Data
class UntitledTestCase(unittest.TestCase):
    """Selenium UI test: log in to the Danfoss ITP web app and start a calculation.

    NOTE(review): hard-coded credentials below should be rotated and moved to
    configuration; the absolute nz-* XPaths are brittle against layout changes.
    """
    def setUp(self):
        # fresh Chrome session per test, with a generous implicit wait
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(30)
        self.base_url = "https://www.google.com/"
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_login_danfoss_itp(self):
        """End-to-end flow: open the app, log in, fill the form, run the calculation."""
        driver = self.driver
        self.open_home_page(driver)
        self.login(driver, user_email="@ukr.net", password="afpvfg2")
        self.fill_data(driver, Data(p1=10, p2=3, t1=150, t2=70, heat_power=300, static_height=35, t11=90))
        driver.find_element_by_xpath("//button/*[text()='Розрахунок']/..").click()
    def fill_data(self, driver, data: Data):
        """Type the calculation parameters from `data` into the form fields."""
        # calculation name
        driver.find_element_by_xpath("//textarea").click()
        driver.find_element_by_xpath("//textarea").clear()
        driver.find_element_by_xpath("//textarea").send_keys(u"Новий розрахунок 2")
        # numeric inputs p1, p2, t1, t2
        driver.find_element_by_xpath("//input").click()
        driver.find_element_by_xpath("//input").clear()
        driver.find_element_by_xpath("//input").send_keys(data.p1)
        driver.find_element_by_xpath("//div[2]/div[2]/nz-input-number/div[2]/input").click()
        driver.find_element_by_xpath("//div[2]/div[2]/nz-input-number/div[2]/input").clear()
        driver.find_element_by_xpath("//div[2]/div[2]/nz-input-number/div[2]/input").send_keys(data.p2)
        driver.find_element_by_xpath("//div[3]/div[2]/nz-input-number/div[2]/input").click()
        driver.find_element_by_xpath("//div[3]/div[2]/nz-input-number/div[2]/input").clear()
        driver.find_element_by_xpath("//div[3]/div[2]/nz-input-number/div[2]/input").send_keys(data.t1)
        driver.find_element_by_xpath("//div[4]/div[2]/nz-input-number/div[2]/input").click()
        driver.find_element_by_xpath("//div[4]/div[2]/nz-input-number/div[2]/input").clear()
        driver.find_element_by_xpath("//div[4]/div[2]/nz-input-number/div[2]/input").send_keys(data.t2)
        # system-type and connection-type dropdowns (first option each)
        driver.find_element_by_xpath(
            u"(.//*[normalize-space(text()) and normalize-space(.)='Тип системи:'])[1]/following::nz-select-top-control[1]").click()
        driver.find_element_by_xpath(
            "//div[@id='cdk-overlay-0']/nz-option-container/div/cdk-virtual-scroll-viewport/div/nz-option-item/div").click()
        driver.find_element_by_xpath(
            u"(.//*[normalize-space(text()) and normalize-space(.)=concat('Тип під', \"'\", 'єднання:')])[1]/following::nz-select-top-control[1]").click()
        driver.find_element_by_xpath(
            "//div[@id='cdk-overlay-1']/nz-option-container/div/cdk-virtual-scroll-viewport/div/nz-option-item/div").click()
        # heat power, static height, t11
        driver.find_element_by_xpath("//div[3]/div[2]/div[4]/div[2]/nz-input-number/div[2]/input").click()
        driver.find_element_by_xpath("//div[3]/div[2]/div[4]/div[2]/nz-input-number/div[2]/input").clear()
        driver.find_element_by_xpath("//div[3]/div[2]/div[4]/div[2]/nz-input-number/div[2]/input").send_keys(data.heat_power)
        driver.find_element_by_xpath("//div[5]/div[2]/nz-input-number/div[2]/input").click()
        driver.find_element_by_xpath("//div[5]/div[2]/nz-input-number/div[2]/input").clear()
        driver.find_element_by_xpath("//div[5]/div[2]/nz-input-number/div[2]/input").send_keys(data.static_height)
        driver.find_element_by_xpath("//div[6]/div[2]/nz-input-number/div[2]/input").click()
        driver.find_element_by_xpath("//div[6]/div[2]/nz-input-number/div[2]/input").clear()
        driver.find_element_by_xpath("//div[6]/div[2]/nz-input-number/div[2]/input").send_keys(data.t11)
    def login(self, driver, user_email, password):
        """Fill the login form and submit it (button next to 'Forgot password?')."""
        driver.find_element_by_xpath("//input[@type='text']").click()
        driver.find_element_by_xpath("//input[@type='text']").clear()
        driver.find_element_by_xpath("//input[@type='text']").send_keys(user_email)
        driver.find_element_by_xpath("//input[@type='password']").click()
        driver.find_element_by_xpath("//input[@type='password']").clear()
        driver.find_element_by_xpath("//input[@type='password']").send_keys(password)
        driver.find_element_by_xpath(
            u"(.//*[normalize-space(text()) and normalize-space(.)='Забули пароль?'])[1]/following::button[1]").click()
    def open_home_page(self, driver):
        """Navigate to the application under test."""
        driver.get("https://itp-danfoss.web.app/")
    def is_element_present(self, how, what):
        """Return True if the element can be located, swallowing NoSuchElementException."""
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e:
            return False
        return True
    def is_alert_present(self):
        """Return True if an alert is currently displayed.

        NOTE(review): switch_to_alert() is deprecated in modern Selenium in
        favour of driver.switch_to.alert -- confirm the pinned Selenium version.
        """
        try: self.driver.switch_to_alert()
        except NoAlertPresentException as e:
            return False
        return True
    def close_alert_and_get_its_text(self):
        """Accept or dismiss the current alert (per accept_next_alert) and return its text."""
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True
    def tearDown(self):
        """Quit the browser and fail the test if any verification errors were recorded."""
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| true |
866be6bd4749d4b05a55dafbe20063521d987175 | Python | davidhendel/galpy | /galpy/actionAngle/actionAngleIsochroneInverse.py | UTF-8 | 7,628 | 2.59375 | 3 | [
"BSD-3-Clause"
] | permissive | ###############################################################################
# actionAngle: a Python module to calculate actions, angles, and frequencies
#
# class: actionAngleIsochroneInverse
#
# Calculate (x,v) coordinates for the Isochrone potential from
# given actions-angle coordinates
#
###############################################################################
import numpy
from scipy import optimize
from ..util import conversion
from ..potential import IsochronePotential
from .actionAngleInverse import actionAngleInverse
class actionAngleIsochroneInverse(actionAngleInverse):
"""Inverse action-angle formalism for the isochrone potential, on the Jphi, Jtheta system of Binney & Tremaine (2008); following McGill & Binney (1990) for transformations"""
    def __init__(self,*args,**kwargs):
        """
        NAME:

           __init__

        PURPOSE:

           initialize an actionAngleIsochroneInverse object

        INPUT:

           Either:

              b= scale parameter of the isochrone parameter (can be Quantity)

              ip= instance of a IsochronePotential

              ro= distance from vantage point to GC (kpc; can be Quantity)

              vo= circular velocity at ro (km/s; can be Quantity)

        OUTPUT:

           instance

        HISTORY:

           2017-11-14 - Started - Bovy (UofT)

        """
        actionAngleInverse.__init__(self,*args,**kwargs)
        if not 'b' in kwargs and not 'ip' in kwargs: #pragma: no cover
            raise IOError("Must specify b= for actionAngleIsochrone")
        if 'ip' in kwargs:
            # take scale and amplitude directly from the supplied potential
            ip= kwargs['ip']
            if not isinstance(ip,IsochronePotential): #pragma: no cover
                raise IOError("'Provided ip= does not appear to be an instance of an IsochronePotential")
            # Check the units
            self._pot= ip
            self._check_consistent_units()
            self.b= ip.b
            self.amp= ip._amp
        else:
            # only b given: build the amplitude from it
            # presumably this normalization sets the circular velocity at r=1
            # to 1 in internal units -- TODO confirm against IsochronePotential
            self.b= conversion.parse_length(kwargs['b'],ro=self._ro)
            rb= numpy.sqrt(self.b**2.+1.)
            self.amp= (self.b+rb)**2.*rb
        # In case we ever decide to implement this in C...
        self._c= False
        ext_loaded= False
        if ext_loaded and (('c' in kwargs and kwargs['c'])
                           or not 'c' in kwargs): #pragma: no cover
            self._c= True
        else:
            self._c= False
        if not self._c:
            self._ip= IsochronePotential(amp=self.amp,b=self.b)
            #Define _pot, because some functions that use actionAngle instances need this
            self._pot= IsochronePotential(amp=self.amp,b=self.b)
        # Check the units
        self._check_consistent_units()
        return None
def _evaluate(self,jr,jphi,jz,angler,anglephi,anglez,**kwargs):
"""
NAME:
__call__
PURPOSE:
evaluate the phase-space coordinates (x,v) for a number of angles on a single torus
INPUT:
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
angler - radial angle (array [N])
anglephi - azimuthal angle (array [N])
anglez - vertical angle (array [N])
tol= (object-wide value) goal for |dJ|/|J| along the torus
OUTPUT:
[R,vR,vT,z,vz,phi]
HISTORY:
2017-11-14 - Written - Bovy (UofT)
"""
return self._xvFreqs(jr,jphi,jz,angler,anglephi,anglez,**kwargs)[:6]
def _xvFreqs(self,jr,jphi,jz,angler,anglephi,anglez,**kwargs):
"""
NAME:
xvFreqs
PURPOSE:
evaluate the phase-space coordinates (x,v) for a number of angles on a single torus as well as the frequencies
INPUT:
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
angler - radial angle (array [N])
anglephi - azimuthal angle (array [N])
anglez - vertical angle (array [N])
OUTPUT:
([R,vR,vT,z,vz,phi],OmegaR,Omegaphi,Omegaz)
HISTORY:
2017-11-15 - Written - Bovy (UofT)
"""
L= jz+numpy.fabs(jphi) # total angular momentum
L2= L**2.
sqrtfourbkL2= numpy.sqrt(L2+4.*self.b*self.amp)
H= -2.*self.amp**2./(2.*jr+L+sqrtfourbkL2)**2.
# Calculate the frequencies
omegar= (-2.*H)**1.5/self.amp
omegaz= (1.+L/sqrtfourbkL2)/2.*omegar
# Start on getting the coordinates
a= -self.amp/2./H-self.b
ab= a+self.b
e= numpy.sqrt(1.+L2/(2.*H*a**2.))
# Solve Kepler's-ish equation; ar must be between 0 and 2pi
angler= (numpy.atleast_1d(angler) % (-2.*numpy.pi)) % (2.*numpy.pi)
anglephi= numpy.atleast_1d(anglephi)
anglez= numpy.atleast_1d(anglez)
eta= numpy.empty(len(angler))
for ii,ar in enumerate(angler):
try:
eta[ii]= optimize.newton(lambda x: x-a*e/ab*numpy.sin(x)-ar,
0.,
lambda x: 1-a*e/ab*numpy.cos(x))
except RuntimeError:
# Newton-Raphson did not converge, this has to work,
# bc 0 <= ra < 2pi the following start x have different signs
eta[ii]= optimize.brentq(lambda x: x-a*e/ab*numpy.sin(x)-ar,
0.,2.*numpy.pi)
coseta= numpy.cos(eta)
r= a*numpy.sqrt((1.-e*coseta)*(1.-e*coseta+2.*self.b/a))
vr= numpy.sqrt(self.amp/ab)*a*e*numpy.sin(eta)/r
taneta2= numpy.tan(eta/2.)
tan11= numpy.arctan(numpy.sqrt((1.+e)/(1.-e))*taneta2)
tan12= numpy.arctan(\
numpy.sqrt((a*(1.+e)+2.*self.b)/(a*(1.-e)+2.*self.b))*taneta2)
tan11[tan11 < 0.]+= numpy.pi
tan12[tan12 < 0.]+= numpy.pi
Lambdaeta= tan11+L/sqrtfourbkL2*tan12
psi= anglez-omegaz/omegar*angler+Lambdaeta
lowerl= numpy.sqrt(1.-jphi**2./L2)
sintheta= numpy.sin(psi)*lowerl
costheta= numpy.sqrt(1.-sintheta**2.)
vtheta= L*lowerl*numpy.cos(psi)/costheta/r
R= r*costheta
z= r*sintheta
vR= vr*costheta-vtheta*sintheta
vz= vr*sintheta+vtheta*costheta
sinu= sintheta/costheta*jphi/L/lowerl
u= numpy.arcsin(sinu)
u[vtheta < 0.]= numpy.pi-u[vtheta < 0.]
phi= anglephi-numpy.sign(jphi)*anglez+u
# For non-inclined orbits, phi == psi
phi[True^numpy.isfinite(phi)]= psi[True^numpy.isfinite(phi)]
phi= phi % (2.*numpy.pi)
phi[phi < 0.]+= 2.*numpy.pi
return (R,vR,jphi/R,z,vz,phi,
omegar,numpy.sign(jphi)*omegaz,omegaz)
def _Freqs(self,jr,jphi,jz,**kwargs):
"""
NAME:
Freqs
PURPOSE:
return the frequencies corresponding to a torus
INPUT:
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
OUTPUT:
(OmegaR,Omegaphi,Omegaz)
HISTORY:
2017-11-15 - Written - Bovy (UofT)
"""
L= jz+numpy.fabs(jphi) # total angular momentum
sqrtfourbkL2= numpy.sqrt(L**2.+4.*self.b*self.amp)
H= -2.*self.amp**2./(2.*jr+L+sqrtfourbkL2)**2.
# Calculate the frequencies
omegar= (-2.*H)**1.5/self.amp
omegaz= (1.+L/sqrtfourbkL2)/2.*omegar
return (omegar,numpy.sign(jphi)*omegaz,omegaz)
| true |
3fc05c10034885f28fddf85be10c0c72062d7aac | Python | sirexeclp/handouter | /handouter.py | UTF-8 | 714 | 2.671875 | 3 | [] | no_license | #%%
#%%
# Strip duplicated handout pages from a PDF: within each run of pages that
# carry the same printed page number, keep only the last one.
import sys
import re

from PyPDF2 import PdfFileReader, PdfFileWriter

input_file = sys.argv[1]

#%%
# Keep the source file open for the whole run: PdfFileReader reads pages
# lazily, so the handle must outlive both the scan and the final write.
with open(input_file, 'rb') as src:
    pdf = PdfFileReader(src)

    last_page = 0
    output = PdfFileWriter()
    pages2keep = []
    # Walk back-to-front so the *last* page of each numbered group is seen first.
    for i in reversed(range(pdf.getNumPages())):
        page = pdf.getPage(i)
        last_line = page.extractText().strip().split("\n")[-1]
        m = re.search("^[0-9]+", last_line)
        if m:
            m = m.group(0)
        else:
            # No leading page number on the last line: always keep this page.
            pages2keep.append(page)
            continue
        if last_page != int(m):
            pages2keep.append(page)
        last_page = int(m)

    # Pages were collected back-to-front; restore document order before writing.
    for page in reversed(pages2keep):
        output.addPage(page)

    with open(f"{input_file}.short.pdf", 'wb') as f:
        output.write(f)
cc0e6aeedbf97bcbc85ffa35c0f82d51438c18dd | Python | shayan-7/accesshandler | /accesshandler/cache.py | UTF-8 | 1,271 | 3.140625 | 3 | [] | no_license | from datetime import timedelta
import redis
from nanohttp import settings
_redisconnection = None
def redisconnection():
'''
Returns an global redis connection object.
'''
global _redisconnection
if _redisconnection is None:
_redisconnection = redis.Redis(**settings.redis_)
return _redisconnection
def setkey(connection, ip, pattern, ttl):
'''
Sets a key including `ip` and `pattern` and a viewcount representing
how many times an user from specific ip viewed a url which matches the
pattern. Example:
{"1.1.1.1::/foo/bar": "20"}
If the `viewcount` is None, so it means it's the first view from ip.
...
Parameters
----------
connection : redis.Redis
The redis connection
ip : str
The first part of storing key
pattern : str
The second part of storing key
ttl : datetime.timedelta
The expiry time of key value
'''
connection.setex(keystr(ip, pattern), ttl, 1)
def keystr(ip, pattern):
'''
Creates an string from `ip` and `pattern` with a seperator between
...
Parameters
----------
ip : str
The first part of storing key
pattern : str
The second part of storing key
'''
return f'{ip}::{pattern}'
| true |
90a8a6bfe5c95ba912a66b4e90b31c22e2f4944e | Python | iftekarpatel/greyatom-python-for-data-science | /Make-Sense-of-Census/code.py | UTF-8 | 1,596 | 3.421875 | 3 | [
"MIT"
] | permissive | # --------------
# Importing header files
import numpy as np

# Path of the file has been stored in variable called 'path'
# (supplied by the execution environment)
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print("\nData: \n\n", data)
print("\nType of data: \n\n", type(data))
print(data.ndim)

# New record to append to the census data
new_record = np.array([50, 9, 4, 1, 0, 0, 40, 0])
print(new_record.ndim)

# Append the new record as an extra row
census = np.vstack([data, new_record])
print(census)

# -------------- Age statistics (column 0 holds the age)
age = census[:, 0]
print(age)
max_age = age.max()
print(max_age)
min_age = age.min()
print(min_age)
age_mean = age.mean()
print(age_mean)
age_std = np.std(age)
print(age_std)

# -------------- Race distribution (column 2 holds the race code 0-4)
race_0 = census[census[:, 2] == 0]
race_1 = census[census[:, 2] == 1]
race_2 = census[census[:, 2] == 2]
race_3 = census[census[:, 2] == 3]
race_4 = census[census[:, 2] == 4]
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
a = np.array([len_0, len_1, len_2, len_3, len_4])
print(a)
# argmin gives the race *code* with the fewest citizens; the original
# a.min() returned the count itself and was then clobbered by a
# hard-coded 3.
minority_race = int(a.argmin())
print(minority_race)

# -------------- Senior citizens (older than 60; column 6 = working hours)
senior_citizens = census[census[:, 0] > 60]
print(senior_citizens)
working_hours_sum = senior_citizens.sum(axis=0)[6]
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)

# -------------- Education vs pay (column 1 = education-num, column 7 = pay)
# NOTE(review): records with education-num exactly 10 fall in neither group;
# confirm whether 'low' should be <= 10.
high = census[census[:, 1] > 10]
low = census[census[:, 1] < 10]
avg_pay_high = high.mean(axis=0)[7]
avg_pay_low = low.mean(axis=0)[7]
# Compare the two averages (the original discarded this result and
# overwrote avg_pay_low with a hard-coded 0.14).
print(np.array_equal(avg_pay_high, avg_pay_low))
1c4d49a55aa3ffc36a8cc1603ef09d014f8e4793 | Python | wbroach/python_work | /squares.py | UTF-8 | 218 | 4.46875 | 4 | [] | no_license | # Long Way:
squares = []
for value in range (1,11):
square = value**2
squares.append(square)
print(squares)
# Short Way:
squares = []
for value in range (1,11):
squares.append(value**2)
print(squares)
| true |
545b4a1355cf27a345ffe133232a95ba2fbbba3d | Python | eamanu/escrutinio-social | /elecciones/management/commands/importar_carta_marina_2019_gobernador.py | UTF-8 | 7,109 | 2.75 | 3 | [] | no_license | from decimal import Decimal
from django.core.management.base import BaseCommand
from django.conf import settings
from pathlib import Path
from csv import DictReader
from elecciones.models import Seccion, Circuito, LugarVotacion, Mesa, Categoria
import datetime
CSV = Path(settings.BASE_DIR) / 'elecciones/data/escuelas-elecciones.2019-cordoba-gobernador.csv'
def to_float(val):
try:
return float(val.replace(',', '.'))
except:
return None
class BaseCommand(BaseCommand):
    """Thin wrapper over Django's BaseCommand adding colored log helpers.

    Note: the class deliberately shadows the imported ``BaseCommand`` name.
    """

    def success(self, msg, ending='\n'):
        """Write ``msg`` to stdout in the SUCCESS style."""
        self.stdout.write(self.style.SUCCESS(msg), ending=ending)

    def warning(self, msg, ending='\n'):
        """Write ``msg`` to stdout in the WARNING style."""
        self.stdout.write(self.style.WARNING(msg), ending=ending)

    def log(self, object, created=True, ending='\n'):
        """Log an object's creation, or warn when it already existed."""
        if not created:
            self.warning(f'{object} ya existe', ending=ending)
        else:
            self.success(f'creado {object}', ending=ending)
class Command(BaseCommand):
    help = "Importar carta marina"

    def handle(self, *args, **options):
        """Import the 2019 Córdoba election CSV.

        Creates/updates Seccion, Circuito, LugarVotacion and Mesa rows and
        links each mesa to its applicable Categoria objects.
        """
        reader = DictReader(CSV.open())
        fecha = datetime.datetime(2019, 5, 12, 8, 0)
        categoria_gobernador_cordoba, created = Categoria.objects.get_or_create(slug='gobernador-cordoba-2019', nombre='Gobernador Córdoba 2019', fecha=fecha)
        categoria_intendente_cordoba, created = Categoria.objects.get_or_create(slug='intendente-cordoba-2019', nombre='Intendente Córdoba 2019', fecha=fecha)
        categoria_legisladores_distrito_unico, created = Categoria.objects.get_or_create(slug='legisladores-dist-unico-cordoba-2019', nombre='Legisladores Distrito Único Córdoba 2019', fecha=fecha)
        # categoria_tribunal_de_cuentas_provincial, created = Categoria.objects.get_or_create(slug='tribunal-cuentas-prov-cordoba-2019', nombre='Tribunal de Cuentas Provincia de Córdoba 2019', fecha=fecha, activa=False)
        for c, row in enumerate(reader, 1):
            depto = row['Nombre Seccion']
            numero_de_seccion = int(row['Seccion'])
            seccion, created = Seccion.objects.get_or_create(nombre=depto, numero=numero_de_seccion)
            slg = f'legisladores-departamento-{depto}-2019'
            nombre = f'Legisladores Depto {depto} Córdoba 2019'
            # las departamentales no están activas por defecto
            # POR AHORA NO LAS USAMOS (inactivas)
            categoria_legislador_departamental, created = Categoria.objects.get_or_create(slug=slg,
                                                                                         nombre=nombre,
                                                                                         activa=False,
                                                                                         fecha=fecha)
            self.log(seccion, created)
            circuito, created = Circuito.objects.get_or_create(
                nombre=row['Nombre Circuito'], numero=row['Circuito'], seccion=seccion
            )
            """
            # no sabemos que ciudadnes eligen intendente
            # no estan en la base registrado que circuitos son ciudades en si misma y cuales son parte de una ciudad
            nombre_circuito = row['Nombre Seccion']
            slg = f'intendente-ciudad-{nombre_circuito}-2019'
            nombre = f'Intendente Ciudad {nombre_circuito} Córdoba 2019'
            categoria_intendente_municipal, created = Categoria.objects.get_or_create(slug=slg, nombre=nombre, fecha=fecha)
            """
            self.log(circuito, created)
            coordenadas = [to_float(row['Longitud']), to_float(row['Latitud'])]
            if coordenadas[0] and coordenadas[1]:
                geom = {'type': 'Point', 'coordinates': coordenadas}
                if row['Estado Geolocalizacion'] == 'Match':
                    estado_geolocalizacion = 9
                elif row['Estado Geolocalizacion'] == 'Partial Match':
                    estado_geolocalizacion = 5
            else:
                geom = None
                estado_geolocalizacion = 0
            escuela, created = LugarVotacion.objects.get_or_create(
                circuito=circuito,
                nombre=row['Establecimiento'],
                direccion=row['Direccion'],
                ciudad=row['Ciudad'] or '',
                barrio=row['Barrio'] or ''
            )
            escuela.electores = int(row['electores'])
            escuela.geom = geom
            escuela.estado_geolocalizacion = estado_geolocalizacion
            escuela.save()
            self.log(escuela, created)
            for mesa_nro in range(int(row['Mesa desde']), int(row['Mesa Hasta']) + 1):
                mesa, created = Mesa.objects.get_or_create(numero=mesa_nro)  # EVITAR duplicados en limpiezas de escuelas y otros
                mesa.lugar_votacion = escuela
                mesa.circuito = circuito
                mesa.save()
                # BUGFIX: the m2m manager is `categoria` (see the .all()
                # checks); `mesa.categoria_add(...)` raised AttributeError.
                if categoria_gobernador_cordoba not in mesa.categoria.all():
                    mesa.categoria.add(categoria_gobernador_cordoba)
                    self.success('Se agregó la mesa a la categoria a gobernador')
                if categoria_legisladores_distrito_unico not in mesa.categoria.all():
                    mesa.categoria.add(categoria_legisladores_distrito_unico)
                    self.success('Se agregó la mesa a la categoria a legislador dist unico')
                #if categoria_tribunal_de_cuentas_provincial not in mesa.categoria.all():
                #    mesa.categoria.add(categoria_tribunal_de_cuentas_provincial)
                #    self.success('Se agregó la mesa a la categoria a trib de cuentas provincial')
                # agregar la categoria a legislador departamental
                if categoria_legislador_departamental not in mesa.categoria.all():
                    mesa.categoria.add(categoria_legislador_departamental)
                    self.success('Se agregó la mesa a la categoria {}'.format(categoria_legislador_departamental.nombre))
                # si es de capital entonces vota a intendente
                if numero_de_seccion == 1:
                    # capital se pondera por circuitos
                    seccion.proyeccion_ponderada = True
                    seccion.save(update_fields=['proyeccion_ponderada'])
                    mesa.categoria.add(categoria_intendente_cordoba)
                    self.success('Se agregó la mesa a la categoria a intendente')
                mesa.save()
                self.log(mesa, created)
        """ hay 3 mesas que son de una escuela y no son nros consecutivos
        Se requiere copiar la mesa 1 3 veces antes de tirar este comando para que no falten esos tres datos
        """
        # Duplicate mesa 1 into the three non-consecutive mesa numbers
        # (pk=None + save() inserts a fresh row with the new numero).
        mesa_8651 = Mesa.objects.get(numero=1)
        mesa_8651.pk = None
        mesa_8651.numero = 8651
        mesa_8651.save()
        mesa_8652 = Mesa.objects.get(numero=1)
        mesa_8652.pk = None
        mesa_8652.numero = 8652
        mesa_8652.save()
        mesa_8653 = Mesa.objects.get(numero=1)
        mesa_8653.pk = None
        mesa_8653.numero = 8653
        mesa_8653.save()
e2523aeb88d7afa3744fdcce54c1e5ebfb250a8f | Python | schwittlick/cursor | /tools/tests/lib.py | UTF-8 | 1,378 | 3.171875 | 3 | [
"MIT"
] | permissive | import numpy as np
from cursor.collection import Collection
from cursor.path import Path
def project_point_to_plane(point, plane_point, plane_normal):
plane_normal = plane_normal / np.linalg.norm(plane_normal) # Normalize the plane normal vector
v = point - plane_point # Vector from point on plane to point
d = np.dot(v, plane_normal) # Project v onto plane normal to get distance
projected_point = point - d * plane_normal # Move point along normal vector
return projected_point
def compute_2d_coordinates(point_3d, basis1, basis2):
coord1 = np.dot(point_3d, basis1) / np.linalg.norm(basis1)
coord2 = np.dot(point_3d, basis2) / np.linalg.norm(basis2)
return np.array([coord1, coord2])
def project(point_list: list[list[list[float]]], plane_point: np.array = np.array([0, 0, 0]),
plane_normal: np.array = np.array([0, 0, 1]), basis1: np.array = np.array([1, 0, 0]),
basis2: np.array = np.array([0, 1, 0])):
c = Collection()
for line in point_list:
p = Path()
points = np.array(line)
for point in points:
projected_point = project_point_to_plane(point, plane_point, plane_normal)
projected_point_2d = compute_2d_coordinates(projected_point, basis1, basis2)
p.add(projected_point_2d[0], projected_point_2d[1])
c.add(p)
return c
| true |
c5596ed8422dec5c107eb3f2d56b1a93642b5720 | Python | xorudlee97/Tensorflow | /Day0821/T04_Iris.py | UTF-8 | 3,651 | 2.65625 | 3 | [] | no_license | from sklearn.model_selection import train_test_split
import numpy as np
import os
save_dir = os.path.dirname("D:/LTK_AI/LTK_AI_Study/AI_Study/Data/Numpy/")
cancer_data = np.load(save_dir+"/iris2_data.npy")
x_data = cancer_data[:,0:-1]
y_data = cancer_data[:,[-1]]
nb_classes = 3
print(x_data.shape)
print(y_data.shape)
x_train, x_test, y_train, y_test = train_test_split(
x_data, y_data, random_state=77, test_size = 0.25
)
import tensorflow as tf
tf.set_random_seed(777)
train_num = 4
test_num = 1
X = tf.placeholder(tf.float32, shape=[None, train_num])
Y = tf.placeholder(tf.int32, shape=[None, test_num])
Y_one_hot = tf.one_hot(Y, nb_classes)
print("one_hot:", Y_one_hot)
Y_one_hot = tf.reshape(Y_one_hot, [-1, nb_classes])
print("reshape one_hot:", Y_one_hot)
def create_Relu_Layer(input_node, output_Node, layer_hypothesis, weight_name="weihgt", bias_name="bias"):
    """Build one ReLU-activated dense layer on top of `layer_hypothesis`.

    Returns (activation, pre-activation logits, weights, bias).
    Weights use Xavier initialization via tf.get_variable.
    """
    weights = tf.get_variable(weight_name, shape=[input_node, output_Node],
                              initializer=tf.contrib.layers.xavier_initializer())
    bias = tf.Variable(tf.random_normal([output_Node]), bias_name)
    pre_activation = tf.matmul(layer_hypothesis, weights) + bias
    return tf.nn.relu(pre_activation), pre_activation, weights, bias
def create_Sigmoid_Layer(input_node, output_Node, layer_hypothesis, weight_name="weihgt", bias_name="bias"):
    """Build one sigmoid-activated dense layer on top of `layer_hypothesis`.

    Returns (activation, pre-activation logits, weights, bias).
    """
    # BUGFIX: tf.Variable's second positional argument is `trainable`, not
    # `name`; the names must be passed as keyword arguments or they are
    # silently consumed as a (truthy) trainable flag.
    W = tf.Variable(tf.random_normal([input_node, output_Node]), name=weight_name)
    b = tf.Variable(tf.random_normal([output_Node]), name=bias_name)
    logits = tf.matmul(layer_hypothesis, W) + b
    hypothesis = tf.sigmoid(logits)
    return hypothesis, logits, W, b
def create_SoftMax_Layer(input_node, output_Node, layer_hypothesis, weight_name="weihgt", bias_name="bias"):
    """Build the softmax output layer on top of `layer_hypothesis`.

    Returns (softmax probabilities, pre-softmax logits, weights, bias).
    """
    # BUGFIX: pass the names as keywords (second positional arg of
    # tf.Variable is `trainable`).
    W = tf.Variable(tf.random_normal([input_node, output_Node]), name=weight_name)
    b = tf.Variable(tf.random_normal([output_Node]), name=bias_name)
    # BUGFIX: the original used the global placeholder X here, which
    # disconnected every previously stacked layer from the output; the
    # layer must consume its input argument.
    logits = tf.matmul(layer_hypothesis, W) + b
    hypothesis = tf.nn.softmax(logits)
    return hypothesis, logits, W, b
# Stack five ReLU layers (4 -> 16 -> ... -> 4) and a softmax head (4 -> 3).
Node_number = 16
hypothesis, logits, W, b = create_Relu_Layer(train_num,Node_number,X, "relu1")
hypothesis, logits, W, b = create_Relu_Layer(Node_number,Node_number,hypothesis, "relu2")
hypothesis, logits, W, b = create_Relu_Layer(Node_number,Node_number,hypothesis, "relu3")
hypothesis, logits, W, b = create_Relu_Layer(Node_number,Node_number,hypothesis, "relu4")
hypothesis, logits, W, b = create_Relu_Layer(Node_number,train_num,hypothesis, "relu5")
hypothesis, logits, W, b = create_SoftMax_Layer(train_num, nb_classes,hypothesis)
# Cross-entropy on the raw logits; stop_gradient keeps labels constant.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
                                                 labels=tf.stop_gradient([Y_one_hot])))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
# Predicted class = index of the largest softmax probability.
prediction = tf.argmax(hypothesis, 1)
correct_prediction = tf.equal(prediction, tf.argmax(Y_one_hot, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Train for 3000 steps, logging cost/accuracy every 100 steps.
    for step in range(3001):
        _, cost_val, acc_Val = sess.run(
            [optimizer, cost, accuracy],
            feed_dict={X:x_train, Y: y_train}
        )
        if step % 100 == 0:
            print("Step: {:5}\tCost: {:.3f}\tACC: {:.2%}".format(step, cost_val, acc_Val))
    # Per-sample predictions on the held-out test set.
    pred = sess.run(prediction, feed_dict={X:x_test})
    for p , y in zip(pred, y_test.flatten()):
        print("[{}] Prediction: {} True Y: {}".format(p==int(y), p, int(y)))
    print(
        "Accuracy: ",
        accuracy.eval(
            session=sess,
            feed_dict={X:x_test, Y:y_test}
        )
    )
0ff1b337faf2a49209c327f69fabe332cf04453d | Python | nadavsh22/nand2tetris | /Project 10/CompilationEngine.py | UTF-8 | 15,042 | 3.0625 | 3 | [] | no_license | ############################################################
# Imports
############################################################
import JackTokenizer
import Consts as co
############################################################
# Class definition
############################################################
class CompilationEngine:
    """
    Recursive-descent compiler for the Jack language (nand2tetris project 10).

    Consumes tokens from a JackTokenizer and writes the parse tree as XML
    to a sibling output file. Each compileXxx method assumes the tokenizer
    is positioned on the first token of its construct and (unless noted)
    leaves it on the token just past the construct.
    """
    def __init__(self, filename):
        """
        constructor, creates a new compilation engine with the given input
        :param filename: the input file name
        """
        self._tokenizer = JackTokenizer.JackTokenizer(filename)
        # Output file: same base name, new suffix from Consts.
        self._filename = filename.split('.')[0] + '.' + co.NEW_SUFFIX
        self._writer = open(self._filename, 'w')
    ############################################################
    # Private methods
    ###########################################################
    def _tagOpenClose(self, openCloseBool, type='token'):
        """
        writes xml tags
        :param openCloseBool: 0 for an opening tag, 1 for a closing tag
                              (also used to append the trailing newline)
        :param type: token type key into co.elementDict
        :return:
        """
        start = '<' + openCloseBool * '/'
        end = '>' + openCloseBool * '\n'
        self._writer.write(start + co.elementDict[type] + end)
    def _printBlockDef(self):
        """
        prints an xml block for current token
        :return:
        """
        self._printBlock(self._tokenizer.tokenType(), self._tokenizer.currentValue())
    def _compileElse(self):
        """
        compiles and writes the 'else' statement
        """
        self._printBlockDef()  # prints 'else'
        self._tokenizer.advance()
        self._printBlockDef()  # prints '{'
        self._tokenizer.advance()
        self.compileStatements()
        self._printBlockDef()  # prints '}'
    def _printBlock(self, tokenType, tokenVal):
        """
        prints an xml block (one <type> value </type> line)
        :param tokenType: token type key into co.elementDict
        :param tokenVal: the token text; <, > and & are XML-escaped
        :return:
        """
        self._tagOpenClose(0, tokenType)
        val = tokenVal
        if tokenVal in ['<', '>', '&']:
            val = co.symbDict[tokenVal]
        self._writer.write(" " + val + " ")
        self._tagOpenClose(1, tokenType)
    ############################################################
    # Public methods
    ###########################################################
    def compileClass(self):
        """
        compiles and writes the class (entry point of the compilation)
        """
        self._writer.write('<class>\n')
        self._tokenizer.advance()
        self._printBlockDef()  # prints 'class'
        self._tokenizer.advance()
        self._printBlockDef()  # prints class name
        self._tokenizer.advance()
        self._printBlockDef()  # prints '{'
        self._tokenizer.advance()
        while self._tokenizer.currentValue() != '}':
            # NOTE(review): started_subdecs is a leftover of an unfinished
            # "no vars after subroutines" check and is never read.
            started_subdecs = 0
            if self._tokenizer.currentValue() in ['static', 'field']:
                self.compileClassVarDec()
                self._tokenizer.advance()
            elif self._tokenizer.currentValue() in ['constructor', 'function', 'method']:
                self.compileSubroutineDec()
                self._tokenizer.advance()
        self._printBlockDef()  # prints }
        self._writer.write('</class>\n')
    def testTokenizer(self):
        # Debug helper: dump every token as a bare XML line.
        while self._tokenizer.hasMoreTokens():
            self._tokenizer.advance()
            self._tagOpenClose(0, self._tokenizer.tokenType())
            self._writer.write(" " + self._tokenizer.currentValue() + " ")
            self._tagOpenClose(1, self._tokenizer.tokenType())
    def compileVarDec(self):
        """
        compiles and writes a local variable declaration ('var type name;')
        :return:
        """
        self._writer.write('<varDec>\n')
        while (self._tokenizer.currentValue() != ';'):
            self._printBlockDef()
            # NOTE(review): this branch is a no-op leftover; 'arrayLength'
            # receives no special treatment.
            if self._tokenizer.currentValue() == 'arrayLength':
                pass
            self._tokenizer.advance()
        self._printBlockDef()  # print ;
        self._writer.write('</varDec>\n')
    def compileClassVarDec(self):
        """
        compiles and writes a class variable declaration (static/field)
        :return:
        """
        self._writer.write('<classVarDec>\n')
        self._printBlockDef()
        while (self._tokenizer.currentValue() != ';'):
            self._tokenizer.advance()
            self._printBlockDef()
        self._writer.write('</classVarDec>\n')
    def compileParameterList(self):
        """
        compiles and writes the (possibly empty) parameter list,
        stopping at the closing ')'
        :return:
        """
        self._writer.write('<parameterList>\n')
        self._tokenizer.advance()
        i = 0
        while self._tokenizer.currentValue() != ')':
            self._printBlockDef()
            self._tokenizer.advance()
            i += 1
        self._writer.write('</parameterList>\n')
    def compileSubroutineDec(self):
        """
        compiles and writes the subroutine declaration
        (keyword, return type, name, parameter list and body)
        :return:
        """
        self._writer.write('<subroutineDec>\n')
        while self._tokenizer.currentValue() != '(':
            self._printBlockDef()
            self._tokenizer.advance()
        self._printBlockDef()  # print (
        self.compileParameterList()
        self._printBlockDef()  # print )
        self.compileSubroutineBody()
        self._writer.write('</subroutineDec>\n')
    def compileSubroutineBody(self):
        """
        compiles and writes the subroutine body:
        '{' varDec* statements '}'
        :return:
        """
        self._writer.write('<subroutineBody>\n')
        self._tokenizer.advance()
        self._printBlockDef()  # print {
        self._tokenizer.advance()
        while self._tokenizer.currentValue() == 'var':
            self.compileVarDec()
            self._tokenizer.advance()
        while self._tokenizer.currentValue() != '}':
            self.compileStatements()
        self._printBlockDef()
        self._writer.write('</subroutineBody>\n')
    def compileStatements(self):
        """
        dispatches on the current keyword and compiles each statement
        until the enclosing '}' is reached
        """
        self._writer.write('<statements>\n')
        while self._tokenizer.currentValue() != '}':
            token = self._tokenizer.currentValue()
            if (token == 'if'):
                self._writer.write('<ifStatement>\n')
                self.compileIf()
                self._tokenizer.advance()
                # An 'else' clause belongs inside the same ifStatement tag.
                if self._tokenizer.currentValue() == 'else':
                    self._compileElse()
                    self._writer.write('</ifStatement>\n')
                    self._tokenizer.advance()
                else:
                    self._writer.write('</ifStatement>\n')
                    continue
            elif (token == 'do'):
                self._writer.write('<doStatement>\n')
                self.compileDo()
                self._writer.write('</doStatement>\n')
                self._tokenizer.advance()
            elif token == 'let':
                self._writer.write('<letStatement>\n')
                self.compileLet()
                self._writer.write('</letStatement>\n')
                self._tokenizer.advance()
            elif token == 'while':
                self._writer.write('<whileStatement>\n')
                self.compileWhile()
                self._writer.write('</whileStatement>\n')
                self._tokenizer.advance()
            elif token == 'return':
                self._writer.write('<returnStatement>\n')
                self.compileReturn()
                self._writer.write('</returnStatement>\n')
                self._tokenizer.advance()
            elif token == 'else':
                self._compileElse()
        self._writer.write('</statements>\n')
    def compileDo(self):
        """
        compiles and writes the 'do' statement
        :return:
        """
        self._printBlockDef()  # prints do
        self._tokenizer.advance()
        # Look ahead two tokens to decide the subroutine-call form.
        firstToken = self._tokenizer.currentValue(), self._tokenizer.tokenType()
        self._tokenizer.advance()
        secondToken = self._tokenizer.currentValue(), self._tokenizer.tokenType()
        self.compileSubroutineCall(firstToken, secondToken)
        self._tokenizer.advance()
        self._printBlockDef()  # print the ';'
    def compileLet(self):
        """
        compiles and writes the 'let' statement
        """
        self._printBlockDef()  # prints 'let'
        self._tokenizer.advance()
        self._printBlockDef()  # prints 'varName'
        self._tokenizer.advance()
        # Optional array indexing on the assignment target.
        if self._tokenizer.currentValue() == '[':
            self._printBlockDef()  # print '['
            self._tokenizer.advance()
            self.compileExpression()
            self._printBlockDef()  # print ']'
            self._tokenizer.advance()
        self._printBlockDef()  # print '='
        self._tokenizer.advance()
        self.compileExpression()
        self._printBlockDef()  # prints ';'
    def compileWhile(self):
        """
        compiles and writes the 'while' statement
        """
        self._printBlockDef()  # prints 'while'
        self._tokenizer.advance()
        self._printBlockDef()  # prints '('
        self._tokenizer.advance()
        self.compileExpression()
        self._printBlockDef()  # prints ')'
        self._tokenizer.advance()
        self._printBlockDef()  # prints '{'
        self._tokenizer.advance()
        self.compileStatements()
        self._printBlockDef()  # prints '}'
    def compileReturn(self):
        """
        compiles and writes the 'return' statement
        (with an optional return expression)
        """
        self._printBlockDef()  # prints 'return'
        self._tokenizer.advance()
        if self._tokenizer.currentValue() != ';':
            self.compileExpression()
        self._printBlockDef()  # prints ';'
    def compileIf(self):
        """
        compiles and writes the 'if' statement
        (the optional 'else' clause is handled by the caller)
        """
        self._printBlockDef()  # prints 'if'
        self._tokenizer.advance()
        self._printBlockDef()  # prints '('
        self._tokenizer.advance()
        self.compileExpression()
        self._printBlockDef()  # prints ')'
        self._tokenizer.advance()
        self._printBlockDef()  # prints '{'
        self._tokenizer.advance()
        self.compileStatements()
        self._printBlockDef()  # prints '}'
    def compileExpression(self):
        """
        compiles term (op term)*.

        VERY IMPORTANT NOTE: in order to look ahead in the term compiling,
        after calling compile expression, the current token is the one after the
        expression.
        """
        self._writer.write('<expression>\n')
        self.compileTerm()
        currentToken = self._tokenizer.currentValue()
        i = 0
        while currentToken in ['+', '-', '*', '/', '&', '|', '<', '>', '=']:
            self._printBlockDef()
            self._tokenizer.advance()
            self.compileTerm()
            currentToken = self._tokenizer.currentValue()
        self._writer.write('</expression>\n')
    def compileTerm(self):
        """
        compiles a single term, distinguishing constants, unary ops,
        parenthesized expressions, array access, subroutine calls and
        plain variable names by a one-token lookahead.

        VERY IMPORTANT NOTE: in order to look ahead in the term compiling,
        after calling compile expression, the current token is the one after the
        expression.
        """
        self._writer.write('<term>\n')
        firstToken = self._tokenizer.currentValue(), self._tokenizer.tokenType()
        self._tokenizer.advance()
        secondToken = self._tokenizer.currentValue(), self._tokenizer.tokenType()
        # integerConstant | stringConstant | keywordConstant
        if firstToken[1] in [co.STRING_CONST, co.INT_CONST] or firstToken[0] in [
            'true', 'false', 'this', 'null']:
            self._printBlock(firstToken[1], firstToken[0])  # print const
            self._writer.write('</term>\n')
            return
        elif firstToken[0] in ['-', '~']:  # unaryOp term
            self._printBlock(firstToken[1], firstToken[0])  # print -/~
            self.compileTerm()
            self._writer.write('</term>\n')
            return
        if firstToken[0] == '(':  # term = ('expression')
            self._printBlock(firstToken[1], firstToken[0])  # print '('
            self.compileExpression()  # everything within brackets
            self._printBlockDef()  # print ')'
            self._tokenizer.advance()
        elif secondToken[0] == '[':  # term = arrayname[expression]
            self._printBlock(firstToken[1], firstToken[0])  # prints varName
            self._printBlockDef()  # prints '['
            self._tokenizer.advance()
            self.compileExpression()
            self._printBlockDef()  # print ']'
            self._tokenizer.advance()
        # term = subroutineCall: subroutine(expressionList) | thing.subroutine(elist)
        elif firstToken[1] == co.IDENTIFIER and secondToken[0] in ['.', '(']:
            self.compileSubroutineCall(firstToken, secondToken)
            self._tokenizer.advance()
        else:  # a simple varName
            self._printBlock(firstToken[1], firstToken[0])  # prints varName
        self._writer.write('</term>\n')
    def compileSubroutineCall(self, firstToken, secondToken):
        """
        compiles and writes a subroutine call; firstToken/secondToken are
        the already-consumed (value, type) lookahead pairs
        """
        self._printBlock(firstToken[1], firstToken[0])  # prints
        # varName|className|subroutineName
        if secondToken[0] == '.':
            self._printBlockDef()  # prints '.'
            self._tokenizer.advance()
            self._printBlockDef()  # print subroutineName
            self._tokenizer.advance()
        self._printBlockDef()  # print '('
        self._tokenizer.advance()
        self.compileExpressionList()
        self._printBlockDef()  # print ')'
    def compileExpressionList(self):
        """
        compiles and writes a (possibly empty) comma-separated
        expression list
        """
        self._writer.write('<expressionList>\n')
        if self._tokenizer.currentValue() == ')':
            self._writer.write('</expressionList>\n')
            return
        self.compileExpression()
        while self._tokenizer.currentValue() == ',':
            self._printBlockDef()  # print ','
            self._tokenizer.advance()
            self.compileExpression()
        self._writer.write('</expressionList>\n')
    def closeWriter(self):
        # Flush and release the output file handle.
        self._writer.close()
c0409c57b9c506cda988a92c48498dcdb93fc83c | Python | DariaMikhailovna/Web | /my_search_engine/src/run.py | UTF-8 | 995 | 3.234375 | 3 | [] | no_license | from search_engine import *
def main():
site = 'google'
tag = input('Введите тег запроса:')
max_links_count = input('Введите максимальное выводимое количество ссылок:')
is_rec = input('Введите "yes", если хотите запустить рекурсивный поиск и "no", если не рекурсивный:')
if not max_links_count.isdigit():
print('Количество должно быть числом')
exit()
else:
max_links_count = int(max_links_count)
if is_rec == 'yes':
is_rec = True
elif is_rec == 'no':
is_rec = False
else:
print('Вы ошиблись в ответе на вопрос про рекурсию')
exit()
link_query = get_link(site, tag)
links = list(get_links(link_query, is_rec))
for link in links[:max_links_count]:
print(link)
if __name__ == '__main__':
main()
| true |
1621db6d659d264118c3478551486a9cc51a90a2 | Python | RedSpiderMkV/AncientPCAdminstrationTools | /RemoteShutdown-Windows/src/recipient.py | UTF-8 | 1,286 | 2.734375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 5 21:04:48 2015
@author: redspidermkv
"""
import time
import socket
import constants
import subprocess
class ShutdownListener:
    """Listens on a TCP port and shuts the machine down (Windows) when the
    agreed command arrives."""

    hostAddress = ''
    port = 0
    socketConnection = None

    def __init__(self, address, port):
        """
        :param address: interface address to bind to (e.g. '0.0.0.0')
        :param port: TCP port to listen on
        """
        self.port = port
        # BUGFIX: honor the caller-supplied address; the original discarded
        # it and bound to socket.gethostname() instead.
        self.hostAddress = address
        self.socketConnection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def BeginListener(self):
        """Accept connections forever; on the shutdown command, power off."""
        self.socketConnection.bind((self.hostAddress, self.port))
        self.socketConnection.listen(5)
        print('Listener started...')
        while True:
            conn, addr = self.socketConnection.accept()
            command = conn.recv(1024)
            # NOTE(review): recv() returns bytes on Python 3, so this
            # assumes constants.COMMAND is a bytes value - confirm.
            if command == constants.COMMAND:
                # run shutdown command
                subprocess.call(['shutdown', '/s', '/t', '0'])
            # BUGFIX: socket.send requires bytes on Python 3.
            conn.send(b'Command receieved...')
            conn.close()
def main():
    # Crude boot delay so the network stack is up before we bind the socket.
    time.sleep(120) # at some point, use a proper ping system to check when
    # network ready instead of a dumb timer...
    # Listen on all interfaces, port 1885.
    shutDownListener = ShutdownListener('0.0.0.0', 1885)
    shutDownListener.BeginListener()

if __name__ == "__main__":
    main()
| true |
681db90a433e19fe1324f7a34e866d1b9f3dbb6b | Python | CharlesBayley/BF-Compiler | /nodes.py | UTF-8 | 4,532 | 3.21875 | 3 | [] | no_license | #!/usr/bin/python
class AstNode:
    """Base class for every node in the Brainfuck AST.

    Only stores a back-reference to the parent node (None for the root).
    """

    def __init__(self, parentNode):
        self.parentNode = parentNode
class StatementNode(AstNode):
    """Leaf node for a single Brainfuck instruction character."""

    def __init__(self, parentNode, statement):
        super().__init__(parentNode)
        # One of '+', '-', '>', '<', '.', ','.
        self.statement = statement

    def run(self, state):
        """Interpret the instruction against the given machine state.

        Fix: use `==` instead of `is` — identity comparison against a
        string literal only works by the accident of CPython interning
        (and is a SyntaxWarning on Python >= 3.8).
        """
        if self.statement == '+':
            state.increment()
        elif self.statement == '-':
            state.decrement()
        elif self.statement == '>':
            state.incrementCounter()
        elif self.statement == '<':
            state.decrementCounter()
        elif self.statement == '.':
            state.printByte()
        elif self.statement == ',':
            state.readByte()

    def compile(self, nest):
        """Return the equivalent C statement, indented `nest` levels."""
        statement = ''
        if self.statement == '+':
            statement = '++*ptr;'
        elif self.statement == '-':
            statement = '--*ptr;'
        elif self.statement == '>':
            statement = '++ptr;'
        elif self.statement == '<':
            statement = '--ptr;'
        elif self.statement == '.':
            statement = 'putchar(*ptr);'
        elif self.statement == ',':
            statement = '*ptr=getchar();'
        return '{}{}\n'.format(nest * '    ', statement)
class ValueAdjustmentNode(AstNode):
    """Optimized node: adjust the current cell by a signed net amount
    (the result of collapsing a run of '+' / '-' instructions)."""

    def __init__(self, parentNode, amount):
        super().__init__(parentNode)
        self.amount = amount

    def run(self, state):
        # Negative amounts decrement; the state handles the sign.
        state.increment(self.amount)

    def compile(self, nest):
        """Emit a single C compound-assignment, indented `nest` levels."""
        operator = '+=' if self.amount >= 0 else '-='
        return '{}*ptr {} {};\n'.format(nest * '    ', operator, abs(self.amount))
class PointerAdjustmentNode(AstNode):
    """Optimized node: move the data pointer by a signed net amount
    (the result of collapsing a run of '>' / '<' instructions)."""

    def __init__(self, parentNode, amount):
        super().__init__(parentNode)
        self.amount = amount

    def run(self, state):
        # Negative amounts move the pointer left; the state handles the sign.
        state.incrementCounter(self.amount)

    def compile(self, nest):
        """Emit a single C compound-assignment, indented `nest` levels."""
        operator = '+=' if self.amount >= 0 else '-='
        return '{}ptr {} {};\n'.format(nest * '    ', operator, abs(self.amount))
class StatementSequenceNode(AstNode):
    """Interior node holding an ordered sequence of child nodes."""

    def __init__(self, parentNode=None, statementNodes=None):
        super().__init__(parentNode)
        # Create the list per instance (a shared mutable default would be
        # a bug); a falsy argument also gets a fresh list.
        self.statementNodes = statementNodes
        if not statementNodes:
            self.statementNodes = []

    def run(self, state):
        """Run every child in order against the machine state."""
        for statementNode in self.statementNodes:
            statementNode.run(state)

    def append(self, node):
        """Add a child node to the end of the sequence."""
        self.statementNodes.append(node)

    def compile(self, nest):
        """Compile all children one indentation level deeper and
        concatenate the results."""
        nest += 1
        message = ''
        for statementNode in self.statementNodes:
            message += statementNode.compile(nest)
        return message

    def _optimizeStackableNodes(self, instructionList, newNodeClass):
        """Collapse runs of stackable StatementNodes into one adjustment node.

        instructionList: two-character string; index 0 increments and
            index 1 decrements the accumulated amount.
        newNodeClass: ValueAdjustmentNode or PointerAdjustmentNode.
        """
        newStatementNodes = []
        nextNode = newNodeClass(self, 0)

        def resetNextNodeState(statementNode=None):
            # Flush the accumulated node (runs that net to zero are
            # dropped entirely) and keep the node that ended the run.
            nonlocal newStatementNodes
            nonlocal nextNode
            if nextNode.amount != 0:
                newStatementNodes.append(nextNode)
                nextNode = newNodeClass(self, 0)
            if statementNode:
                newStatementNodes.append(statementNode)

        for statementNode in self.statementNodes:
            try:
                # Nodes without a .statement attribute (loops, already
                # optimized nodes) raise AttributeError and end the run;
                # raising it explicitly reuses the same exit path.
                # Fix: `x not in y` instead of `not x in y`, and `==`
                # instead of `is` (string identity is an interning
                # accident; SyntaxWarning on Python >= 3.8).
                if statementNode.statement not in instructionList:
                    raise AttributeError()
                if statementNode.statement == instructionList[0]:
                    nextNode.amount += 1
                else:
                    nextNode.amount -= 1
            except AttributeError:
                resetNextNodeState(statementNode)
        resetNextNodeState()
        self.statementNodes = newStatementNodes

    def optimize(self):
        """Optimize this sequence, then recurse into optimizable children."""
        self._optimizeStackableNodes('+-', ValueAdjustmentNode)
        self._optimizeStackableNodes('><', PointerAdjustmentNode)
        for node in self.statementNodes:
            try:
                node.optimize()
            except AttributeError:
                # Leaf / adjustment nodes have no optimize(); skip them.
                pass
class LoopNode(StatementSequenceNode):
    """Node for a bracketed Brainfuck loop: repeat the child sequence
    while the current cell is non-zero."""

    def __init__(self, parentNode):
        super().__init__(parentNode)

    def run(self, state):
        # Keep executing the body until the current cell tests as zero.
        while not state.testByte():
            super().run(state)

    def compile(self, nest):
        """Emit a C while-loop wrapping the compiled child sequence."""
        indent = nest * '    '
        body = super().compile(nest)
        return '{0}while(*ptr) {{\n{1}{0}}}\n'.format(indent, body)
| true |
babf9a580634132e4bf5b194d0a7b3d26f632fd3 | Python | RonnySun/tf-tutorials | /2_logit_regression/logit.py | UTF-8 | 2,657 | 2.78125 | 3 | [] | no_license | from sklearn import datasets
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
# Dataset / training hyper-parameters.
n_features = 2
n_classes = 2
batch_size = 32
#help(plt.scatter)
# Use sklearn to generate a 2-feature binary classification dataset of
# 1000 samples, one cluster per class.
x,y = datasets.make_classification(n_samples=1000,n_features=n_features,n_redundant=0,n_informative=1,
                                   n_classes=n_classes,n_clusters_per_class=1)
#print(x,y)
# Split into training and test sets with a 7:3 ratio.
train_x,test_x,train_y,test_y = train_test_split(x,y,test_size=0.3)
# Visualize the training data as a scatter plot (color = class label).
plt.scatter(train_x[:,0],train_x[:,1], marker='o', c=train_y,
            s=25, edgecolor='k')
#plt.show()
#print("Train----",train_x,train_y)
def get_batch(x_b, y_b, batch):
    """Yield successive (x, y) mini-batches of size `batch`.

    The final batch may be smaller than `batch`. Fix: the original
    iterated `range(batch, n, batch)`, which silently dropped the
    trailing partial batch — and yielded nothing at all whenever
    len(x_b) <= batch — so part of the data was never trained on.
    """
    n_samples = len(x_b)
    for start in range(0, n_samples, batch):
        yield x_b[start:start + batch], y_b[start:start + batch]
# NOTE: y_input holds integer class labels, as required by
# sparse_softmax_cross_entropy below.
x_input = tf.placeholder(tf.float32,shape=[None,n_features],name='X_IPNUT')
# NOTE(review): 'X_IPNUT' above looks like a typo for 'X_INPUT'; left
# unchanged since the graph node name may be referenced elsewhere.
y_input = tf.placeholder(tf.int32,shape=[None],name='Y_INPUT')
# Note the shapes: W is [n_features, n_classes], b is [n_classes].
W = tf.Variable(tf.truncated_normal([n_features,n_classes]),name='W')
b = tf.Variable(tf.zeros([n_classes],name='b'))
# Binary classification uses the sigmoid function; logits.shape == (?, 2).
logits = tf.sigmoid(tf.matmul(x_input,W)+b)
# sparse_softmax_cross_entropy applies softmax to the logits and then
# computes the cross entropy against integer labels: logits.shape (n, 2),
# y_input.shape (n,) — the one-hot conversion happens internally.
loss = tf.losses.sparse_softmax_cross_entropy(labels=y_input,logits=logits)
loss = tf.reduce_mean(loss)
# Learning rate; optimize with the Adam algorithm.
learning_rate = 0.01
opitimer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# Compute accuracy: predicted class is the argmax over the two logits.
# NOTE(review): tf.arg_max is deprecated in favour of tf.argmax.
predict = tf.arg_max(logits,1,name='predict')
# tf.metrics.accuracy creates *local* variables, hence the
# local_variables_initializer call below.
acc, acc_op = tf.metrics.accuracy(labels=y_input,predictions=predict)
# Train the model.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    for epoch in range(200):
        # Train in mini-batches.
        for tx,ty in get_batch(train_x,train_y,batch_size):
            loss_value,_,acc_value=sess.run([loss,opitimer,acc_op],feed_dict={x_input:tx,y_input:ty})
        if epoch%10==0:
            print('loss = {}, acc = {}'.format(loss_value,acc_value))
    # Evaluate on the held-out test set.
    acc_value_test = sess.run([acc_op],feed_dict={x_input:test_x ,y_input:test_y})
    print('val acc = {}'.format(acc_value_test),"W:",sess.run(W),"b:",sess.run(b))
| true |
3935a5f0594732f8a187166219ee8c58a9ba9321 | Python | saltant-org/saltant | /tasksapi/tasks/container_tasks.py | UTF-8 | 8,390 | 2.75 | 3 | [
"MIT"
] | permissive | """Contains task functionality for container-based tasks.
Note that none of these functions themselves are registered with Celery;
instead they are used by other functions which *are* registered with
Celery.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import shlex
import timeout_decorator
from .utils import create_local_directory
class SingularityPullFailure(Exception):
    """Raised when a Singularity image pull cannot be completed."""
def run_docker_container_command(
    uuid,
    container_image,
    command_to_run,
    logs_path,
    results_path,
    env_vars_list,
    args_dict,
):
    """Launch an executable within a Docker container.

    Args:
        uuid: A string containing the uuid of the job being run.
        container_image: A string containing the name of the container
            to pull.
        command_to_run: A string containing the command to run.
        logs_path: A string (or None) containing the path of the
            directory containing the relevant logs within the container.
        results_path: A string (or None) containing the path of the
            directory containing any output files from the container.
        env_vars_list: A list of strings containing the environment
            variable names for the worker to consume from its
            environment.
        args_dict: A dictionary containing arguments and corresponding
            values.

    Raises:
        KeyError: An environment variable specified was not available in
            the worker's environment.
    """
    # Imported lazily so that workers which only support Singularity can
    # load this module without the docker package installed.
    import docker

    # Client for the Docker daemon on the host machine (see
    # https://docker-py.readthedocs.io/en/stable/client.html#docker.client.from_env)
    client = docker.from_env()

    # Always pull so the task runs against the latest image for the
    # requested tag.
    client.images.pull(container_image)

    # Build the host-to-container volume bindings for logs and results.
    volumes_dict = {}

    if logs_path is not None:
        host_logs_path = os.path.join(
            os.environ["WORKER_LOGS_DIRECTORY"], uuid
        )
        volumes_dict[host_logs_path] = {"bind": logs_path, "mode": "rw"}

    if results_path is not None:
        host_results_path = os.path.join(
            os.environ["WORKER_RESULTS_DIRECTORY"], uuid
        )
        volumes_dict[host_results_path] = {"bind": results_path, "mode": "rw"}

    # Collect the environment variables the job needs from the worker's
    # own environment.
    environment = {}

    try:
        for key in env_vars_list:
            environment[key] = os.environ[key]
    except KeyError as e:
        raise KeyError(
            "Environment variable %s not present in the worker's environment!"
            % e
        )

    # Also pass along the job's UUID
    environment["JOB_UUID"] = uuid

    # Compose the command: the arguments dict is JSON-encoded as a single
    # quoted CLI argument.
    if args_dict:
        command = "{executable} '{args}'".format(
            executable=command_to_run, args=json.dumps(args_dict)
        )
    else:
        command = command_to_run

    # Run the executable inside the container.
    client.containers.run(
        image=container_image,
        command=command,
        environment=environment,
        volumes=volumes_dict,
    )
def run_singularity_container_command(
    uuid,
    container_image,
    command_to_run,
    logs_path,
    results_path,
    env_vars_list,
    args_dict,
):
    """Launch an executable within a Singularity container.

    Args:
        uuid: A string containing the uuid of the job being run.
        container_image: A string containing the name of the container
            to pull.
        command_to_run: A string containing the command to run.
        logs_path: A string (or None) containing the path of the
            directory containing the relevant logs within the container.
        results_path: A string (or None) containing the path of the
            directory containing any output files from the container.
        env_vars_list: A list of strings containing the environment
            variable names for the worker to consume from its
            environment.
        args_dict: A dictionary containing arguments and corresponding
            values.

    Raises:
        KeyError: An environment variable specified was not available in
            the worker's environment.
        SingularityPullFailure: The Singularity pull could not complete
            with the specified timeout and number of retries.
    """
    # Import Singularity library
    from spython.main import Client as client

    # Pull the specified container. This pull in the latest version of
    # the container (with the specified tag if provided).
    timeout = int(os.environ["SINGULARITY_PULL_TIMEOUT"])
    num_retries = int(os.environ["SINGULARITY_PULL_RETRIES"])

    # Put a timeout on the client pull method: rebind client.pull to a
    # wrapped version that raises StopIteration after `timeout` seconds.
    # NOTE(review): this mutates the shared spython Client, so the wrap
    # presumably stacks if this function is called repeatedly in one
    # process — confirm before reusing workers long-term.
    client.pull = timeout_decorator.timeout(
        timeout, timeout_exception=StopIteration
    )(client.pull)

    for retry in range(num_retries):
        try:
            singularity_image = client.pull(
                image=container_image,
                pull_folder=os.environ["WORKER_SINGULARITY_IMAGES_DIRECTORY"],
                name_by_commit=True,
            )
            break
        except StopIteration:
            # The pull timed out. If this is the last retry, raise an
            # exception to indicate a failed job.
            if retry == num_retries - 1:
                raise SingularityPullFailure(
                    (
                        "Could not pull {image_url} within "
                        "{timeout} seconds after {num_retries} retries."
                    ).format(
                        image_url=container_image,
                        timeout=timeout,
                        num_retries=num_retries,
                    )
                )

    # Find out where to put the logs. Bind specs use Singularity's
    # "host_path:container_path" form.
    if logs_path is None:
        bind_option = []
    else:
        # Create the host logs path. This is required by the Singularity
        # library (though not the Docker library)
        host_logs_path = os.path.join(
            os.environ["WORKER_LOGS_DIRECTORY"], uuid
        )
        create_local_directory(host_logs_path)

        # Build the bind option to pass on to Singularity
        bind_option = [
            host_logs_path.rstrip("/") + ":" + logs_path.rstrip("/")
        ]

    # Find out where to put the results
    if results_path is not None:
        # Create the host results path
        host_results_path = os.path.join(
            os.environ["WORKER_RESULTS_DIRECTORY"], uuid
        )
        create_local_directory(host_results_path)

        # Build the bind option to pass on to Singularity
        bind_option += [
            host_results_path.rstrip("/") + ":" + results_path.rstrip("/")
        ]

    # Check for required environment variables. Note that by default
    # Singularity containers have access to their outside environment
    # variables, so we don't need to pass them along explicitly like we
    # need to for a Docker container.
    try:
        # Test to see that all keys are defined; the comprehension's
        # value is deliberately discarded — only the KeyError matters.
        {key: os.environ[key] for key in env_vars_list}
    except KeyError as e:
        raise KeyError(
            "Environment variable %s not present in the worker's environment!"
            % e
        )

    # Pass along the job's UUID
    os.environ["JOB_UUID"] = uuid

    # Compose the command to run: split into argv form, with the
    # JSON-encoded arguments dict appended as one final argument.
    command = shlex.split(command_to_run)

    if args_dict:
        command += [json.dumps(args_dict)]

    # Run the executable
    iter_ = client.execute(
        image=singularity_image, command=command, bind=bind_option, stream=True
    )

    # Okay, here's some magic. The issue is that without stream=True in
    # the above call, there's no way of determining the return code of
    # the above operation, and so no way of knowing whether it failed.
    # However, with stream=True, it'll raise a
    # subprocess.CalledProcessError exception for any non-zero return
    # code. Great! But before we can get that exception triggered we
    # need to iterate through all of the command's stdout, which is what
    # the below (seemingly useless) loop does.
    for _ in iter_:
        pass
| true |
80576b8bed00e6bb985a2b07b65fa5d1dc18735b | Python | tochyepez/test_02_ibero | /render.py | UTF-8 | 1,706 | 3.15625 | 3 | [] | no_license |
import matplotlib.pyplot as plt
def get_data():
    """Parse the topic-model output files.

    Returns a dict mapping filename -> list of [weight, word] string
    pairs, one pair per term found in the file.
    """
    files = ["article2.output.txt", "articles1.output.txt", "articles3.output.txt"]
    collector_f = {}
    for filename in files:
        rows = []
        with open(filename, "r") as handle:
            for raw_line in handle:
                # Strip the tuple parentheses, then split off the leading id.
                fields = raw_line.replace('(', '').replace(')', '').split(",")
                # Skip header-ish lines (containing '[') and lines with no
                # payload field after the id.
                if '[' in fields[0] or len(fields) < 2:
                    continue
                payload = fields[1].replace('\'', '').replace("\n", '')
                # Each term looks like  weight*"word" , joined by '+'.
                for term in payload.split("+"):
                    pieces = term.replace('"', '').split("*")
                    rows.append([piece.strip() for piece in pieces])
        collector_f[filename] = rows
    return collector_f
def order_information_and_plot():
    """Aggregate term weights across all parsed files and save a
    horizontal bar chart of the totals as an EPS file."""
    data = get_data()

    # Sum the weight of every term across all files.
    totals = {}
    for rows in data.values():
        for item in rows:
            word = item[1]
            weight = float(item[0])
            if word in totals:
                totals[word] += weight
            else:
                totals[word] = weight

    # Bar positions, labels, and lengths (in insertion order).
    labels = list(totals.keys())
    weights = [totals[word] for word in labels]
    positions = list(range(len(labels)))

    plt.rcdefaults()
    fig, ax = plt.subplots()

    ax.barh(positions, weights, align="center")
    ax.set_yticks(positions)
    ax.set_yticklabels(labels, fontsize=5)
    ax.invert_yaxis()  # labels read top-to-bottom
    ax.set_xlabel('weight')
    ax.set_title('Sentimental Analysis - Topic model')
    plt.savefig("sentimental_analysis_topic_model.eps", format="eps", dpi=1000)
# Build and save the chart only when executed as a script.
if __name__ == '__main__':
    order_information_and_plot()
ff52c5b65dffe571102c968eeb9fd34091046877 | Python | k47ma/Scraping-Interface | /lib/Interface_unicode.py | UTF-8 | 4,435 | 3.109375 | 3 | [] | no_license | # coding=utf-8
import requests
import threading
import unicodedata
from bs4 import BeautifulSoup
from tkinter import *
# Tool for looking up Unicode characters.
# Looks up the given character by hex code, decimal code, or name.
def lookup(value, type):
    """Look up a Unicode character by hex code, decimal code, or name.

    Args:
        value: the code point (as a string) or character name to look up.
        type: one of "hex", "dec", "name".

    Returns:
        A dict with "Hex", "Dec", "Name", "Char" keys on success,
        None when the input is invalid or nothing is found,
        -1 when the code point cannot be represented/named.
    """
    hex_code = ""
    dec_code = ""

    # search unicode by hex or dec code
    if type == "hex" or type == "dec":
        # Normalize both the hex and decimal representations.
        try:
            if type == "dec":
                dec_code = value
                hex_code = hex(int(value))[2:]
            else:
                dec_code = str(int(value, 16))
                hex_code = value
        except ValueError:
            return None
        except OverflowError:
            return None
    elif type == "name":
        # Search fileformat.info by character name and take the first
        # result's code point.
        value = value.replace(" ", "+")
        search_url = "http://www.fileformat.info/info/unicode/char/search.htm?q=" + value.lower() + "&preview=entity"
        source_code = requests.get(search_url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text, "html.parser")
        result_table = soup.find('table', class_="table")
        # Robustness: the results table itself may be missing entirely.
        if not result_table:
            return None
        result = result_table.find('td')
        if not result:
            return None
        hex_code = result.find('a')["href"].split("/")[0]
        dec_code = str(int(hex_code, 16))

    try:
        # Bug fix: unichr() is Python 2 only, but this module imports
        # tkinter (the Python 3 name), so unichr raised NameError at
        # runtime. chr() is the Python 3 equivalent.
        uni_char = chr(int(dec_code))
        name = unicodedata.name(uni_char)
    except ValueError:
        # Out-of-range code point or unnamed character.
        return -1
    except OverflowError:
        return None
    return {"Hex": hex_code, "Dec": dec_code, "Name": name, "Char": uni_char}
class UnicodeLookUpFrame(LabelFrame):
    """Labelled frame with the lookup UI: a query entry, a preview
    button, search-mode radio buttons, and a read-only result field."""

    def __init__(self, parent):
        LabelFrame.__init__(self, parent)
        self["text"] = "Unicode Character Lookup Tool"

        # --- top row: query entry, preview button, search-mode radios ---
        search_frame = Frame(self)
        search_frame.pack(side=TOP, fill=BOTH, expand=True, padx=6, pady=5)

        self.entry_var = StringVar()
        self.entry = Entry(search_frame, textvariable=self.entry_var)
        self.entry.pack(side=LEFT)
        self.entry.bind("<Key-Return>", self.start_search)

        preview_button = Button(search_frame, text="Preview", command=self.start_search)
        preview_button.pack(side=LEFT, padx=5, pady=5)

        mode_label = Label(search_frame, text="Search by:")
        mode_label.pack(side=LEFT)

        self.type_var = StringVar()
        for radio_text, mode in (("Hex Code", "hex"),
                                 ("Decimal Code", "dec"),
                                 ("Character Name", "name")):
            radio = Radiobutton(search_frame, text=radio_text,
                                variable=self.type_var, value=mode)
            radio.pack(side=LEFT)
        self.type_var.set("hex")

        # --- bottom row: disabled entry displaying the lookup result ---
        result_frame = Frame(self)
        result_frame.pack(side=BOTTOM, fill=X, expand=False, padx=6, pady=(0, 5))

        self.result = StringVar()
        self.target = Entry(result_frame, font=12, relief=FLAT, state=DISABLED,
                            textvariable=self.result, disabledforeground="black")
        self.target.pack(side=LEFT, fill=X, expand=True)

    def start_search(self, *args):
        """Run the lookup on a background thread so the UI stays responsive."""
        worker = SearchThread(type=self.type_var.get(), value=self.entry.get(),
                              target=self.target, var=self.result)
        worker.start()
class SearchThread(threading.Thread):
    """Background thread that performs one lookup and writes the result
    into the UI's result entry, keeping the Tk main thread responsive."""

    def __init__(self, type, value, target, var):
        threading.Thread.__init__(self)
        # Search mode: "hex", "dec", or "name".
        self.type = type
        # The raw query text entered by the user.
        self.value = value
        # The (normally disabled) Entry widget showing the result.
        self.target = target
        # StringVar backing the result Entry.
        self.var = var

    def run(self):
        # Temporarily enable the entry so its text/colour can be updated.
        self.target["state"] = "normal"

        # Empty query: show a red prompt and bail out.
        if not self.value:
            self.target["disabledforeground"] = "red"
            self.var.set("^ Please input a value. ^")
            self.target["state"] = "disabled"
            return

        self.target["disabledforeground"] = "black"
        self.var.set("Searching...")
        result = lookup(self.value, self.type)
        # lookup() returns None when nothing matched and -1 when the code
        # point exists but cannot be displayed.
        if result is None:
            self.var.set("Cannot find the unicode with " + self.type + " \"" + self.value + "\". Please check your input.")
            self.target["state"] = "disabled"
            return
        elif result == -1:
            self.var.set("Cannot display this unicode.")
            self.target["state"] = "disabled"
            return

        self.var.set(result["Name"].lower() + ": " + result["Char"] + " | Hex: " + result["Hex"] + " | Dec: " + result["Dec"])
        self.target["state"] = "disabled"
| true |