blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
db11e0934114548d31745405551f221c50c927e2 | Python | TomaszMichalski/rqa | /dbservice/database/insertions/misc.py | UTF-8 | 2,455 | 2.703125 | 3 | [] | no_license | # database.insertions.misc.py
from api import airly_reader
from database.getters import misc
def insert_address(cur, location, address):
    """Insert an address row if it is not already stored, then return its id.

    `location` supplies latitude/longitude, `address` the postal fields.
    The id is always re-read from the database after the (possible) insert.
    """
    values = (
        location.get('latitude'),
        location.get('longitude'),
        address.get('country'),
        address.get('city'),
        address.get('street'),
        address.get('number'),
    )
    latitude, longitude = values[0], values[1]
    if misc.get_address_id_by_coord(cur, latitude, longitude) is None:
        # Log the statement (values interpolated for readability only;
        # the actual execute below is parameterized).
        print(
            "Executing query: INSERT INTO addresses (latitude, longitude, "
            "country, city, street, st_number) "
            "VALUES ({0}, {1}, {2}, {3},"
            " {4}, {5});".format(*values))
        cur.execute(
            "INSERT INTO addresses (latitude, longitude, "
            "country, city, street, st_number) "
            "VALUES (%s, %s, %s, %s, %s, %s);",
            values)
    return misc.get_address_id_by_coord(cur, latitude, longitude)
def insert_nearest_airly_installations(cur, latitude, longitude, *args, **kwargs):
    """Fetch installations near (latitude, longitude) from the Airly API and
    insert every one that is not already stored.

    Optional kwargs: max_dist, max_res (forwarded to the API reader).
    """
    nearby = airly_reader.get_nearest_installations(
        latitude, longitude,
        max_dist=kwargs.get('max_dist', None),
        max_res=kwargs.get('max_res', None))
    for entry in nearby:
        if misc.get_airly_installation_by_id(cur, entry.get('id')) is None:
            insert_airly_installation(cur, entry)
def insert_airly_installation(cur, installation_info):
    """Insert one Airly installation (and its address) if missing.

    Returns the installation id whether or not a new row was inserted.
    """
    installation_id = installation_info.get('id')
    address_id = insert_address(cur,
                                installation_info.get('location'),
                                installation_info.get('address'))
    if not misc.get_airly_installation_by_id(cur, installation_id):
        row = (installation_id,
               address_id,
               installation_info.get('elevation'),
               installation_info.get('airly'))
        # Log the statement; the execute itself is parameterized.
        print(
            "Executing query: INSERT INTO airly_installations (installation_id, address_id, elevation, airly)"
            " VALUES ({0}, {1}, {2}, {3});".format(*row))
        cur.execute("INSERT INTO airly_installations (installation_id, address_id, elevation, airly)"
                    " VALUES (%s, %s, %s, %s);",
                    row)
    return installation_id
| true |
26d227680afeedc2819cf9e113ce3e7b6928ed0e | Python | hualili/opencv | /IP110-Deep-Learning/106-pytest55.py | UTF-8 | 468 | 3.515625 | 4 | [] | no_license | #!/usr/bin/python2.7
import math
def LoG2d(x, y, sigma):
    """Evaluate this file's Laplacian-of-Gaussian expression at (x, y).

    Radially symmetric: the value depends only on x**2 + y**2 and sigma.
    """
    r2 = x ** 2 + y ** 2
    two_sigma2 = 2 * sigma ** 2
    norm = math.sqrt(2 * math.pi) * math.pow(sigma, 5)
    return ((r2 - two_sigma2) / norm) * math.exp(-r2 / two_sigma2)
if __name__ == "__main__":
#try to see if position (-1, 0), (1, 0) and (0, -1), (0, 1) are all same
print LoG2d(1, 0, 1.8)
print LoG2d(-1, 0, 1.8)
print LoG2d(0, 1, 1.8)
print LoG2d(0, -1, 1.8)
#seed
print LoG2d(0, 0, 1.8) | true |
cc51a6222487508929d48fe20946559e5e6ea757 | Python | svdreijen/Cognitive_face_test | /cognitive_services_face/Face_verification_final.py | UTF-8 | 3,999 | 2.75 | 3 | [] | no_license | # Import libraries
import numpy as np
import cv2
import matplotlib.patches as patches
import requests
import matplotlib.pyplot as plt
import json
# Define functions to post request to face and vision API's
def detect_face(pic):
    """POST a binary image to the Face API 'detect' endpoint; return parsed JSON."""
    response = requests.post(
        face_api_url + "detect",
        headers={'Ocp-Apim-Subscription-Key': subscription_key_face,
                 'Content-Type': 'application/octet-stream'},
        params={'returnFaceLandmarks': 'True'},
        data=pic)
    response.raise_for_status()
    return response.json()
def verify_face(faceId, personId, personGroupId):
    """Ask the Face API whether `faceId` matches `personId` in `personGroupId`."""
    payload = json.dumps({'faceId': faceId,
                          'personId': personId,
                          'personGroupId': personGroupId})
    response = requests.post(
        face_api_url + "verify",
        headers={'Ocp-Apim-Subscription-Key': subscription_key_face,
                 'Content-Type': 'application/json'},
        data=payload)
    response.raise_for_status()
    return response.json()
def object_analysis(pic):
    """POST a binary image to the Vision API 'analyze' endpoint asking for
    object detection; return the parsed JSON result."""
    response = requests.post(
        vision_base_url + 'analyze',
        headers={'Ocp-Apim-Subscription-Key': subscription_key_vision,
                 'Content-Type': 'application/octet-stream'},
        params={'visualFeatures': 'Objects'},
        data=pic)
    response.raise_for_status()
    return response.json()
# Define the keys and api urls for face api
# SECURITY NOTE(review): API subscription keys are hard-coded and committed;
# they should come from environment variables / a secrets store.
subscription_key_face = '6fa0509cc7a64886a6775e56ec98d763'
face_api_url = 'https://westeurope.api.cognitive.microsoft.com/face/v1.0/'
# Define the keys and api urls for vision api
subscription_key_vision = "4303a199f4c749bf911015d6dd999ae3"
vision_base_url = "https://westeurope.api.cognitive.microsoft.com/vision/v2.0/"
# Capture webcam stream and read out frame
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
# Convert the frame numpy array to .jpg binary string
# NOTE(review): ndarray.tostring() is deprecated; tobytes() is the modern name.
frame_str = cv2.imencode('.jpg', frame)[1].tostring()
# Detect the face in the picture
analysis_face = detect_face(frame_str)
# Verify if the detected face matches person database
# NOTE(review): `siem` is never defined in this module — this line raises
# NameError. It presumably should be a person record fetched from the Face
# API person group; confirm against the training script that created it.
if analysis_face:
    analysis_verify = verify_face(analysis_face[0]['faceId'], siem['personId'], 'test')
# Detect object in the picture
analysis = object_analysis(frame_str)
# Create a Rectangle
# For the object
if analysis['objects']:
    rectangle = (analysis['objects'][0]['rectangle']['x'], analysis['objects'][0]['rectangle']['y'])
    width = analysis['objects'][0]['rectangle']['w']
    height = analysis['objects'][0]['rectangle']['h']
    rect = patches.Rectangle(rectangle, width, height, linewidth=1,edgecolor='r',facecolor='none')
    # NOTE(review): rectangle is (x, y) but (height, width) is added — the
    # corner offset looks transposed; presumably (width, height) was meant.
    box1 = [np.array(rectangle), np.array(rectangle) + np.array((height, width))]
# For the face
if analysis_face:
    rectangle2 = (analysis_face[0]['faceRectangle']['left'], analysis_face[0]['faceRectangle']['top'])
    width2 = analysis_face[0]['faceRectangle']['width']
    height2 = analysis_face[0]['faceRectangle']['height']
    rect2 = patches.Rectangle(rectangle2, width2, height2, linewidth=1,edgecolor='b',facecolor='none')
    # NOTE(review): same suspected (height, width) transposition as box1.
    box2 = [np.array(rectangle2), np.array(rectangle2) + np.array((height2, width2))]
# Display the image
fig,ax = plt.subplots(1)
ax.imshow(frame)
# Draw the boxes around face and object
if analysis['objects']:
    ax.add_patch(rect)
if analysis_face:
    ax.add_patch(rect2)
ax.axis("off")
# NOTE(review): `image_caption` is referenced here but only assigned three
# lines below — this raises NameError; the title call should come after the
# overlap computation.
_ = plt.title(image_caption, size="x-large", y=-0.1)
# Determine whether face and object overlap
# Axis-aligned rectangle intersection test on the two corner boxes.
overlap = (box2[0][0] < box1[1][0] and box2[1][0] > box1[0][0]) and (box2[0][1] < box1[1][1] and box2[1][1] > box1[0][1])
# NOTE(review): `faces` is undefined — presumably `analysis_verify` (the
# verify_face result, which carries 'isIdentical') was meant.
image_caption = 'Overlap: ' + str(overlap) + ' ' 'Identical: ' + str(faces['isIdentical']).capitalize()
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| true |
3c2c6cc2c77d8b84f93304e08deb129f60323de2 | Python | pombredanne/django-roesti | /roesti/models.py | UTF-8 | 10,649 | 2.71875 | 3 | [
"MIT"
] | permissive | import collections
from hashlib import md5
import pickle
from django.db import models, transaction
def freeze(obj):
    """Recursively convert `obj` into a deterministic, hashable form.

    Mappings become sorted tuples of (key, frozen value), lists become
    tuples of frozen values, sets become sorted tuples; every other value
    is returned unchanged.
    """
    items_attr = getattr(obj, 'items', None)
    if callable(items_attr):
        frozen_pairs = ((key, freeze(value)) for key, value in obj.items())
        return tuple(sorted(frozen_pairs))
    if isinstance(obj, list):
        return tuple(map(freeze, obj))
    if isinstance(obj, set):
        return tuple(sorted(obj))
    return obj
def make_hash(obj):
    """Return a stable 32-char MD5 hex digest of `obj`'s frozen form."""
    canonical = freeze(obj)
    return md5(pickle.dumps(canonical)).hexdigest()
class HashedModelManager(models.Manager):
    """Manager for HashedModel: builds instances from dicts and provides an
    idempotent bulk "ensure" (insert-if-missing, keyed by content hash)."""
    def from_dict(self, item_dict):
        """
        Constructor that also initializes `content_hash`.

        Returns (instance, related) where `related` maps
        (model, field_name) -> set of reverse-related instances that still
        need inserting after this instance exists.
        """
        instance = self.model()
        related = instance.set_dict(item_dict)
        return instance, related
    @transaction.atomic
    def ensure(self, items):
        """
        Inserts each item in `items` to the database, if it doesn't already
        exist. Similar to an upsert operation. `items` may be dict-like
        mappings or model instances.
        The `content_hash` of each item will be (re)calculated on each item.
        Returns list of model instances.
        """
        return list(self._ensure_impl(items))
    def _ensure_impl(self, items):
        """
        Implementation of `HashedModelManager.ensure`.
        In separate function so we can avoid nested transactions.
        """
        # Normalize `items` into a list of model instances where the primary
        # key is properly set.
        instances = []
        related_mapping = collections.defaultdict(set)
        for item in items:
            # NOTE(review): collections.Mapping was removed in Python 3.10;
            # modern code needs collections.abc.Mapping here.
            if isinstance(item, collections.Mapping):
                instance, related_models = self.from_dict(item)
                for key, related_instances in related_models.items():
                    related_mapping[key].update(related_instances)
                instances.append(instance)
            elif isinstance(item, HashedModel):
                # Instances may arrive without a hash; compute it so the PK
                # is set before the duplicate check below.
                if not item.content_hash:
                    item.content_hash = item.get_content_hash()
                instances.append(item)
            else:
                raise ValueError('Item must be a Mapping or HashedModel')
        instances = self._do_insert(self.model, instances)
        # Now, insert all the instances that have back-references to this one.
        for (model, field_name), related_instances in related_mapping.items():
            self._do_insert(model, related_instances, skip_ensure=[self.model])
        return instances
    def _do_insert(self, InsertModel, instances, skip_ensure=[]):
        # NOTE: the mutable default `skip_ensure=[]` is only ever read here,
        # never mutated, so the shared-default pitfall does not bite.
        # Eliminate potential duplicate instances.
        instances = {
            instance.pk: instance
            for instance in instances
        }.values()
        # Ensure that foreign keys to other HashedModels exist.
        # First, get the foreign key fields we need to work with, aggregated
        # by table. eg, {table1: [field1, field2], table2: [field3]}
        table_references = collections.defaultdict(list)
        for field_name in InsertModel.hash_fields:
            field = InsertModel._meta.get_field(field_name)
            if isinstance(field, models.ForeignKey):
                table = field.rel.to
                if issubclass(table, HashedModel):
                    table_references[table].append(field.name)
        # Next, ensure that references for each table exist.
        for table, field_names in table_references.items():
            if table in skip_ensure:
                continue
            table.objects._ensure_impl(getattr(instance, field_name)
                                       for field_name in field_names
                                       for instance in instances)
        # Get the keys of the items that already exist in the database.
        all_pks = set(inst.pk for inst in instances)
        existing_pks = InsertModel.objects.filter(
            pk__in=all_pks).values_list('pk', flat=True)
        # Insert instances that aren't in the db yet.
        # If everything already is in the db, skip the empty `bulk_create`.
        if len(all_pks) > len(existing_pks):
            InsertModel.objects.bulk_create(instance
                                            for instance in instances
                                            if instance.pk not in existing_pks)
        return instances
class HashField(models.CharField):
    """CharField sized for a 32-character MD5 hex digest by default.

    Any keyword argument (including max_length) can still be overridden
    by the caller.
    """
    def __init__(self, **kwargs):
        options = dict(kwargs)
        options.setdefault('max_length', 32)
        super(HashField, self).__init__(**options)
class HashedModel(models.Model):
    """Abstract model whose primary key is an MD5 hash of selected fields.

    Concrete subclasses must define `hash_fields`, a sequence of field
    names included in the hash.
    """
    objects = HashedModelManager()
    # Primary key: hex digest of the fields named in `hash_fields`.
    content_hash = HashField(primary_key=True)
    def save(self, *args, **kwargs):
        # Recompute on every save so the PK always matches the content.
        self.content_hash = self.get_content_hash()
        super(HashedModel, self).save(*args, **kwargs)
    def _get_hash_field(self, field_name, reverse_relations):
        """Return the hashable value of one field; a related manager is
        replaced by the set of instance PKs supplied in `reverse_relations`."""
        value = getattr(self, field_name)
        # If this is a related field manager, get the fields as an unordered
        # set of the instance IDs (hashes).
        if issubclass(value.__class__, models.Manager):
            for (model, field), instances in reverse_relations.items():
                if value.model == model and field == value.field.get_attname():
                    value = set(instance.pk for instance in instances)
                    break
        return value
    def _get_hash_field_dict(self, reverse_relations):
        # Map each hashed field name to its hashable value.
        return {
            field_name: self._get_hash_field(field_name, reverse_relations)
            for field_name in self.hash_fields
        }
    def get_content_hash(self, reverse_relations={}):
        """Return the MD5 digest of this instance's hashed fields.

        `reverse_relations` maps (model, field_name) -> instances of
        not-yet-saved reverse relations that should count toward the hash.
        (The {} default is safe: it is only read, never mutated.)
        """
        return make_hash(self._get_hash_field_dict(reverse_relations))
    def _accumulate_dict(self, target, source):
        # Merge `source` (key -> iterable) into `target` (a defaultdict of
        # sets); tolerates a falsy/None source.
        if not source:
            return
        for key, value in source.items():
            target[key].update(value)
    def set_dict(self, item_dict):
        """Populate this instance from a (possibly nested) dict and compute
        its content hash.

        Nested dicts on HashedModel foreign keys become instances; iterables
        on reverse relations become related instances.  Returns a mapping
        (model, field_name) -> set(instances) that must be inserted after
        this instance.
        """
        # Will accumulate ManyToMany relations here, in the form:
        # {ModelClass: [instance1, instance2, ...]}
        reverse_relations = collections.defaultdict(set)
        for field_name, value in item_dict.items():
            field = self._meta.get_field(field_name)
            # If this is a dict-like value...
            # NOTE(review): collections.Mapping / collections.Iterable were
            # removed in Python 3.10; use collections.abc on modern Pythons.
            if isinstance(value, collections.Mapping):
                # ... And it corresponds to a reference to another HashedModel,
                # then try to instantiate it.
                if issubclass(field.rel.to, HashedModel):
                    value, related = field.rel.to.objects.from_dict(value)
                    self._accumulate_dict(reverse_relations, related)
            # If this is a non-string iterable...
            elif isinstance(value, collections.Iterable) and not isinstance(
                    value, str):
                # ... And it corresponds to a reverse relation, we will try to
                # create the objects and accumulate them.
                if type(field) == models.ManyToOneRel:
                    RelatedModel = field.related_model
                    # For each reverse relation, create the instance and
                    # accumulate in `reverse_relations`.
                    for item in value:
                        instance, related = RelatedModel.objects.from_dict(item)
                        key = (RelatedModel, field.remote_field.get_attname())
                        reverse_relations[key].add(instance)
                        self._accumulate_dict(reverse_relations, related)
                    # Don't set related fields on the instance itself.
                    value = None
            # Set this value on the instance.
            if value is not None:
                setattr(self, field_name, value)
        # Calculate the hash for this instance.
        self.content_hash = self.get_content_hash(reverse_relations)
        # Set all the back-references to this instance.
        for (model, field_name), instances in reverse_relations.items():
            # If this set of relations doesn't refer to this model, skip.
            field = model._meta.get_field(field_name)
            if self.__class__ != field.rel.to:
                continue
            for instance in instances:
                setattr(instance, field_name, self.content_hash)
        return reverse_relations
    def __str__(self):
        return self.content_hash
    class Meta:
        abstract = True
class HashedListModelManager(models.Manager):
    """Manager for HashedList: idempotently materializes ordered lists of
    hashed items, keyed by a hash of the item PKs."""
    def get_list(self, list_hash):
        # Queryset of the list row(s) with this hash (PK lookup, 0 or 1 row).
        return self.filter(list_hash=list_hash)
    @transaction.atomic
    def ensure_list(self, ItemModel, items):
        """Insert (if missing) the list of `items` of type `ItemModel` and
        return the HashedList instance representing it."""
        # Ensure the list items exist.
        item_instances = ItemModel.objects._ensure_impl(items)
        # Calculate the hash of this list as the hash of the list of its keys.
        list_hash = make_hash([item.pk for item in item_instances])
        # If this list already exists, return it.
        lst = self.filter(pk=list_hash)
        if len(lst) > 0:
            return lst[0]
        # Create the HashedList model instance...
        lst_instance = self.create(pk=list_hash)
        # ... and assign each of these list items to it with back references.
        ListItemModel = self.model.items.field.model
        items = ListItemModel.objects.ensure_items(list_hash, item_instances)
        return lst_instance
class HashedList(models.Model):
    """A list identified solely by the hash of its members' primary keys.

    The actual members live in a HashedListItemModel subclass that points
    back here via its `list_hash` foreign key (related_name='items').
    """
    objects = HashedListModelManager()
    list_hash = HashField(primary_key=True)
    def __str__(self):
        return self.list_hash
class HashedListItemModelManager(HashedModelManager):
    """Manager for HashedListItemModel rows (the per-position list entries)."""
    def get_list(self, list_hash):
        # All item rows belonging to the given list, in Meta.ordering order.
        return self.filter(list_hash=list_hash)
    def ensure_items(self, list_hash, items):
        """Bulk-create the position rows linking `items` (1-based order)
        to the list identified by `list_hash`."""
        return self.bulk_create([
            self.model(
                list_hash_id=list_hash,
                order=order,
                item=item
            )
            for order, item in enumerate(items, 1)
        ])
class HashedListItemModel(models.Model):
    """
    Implements a model that groups items in a list, with `list_hash` equal to a
    hash of the list's references. The concrete class must define `item` and
    `list_hash` fields.
    """
    objects = HashedListItemModelManager()
    # NOTE(review): ForeignKey without on_delete is only valid on Django <2.0.
    list_hash = models.ForeignKey(HashedList, related_name='items')
    # 1-based position of the item within its list (see ensure_items).
    order = models.PositiveIntegerField()
    # Set this field in the concrete class.
    #item = models.ForeignKey(ListItemModel)
    class Meta:
        abstract = True
        ordering = ('order',)
| true |
776d062439d06e9fcaceb3faf729f8c242fc7108 | Python | RSIP4SH/PythonFramework | /saveable/saveable.py | UTF-8 | 1,141 | 2.546875 | 3 | [] | no_license | __author__ = 'Aubrey'
import abc
#import configs.base_configs as base_configs
import copy
from configs import base_configs
class Saveable(object):
    """Mixin giving objects a deterministic, human-readable name built from
    registered attributes (e.g. for naming saved result files).

    Subclasses register attribute names in `_name_params` (value: a dict of
    per-field options) and may override `prefix`.
    """
    #__metaclass__ = abc.ABCMeta
    def __init__(self, configs=None):
        # Fix: the original default `configs=base_configs.Configs()` was
        # evaluated once at class-definition time, creating a single shared
        # instance (and importing-time side effects).  Build a fresh default
        # per call instead; passing an explicit `configs` works as before.
        if configs is None:
            configs = base_configs.Configs()
        self._name_params = {}
        self.configs = copy.deepcopy(configs)
    @property
    def name_params(self):
        # Mapping: attribute name -> per-field naming options.
        return self._name_params
    @property
    def prefix(self):
        # Subclasses override this to label their output.
        return "No Name"
    @property
    def name_string(self):
        """Build '<prefix>_k=v_k2=v2...' from the registered name params.

        The per-field option 'include_field_name' (default True) controls
        whether the 'k=' part is emitted.
        """
        field_delim = '_'
        field_value_delim = '='
        default_options = {
            'include_field_name': True
        }
        s = self.prefix
        for k, value in self.name_params.items():
            d = default_options.copy()
            d.update(value)
            # Use the named delimiters (the original defined them but then
            # repeated the literals inline).
            s += field_delim
            if d['include_field_name']:
                s += k + field_value_delim
            s += str(getattr(self, k))
        return s
if __name__ == "__main__":
s = Saveable()
s.x = 10
s._name_params['x'] = {}
print s.name_string
s._name_params['x']['include_field_name'] = False
print s.name_string
print 'Test Run'
| true |
85899c911f44357b905685eab565fd52ac764fad | Python | Vaylide/pigeon | /breadcrumbs/poke.py | UTF-8 | 504 | 2.859375 | 3 | [] | no_license | # echo:
# repeats your message at you, unless
# the message is self-originated
class poke:
    """Handler that answers a poke with "ow" in the message's target."""
    def __init__(self, client):
        self.client = client
    def act(self, msg):
        # Reply in whatever channel/target the message arrived on.
        target = msg.targ
        self.client.privmsg(target, "ow")
        return 0
    def eat(self, msg):
        # Never consumes the message; other handlers still see it.
        return 0
class poek:
    """Like `poke`, but addresses the "ow" reply to the message's originator."""
    def __init__(self, client):
        # Fix: was `def __init(...)` (missing trailing underscores, so the
        # constructor never ran) and assigned `self.cient` (typo), leaving
        # `act` to crash on the missing `self.client` attribute.
        self.client = client
    def act(self, msg):
        self.client.privmsg(msg.targ, "{}: ow".format(msg.orig))
        return 0
    def ear(self, msg):
        # Kept under its original name (not renamed to `eat`) so any
        # existing callers keep working; never consumes the message.
        return 0
| true |
2241f6d45a42228faa31116af55bdadba33f9a6b | Python | NULanguageLearning/Latin | /LatinN改.py | UTF-8 | 1,174 | 3.171875 | 3 | [] | no_license | import random
# Noun declension endings: 1st decl. (-a), 2nd decl. (-us), 2nd decl. neuter
# (-um); each row is ordered sg nom/gen/acc/dat/abl then pl nom/gen/acc/dat/abl.
suf=[["a","ae","am","ae","a","ae","arum","as","is","is"],["us","i","um","o","o","i","orum","os","is","is"]\
,["um","i","um","o","o","a","orum","a","is","is"]]
# Grammatical-number labels shown to the user (singular, plural).
n=["単数","複数"]
# Case labels (nominative, genitive, accusative, dative, ablative).
s=["主格","属格","対格","与格","奪格"]
# word template -> [declension index into suf, gloss shown to the user,
# extra int flag]  -- filled by makedic(); the third field is unused here.
dic={}
def makedic(read):
    """Load word entries from the CSV file `read` into the global `dic`.

    Each non-empty line has the form `word,declension,gloss,flag` and is
    stored as dic[word] = [int(declension), gloss, int(flag)].
    """
    # Fix: use `with` so the file is closed even if parsing raises (the
    # original open/close pair leaked on error and shadowed builtin `file`).
    with open(read, "r") as fh:
        # Filtering in one pass replaces the original O(n^2)
        # `while "" in f1: f1.remove("")` loop.
        lines = [line for line in fh.read().split("\n") if line != ""]
    for fields in (line.split(",") for line in lines):
        dic[fields[0]] = [int(fields[1]), fields[2], int(fields[3])]
makedic("dicN.txt")
print("次の名詞を、指示された格の形でラテン語で書け")
while True:
A1=random.randint(0,1)
A2=random.randint(1,4)
num=n[A1]
stat=s[A2]
QWard=random.choice(list(dic.keys()))
B=dic[QWard][0]
QW=QWard.format(suf[B][0])
QAns=QWard.format(suf[B][A1*5+A2])#答え
ans=input("{0}({1})の{2}{3}\n".format(QW,dic[QWard][1],num,stat))
if ans==QAns:
print("正解")
else:
print("残念。正解は{}".format(QAns))
#file=open("wrong.txt","a")
#file.write("{0},{1},{2}\n".format(QW,A1,A2))
#file.close()
| true |
de2bd1d132f78687050ab0aa0d4bec330fd5a862 | Python | yunini2/knowledge | /pearson.py | UTF-8 | 691 | 3 | 3 | [] | no_license | import math
def pearson(vector1, vector2):
    """Return the Pearson correlation coefficient of two equal-length
    numeric sequences.

    Fix: returns 0.0 when the coefficient is undefined (empty input or a
    zero-variance vector) instead of crashing — the original raised
    ZeroDivisionError (or a math domain error from float rounding) in
    those cases, and its `if rho == 0.0` branch was a no-op.
    """
    n = len(vector1)
    if n == 0:
        return 0.0
    sum1 = sum(float(v) for v in vector1)
    sum2 = sum(float(v) for v in vector2)
    sum1_sq = sum(v * v for v in vector1)
    sum2_sq = sum(v * v for v in vector2)
    # Sum of pairwise products; zip avoids indexing via range(n).
    p_sum = sum(a * b for a, b in zip(vector1, vector2))
    var1 = sum1_sq - pow(sum1, 2) / n
    var2 = sum2_sq - pow(sum2, 2) / n
    # Guard against zero (or slightly negative, from rounding) variance.
    if var1 <= 0.0 or var2 <= 0.0:
        return 0.0
    return (p_sum - (sum1 * sum2 / n)) / math.sqrt(var1 * var2)
# Example usage, commented out: `data` is undefined in this module and the
# first call had unbalanced parentheses (`listdata['xx']))`), which made the
# file fail to even parse.
# pearson(list(data['xx']), list(data['xx']))    # 0.9981191651097102
# pearson(list(data['xx']), list(data['xxx']))   # -0.07462375694166053
| true |
2e6857d2ac9a527924071e0ac42ca8e43e1cb65e | Python | anliec/CV_homeworks | /HW6/question1.py | UTF-8 | 1,015 | 2.671875 | 3 | [] | no_license | import cv2
import numpy as np
def gaussian_reduce(im: np.ndarray):
    """One Gaussian-pyramid REDUCE step: 3x3 Gaussian blur, then halve
    both dimensions with nearest-neighbour sampling."""
    blurred = cv2.GaussianBlur(im, (3, 3), 0)
    return cv2.resize(blurred, (0, 0), fx=0.5, fy=0.5,
                      interpolation=cv2.INTER_NEAREST)
def gaussian_expend(im: np.ndarray):
    """One Gaussian-pyramid EXPAND step: double both dimensions with
    nearest-neighbour sampling, then smooth with a 3x3 Gaussian."""
    upsampled = cv2.resize(im, (0, 0), fx=2.0, fy=2.0,
                           interpolation=cv2.INTER_NEAREST)
    return cv2.GaussianBlur(upsampled, (3, 3), 0)
def laplacian_reduce(im: np.ndarray):
    """Compute one Laplacian-pyramid level.

    Returns (reduced, residual): `reduced` is the next Gaussian level and
    `residual` is the signed difference between `im` and its re-expanded
    reduction.
    """
    r = gaussian_reduce(im.copy())
    e = gaussian_expend(r.copy())
    # Match sizes: reducing then expanding an odd-sized image can be off by
    # one pixel.
    im = cv2.resize(im, (e.shape[1], e.shape[0]))
    # Fix: `np.int` (an alias of builtin int) was removed in NumPy 1.24;
    # astype(int) yields the identical dtype.
    return r, im.astype(int) - e.astype(int)
if __name__ == '__main__':
    # Build a 4-level Laplacian pyramid of the first Yosemite frame and
    # write each level out: ps5-1-1-* are the Gaussian levels, ps5-1-2-*
    # the residuals.
    seq1 = cv2.imread("subject/DataSeq1/yos_img_01.jpg", cv2.IMREAD_GRAYSCALE)
    img = seq1
    cv2.imwrite("Images/ps5-1-1-0.png", img)
    for i in range(4):
        img, error = laplacian_reduce(img)
        # Shift the signed residual by 128 so it is visible as uint8.
        error += 128
        cv2.imwrite("Images/ps5-1-1-" + str(i + 1) + ".png", img.astype(np.uint8))
        cv2.imwrite("Images/ps5-1-2-" + str(i + 1) + ".png", error.astype(np.uint8))
| true |
e96b472d38c56302c9953d478f416787999cf7a3 | Python | daniel-reich/ubiquitous-fiesta | /3gziWsCxqGwGGZmr5_10.py | UTF-8 | 168 | 3.109375 | 3 | [] | no_license |
def is_prime(n):
    """Return True iff `n` is prime (trial division up to sqrt(n))."""
    return n > 1 and all(n % i for i in range(2, int(n ** 0.5) + 1))
def fat_prime(a, b):
    """Return the largest prime in the inclusive range [min(a,b), max(a,b)].

    Fix: scans downward from the top and stops at the first prime instead
    of materialising every prime in the range and taking the last one.
    """
    for candidate in range(max(a, b), min(a, b) - 1, -1):
        if is_prime(candidate):
            return candidate
    # Mirror the original failure mode ([][-1] raised IndexError).
    raise IndexError('no prime in range [{}, {}]'.format(min(a, b), max(a, b)))
| true |
212f14db4836078904b8966df84ddcdf6fcd1e54 | Python | nguyenngoclinhchi/CS3244-Project | /linh_chi.py | UTF-8 | 2,877 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""Linh_Chi.ipynb
Original file is located at
https://colab.research.google.com/drive/13SEnWoVQlVhCtoKFGTQQbbwpF0k3MBcD
"""
# Commented out IPython magic to ensure Python compatibility.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', 100)
from tqdm import tqdm_notebook as tqdm
# for preprocessing the data
from sklearn.preprocessing import StandardScaler
# the model
from sklearn.svm import SVC
from sklearn.decomposition import PCA
# for combining the preprocess with model training
from sklearn.pipeline import Pipeline
# for optimizing the hyperparameters of the pipeline
from sklearn.model_selection import GridSearchCV
import sys
import warnings
# Silence all warnings unless the user asked for them on the command line.
if not sys.warnoptions:
    warnings.simplefilter("ignore")
# Import files
train_values = pd.read_csv('train_values.csv', index_col='building_id')
train_labels = pd.read_csv('train_labels.csv', index_col='building_id')
# Hand-picked feature subset; categorical columns are one-hot encoded below.
selected_features = ['foundation_type',
                     'area_percentage',
                     'height_percentage',
                     'count_floors_pre_eq',
                     'land_surface_condition',
                     'has_superstructure_cement_mortar_stone']
train_values_subset = train_values[selected_features]
train_values_subset = pd.get_dummies(train_values_subset)
# Train the model
pipe_steps = [('scaler', StandardScaler()), ('pca', PCA()), ('SupVM', SVC(kernel='rbf'))]
pipe = Pipeline(pipe_steps)
param_grid = {'pca__n_components': [2],
              'SupVM__C' : [0.1, 0.5, 1, 10, 30, 40, 50, 70, 100, 500, 1000],
              'SupVM__gamma' : [0.001, 0.005, 0.01, 0.05, 0.07, 0.1, 0.5, 1, 5, 10, 50],
              }
print('Start fitting training data')
# NOTE(review): each iteration re-fits and overwrites `gs`, so only the
# last (cv=6) search survives for the predictions below.
for num_cv in tqdm(range(4, 7)):
    gs = GridSearchCV(pipe, param_grid, cv=num_cv)
    gs.fit(train_values_subset, train_labels.values.ravel())
    print("Best fit parameter for %d fold CV" % num_cv, gs.best_params_)
# Evaluate the model
from sklearn.metrics import f1_score
# NOTE(review): this is in-sample (training-set) F1, not a validation score.
in_sample_predict = gs.predict(train_values_subset)
f1_score(train_labels, in_sample_predict, average='micro')
# Read values then output Results
test_values = pd.read_csv('test_values.csv', index_col='building_id')
test_values_subset = test_values[selected_features]
test_values_subset = pd.get_dummies(test_values_subset)
predictions = gs.predict(test_values_subset)
# NOTE(review): the filename depends on `num_cv` leaked from the loop above
# (always 6 here) — presumably the plain 'submission_format.csv' was meant.
submission_format = pd.read_csv('submission_format' + '_rbf' + '_cv=' + str(num_cv) + '.csv',
                                index_col='building_id')
my_submission = pd.DataFrame(data=predictions,
                             columns=submission_format.columns,
                             index=submission_format.index)
my_submission.head()
my_submission.to_csv('submission.csv')
| true |
b039720e0a09b3126d928c66ff4a7f4f976435e9 | Python | sylvaus/presentations | /python/code/exercise_solutions/01_print_input.py | UTF-8 | 437 | 4.4375 | 4 | [] | no_license | """
Exercise 1:
Fill your_function to make it ask for a name and then print a welcome message
Help: to display text to the operator use the print function
Example:
print("hello")
    You can print multiple things by separating them with commas:
Example:
print("hello", "and", "welcome")
"""
def your_function():
    """Ask the operator for their name, then print a greeting."""
    user_name = input("What is your name?")
    print("Hello", user_name)
if __name__ == '__main__':
    # Run the exercise interactively.
    your_function()
| true |
ebf9be3eeedd4c901f8464a62bf58785cb6b4ec1 | Python | samshipengs/AlgoTool | /A1/fibonacci_huge/fibonacci_huge.py | UTF-8 | 535 | 3.25 | 3 | [] | no_license | # Uses python3
import sys
def get_fibonaccihuge(n, m):
    """Return F(n) mod m, with F(0)=0, F(1)=1.

    Uses the Pisano period: F mod m is periodic in n, and the period always
    restarts at the pair (0, 1).  This replaces the original heuristic —
    which scanned a fixed m*10 prefix of *full-size* Fibonacci numbers and
    could miss the true period — and removes its dead trailing `return 0`.
    """
    if m == 1:
        return 0
    # Find the Pisano period: first index > 0 where the consecutive pair
    # (F mod m, F' mod m) returns to (0, 1).  The period is at most 6*m
    # (and smaller still for m < 6), so m*m iterations suffice for m >= 2.
    prev, cur = 0, 1
    period = 0
    for _ in range(m * m):
        prev, cur = cur, (prev + cur) % m
        period += 1
        if (prev, cur) == (0, 1):
            break
    # Only n modulo the period matters; then iterate with small residues.
    n %= period
    a, b = 0, 1
    for _ in range(n):
        a, b = b, (a + b) % m
    return a
if __name__ == '__main__':
    # Read "n m" from stdin and print F(n) mod m.
    # Fix: do not shadow the builtin `input`, and drop the stray semicolon.
    data = sys.stdin.read()
    n, m = map(int, data.split())
    print(get_fibonaccihuge(n, m))
| true |
f87d18e9e6ffd0647dccba96f2d9c331eb71baff | Python | Aasthaengg/IBMdataset | /Python_codes/p03071/s328507384.py | UTF-8 | 105 | 2.765625 | 3 | [] | no_license | a,b=map(int,input().split())
# Take the larger of a and b first, decrement whichever was taken, then add
# the better remaining candidate (the other number, or larger-1).
# Net effect: prints max(a + b, 2 * max(a, b) - 1).
ans=max(a,b)
if ans==a:
    a=a-1
else:
    b=b-1
ans+=max(a,b)
print(ans)
| true |
7e8e3e64234bec55f36c8a9621bf6edce029fd42 | Python | gregburek/Coding-Out-of-a-Wet-Paper-Bag | /pythonchallenge.com/3.py | UTF-8 | 863 | 2.734375 | 3 | [] | no_license | equality_file = open('equality.html')
#equality_file = ['mkPytpvUSvuPtLFmkeKQIiWNNNaJouCnyPyiaRBSYuvMtBXylHWIKkexawFeNwjIpTJBImSUXiAAAipljptIj']
rare_chars = ''
answer = ''
# State machine over characters: collect runs shaped XXXyXXX (exactly three
# uppercase letters on each side of one lowercase letter) and keep the
# guarded lowercase character.
for line in equality_file:
    for char in line:
        if not char.isalpha():
            continue
        if len(rare_chars) < 3 and char.isupper():
            rare_chars = rare_chars + char
        elif len(rare_chars) == 3 and char.islower():
            rare_chars = rare_chars + char
        elif 3 < len(rare_chars) < 7 and char.isupper():
            rare_chars = rare_chars + char
        elif len(rare_chars) == 7 and char.islower():
            answer = answer + rare_chars[3]
            rare_chars = ''
        else:
            rare_chars = ''
# Fix: print() calls instead of the Python-2-only print statements, so the
# script also runs under Python 3 (output is identical on both).
print(answer)
# Same extraction in one regex (no lookarounds, so the boundary characters
# are consumed by the match).
import re
mess = open("re.html").read()
print(''.join(re.findall('[^A-Z][A-Z]{3}([a-z])[A-Z]{3}[^A-Z]',
                         mess)))
# output:
#lycdinqffkxedlyaawusnogssotgrw
#linkedlist
| true |
722a891f2028b8f9f1a96b1f5abfcbac77cf625f | Python | MlvPrasadOfficial/KaggleNoteboooks_of_Projects | /4 jigsaw/lightgbm-fast-compact-solution.py | UTF-8 | 9,253 | 2.59375 | 3 | [
"MIT"
] | permissive | #import modules
import numpy as np
import pandas as pd
from contextlib import contextmanager
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.sparse import hstack
import time
import re
import string
from scipy.sparse import csr_matrix
from sklearn.preprocessing import MinMaxScaler
import lightgbm as lgb
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
import gc
from collections import defaultdict
import os
import psutil
# Contraction replacement patterns
cont_patterns = [
    (b'(W|w)on\'t', b'will not'),
    (b'(C|c)an\'t', b'can not'),
    (b'(I|i)\'m', b'i am'),
    (b'(A|a)in\'t', b'is not'),
    (b'(\w+)\'ll', b'\g<1> will'),
    (b'(\w+)n\'t', b'\g<1> not'),
    (b'(\w+)\'ve', b'\g<1> have'),
    (b'(\w+)\'s', b'\g<1> is'),
    (b'(\w+)\'re', b'\g<1> are'),
    (b'(\w+)\'d', b'\g<1> would'),
]
# Compile once at import time; order matters (specific forms first).
patterns = [(re.compile(pat), rep) for pat, rep in cont_patterns]
@contextmanager
def timer(name):
    """
    https://www.kaggle.com/lopuhin/mercari-golf-0-3875-cv-in-75-loc-1900-s
    """
    start = time.time()
    yield
    print(f'[{name}] done in {time.time() - start:.0f} s')
def prepare_for_char_n_gram(text):
    """Normalise `text` for character n-gram features.

    Lower-cases, expands English contractions, strips punctuation and
    digits, collapses whitespace, then wraps every word in '#' markers
    (e.g. "my name" -> "#my# #name#").
    """
    # Work on bytes (lower-casing is only meaningful for English here).
    clean = bytes(text.lower(), encoding="utf-8")
    # Normalise whitespace-like control characters to plain spaces.
    for control in (b"\n", b"\t", b"\b", b"\r"):
        clean = clean.replace(control, b" ")
    # Expand English contractions (won't -> will not, ...).
    for (pattern, repl) in patterns:
        clean = re.sub(pattern, repl, clean)
    # Strip punctuation from every whitespace-separated token.
    exclude = re.compile(b'[%s]' % re.escape(bytes(string.punctuation, encoding='utf-8')))
    clean = b" ".join(exclude.sub(b'', token) for token in clean.split())
    # Drop digit runs entirely.
    clean = re.sub(b"\d+", b" ", clean)
    # Collapse the extra spaces introduced above and trim a trailing one.
    clean = re.sub(b'\s+', b' ', clean)
    clean = re.sub(b'\s+$', b'', clean)
    # Surround every word with '#' markers.
    clean = re.sub(b" ", b"# #", clean)
    clean = b"#" + clean + b"#"
    return str(clean, 'utf-8')
def count_regexp_occ(regexp="", text=None):
""" Simple way to get the number of occurence of a regex"""
return len(re.findall(regexp, text))
def get_indicators_and_clean_comments(df):
    """
    Check all sorts of content as it may help find toxic comment
    Though I'm not sure all of them improve scores

    Mutates df in place: adds the indicator columns below plus
    'clean_comment' (via prepare_for_char_n_gram) and its derived
    length/character features.  Returns None.
    """
    # Count number of \n
    df["ant_slash_n"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\n", x))
    # Get length in words and characters
    df["raw_word_len"] = df["comment_text"].apply(lambda x: len(x.split()))
    df["raw_char_len"] = df["comment_text"].apply(lambda x: len(x))
    # Check number of upper case, if you're angry you may write in upper case
    df["nb_upper"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"[A-Z]", x))
    # Number of F words - f..k contains folk, fork,
    df["nb_fk"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"[Ff]\S{2}[Kk]", x))
    # Number of S word
    df["nb_sk"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"[Ss]\S{2}[Kk]", x))
    # Number of D words
    df["nb_dk"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"[dD]ick", x))
    # Number of occurence of You, insulting someone usually needs someone called : you
    df["nb_you"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\W[Yy]ou\W", x))
    # Just to check you really refered to my mother ;-)
    df["nb_mother"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\Wmother\W", x))
    # Just checking for toxic 19th century vocabulary
    df["nb_ng"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\Wnigger\W", x))
    # Some Sentences start with a <:> so it may help
    df["start_with_columns"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"^\:+", x))
    # Check for time stamp
    # NOTE(review): the alternation r"\d{2}|:\d{2}" also counts ANY bare
    # two-digit run; a literal timestamp would be r"\d{2}:\d{2}" — confirm.
    df["has_timestamp"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\d{2}|:\d{2}", x))
    # Check for dates 18:44, 8 December 2010
    df["has_date_long"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\D\d{2}:\d{2}, \d{1,2} \w+ \d{4}", x))
    # Check for date short 8 December 2010
    df["has_date_short"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\D\d{1,2} \w+ \d{4}", x))
    # Check for http links
    df["has_http"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"http[s]{0,1}://\S+", x))
    # check for mail
    df["has_mail"] = df["comment_text"].apply(
        lambda x: count_regexp_occ(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+', x)
    )
    # Looking for words surrounded by == word == or """" word """"
    df["has_emphasize_equal"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\={2}.+\={2}", x))
    df["has_emphasize_quotes"] = df["comment_text"].apply(lambda x: count_regexp_occ(r"\"{4}\S+\"{4}", x))
    # Now clean comments
    df["clean_comment"] = df["comment_text"].apply(lambda x: prepare_for_char_n_gram(x))
    # Get the new length in words and characters
    df["clean_word_len"] = df["clean_comment"].apply(lambda x: len(x.split()))
    df["clean_char_len"] = df["clean_comment"].apply(lambda x: len(x))
    # Number of different characters used in a comment
    # Using the f word only will reduce the number of letters required in the comment
    df["clean_chars"] = df["clean_comment"].apply(lambda x: len(set(x)))
    df["clean_chars_ratio"] = df["clean_comment"].apply(lambda x: len(set(x))) / df["clean_comment"].apply(
        lambda x: 1 + min(99, len(x)))
with timer("Reading input files"):
    # Keep only the columns the feature extraction below needs.
    train = pd.read_csv('../input/train.csv',usecols=['comment_text','target','id']).fillna(' ')
    test = pd.read_csv('../input/test.csv').fillna(' ')
with timer("Performing basic NLP"):
    # Adds the indicator columns and 'clean_comment' to each frame in place.
    get_indicators_and_clean_comments(train)
    get_indicators_and_clean_comments(test)
# Scaling numerical features with MinMaxScaler though tree boosters don't need that
with timer("Creating numerical features"):
    # Every engineered column except text/id/target columns.
    num_features = [f_ for f_ in train.columns
                    if f_ not in ["comment_text", "clean_comment", "id", "remaining_chars",
                                  'has_ip_address', 'target']]
    skl = MinMaxScaler()
    train_num_features = csr_matrix(skl.fit_transform(train[num_features]))
    # BUG FIX: fit the scaler on train only and reuse it here; the original
    # called fit_transform on the test set too, which re-fitted the scaler
    # and put train and test features on inconsistent scales.
    test_num_features = csr_matrix(skl.transform(test[num_features]))
# Get TF-IDF features
train_text = train['clean_comment']
test_text = test['clean_comment']
# Fit the vectorizer on train+test text so both share one vocabulary.
all_text = pd.concat([train_text, test_text])
# First on real words
with timer("Tfidf on word"):
    word_vectorizer = TfidfVectorizer(
        sublinear_tf=True,
        strip_accents='unicode',
        analyzer='word',
        token_pattern=r'\w{1,}',
        ngram_range=(1, 2),
        max_features=30000)
    word_vectorizer.fit(all_text)
    train_word_features = word_vectorizer.transform(train_text)
    test_word_features = word_vectorizer.transform(test_text)
    # Release the fitted vectorizer and intermediate text to save memory.
    del word_vectorizer
    gc.collect()
del train_text
del test_text
gc.collect()
# Now stack TF IDF matrices
with timer("Staking matrices"):
    # Horizontally concatenate the sparse word-TFIDF block with the scaled
    # numerical-feature block, for the train set and the submission set.
    csr_trn = hstack(
        [
            train_word_features,
            train_num_features
        ]
    ).tocsr()
    gc.collect()
    csr_sub = hstack(
        [
            test_word_features,
            test_num_features
        ]
    ).tocsr()
    gc.collect()
submission = pd.DataFrame.from_dict({'id': test['id']})
# The raw test frame is no longer needed: its ids were copied above.
del test
gc.collect()
# Set LGBM parameters
params = {
    "objective": "binary",
    'metric': {'auc'},
    "boosting_type": "gbdt",
    "verbosity": -1,
    "num_threads": 4,
    # "bagging_fraction": 0.8,
    "feature_fraction": 0.8,
    "learning_rate": 0.1,
    "verbose": -1,
    "min_split_gain": .1,
    "reg_alpha": .1,
    "max_bin" : 512,
    "num_leaves":64,
}
trn_lgbset = lgb.Dataset(csr_trn, free_raw_data=False)
# Binarize the continuous toxicity target at 0.5 for binary classification.
train_target = np.where(train["target"] > 0.5,1,0)
with timer("Predicting probabilities"):
    trn_lgbset.set_label(train_target)
    # Train lgb
    model = lgb.train(
        params=params,
        train_set=trn_lgbset,
        num_boost_round=700,
    )
    # NOTE(review): no validation set / early stopping is configured, so
    # best_iteration is not set by training here — confirm this is intended.
    submission["prediction"] = model.predict(csr_sub, num_iteration=model.best_iteration)
    submission.to_csv("submission.csv", index=False)
# TODO : improved
#DONE : Basic Working
# Refrence :https://www.kaggle.com/ogrellier/lgbm-with-words-and-chars-n-gram
# If you ave any idea for improvement comment below :) Happy kaggleing | true |
b4abcba4e96b9e43e52da9c13d159ce8cc2b8649 | Python | groupdocs-comparison-cloud/groupdocs-comparison-cloud-python-samples | /Examples/AdvancedUsage/CustomizeChangesStyles.py | UTF-8 | 1,708 | 2.625 | 3 | [
"MIT"
] | permissive | # Import modules
import groupdocs_comparison_cloud
from Common import Common
# This example demonstrates how to compare documents with customizing changes styles
class CustomizeChangesStyles:
    """Compare two Word documents while overriding the default styling that
    GroupDocs applies to inserted, deleted and changed items."""

    @classmethod
    def Run(cls):
        compare_api = groupdocs_comparison_cloud.CompareApi.from_config(Common.GetConfig())

        # Source and target documents stored in the cloud storage.
        source_file = groupdocs_comparison_cloud.FileInfo()
        source_file.file_path = "source_files/word/source.docx"
        target_file = groupdocs_comparison_cloud.FileInfo()
        target_file.file_path = "target_files/word/target.docx"

        # Styling overrides; colors are decimal RGB strings.
        styling = groupdocs_comparison_cloud.Settings()
        styling.inserted_items_style = groupdocs_comparison_cloud.ItemsStyle()
        styling.inserted_items_style.highlight_color = "14297642"
        styling.inserted_items_style.font_color = "16711680"
        styling.inserted_items_style.underline = True
        styling.deleted_items_style = groupdocs_comparison_cloud.ItemsStyle()
        styling.deleted_items_style.font_color = "14166746"
        styling.deleted_items_style.bold = True
        styling.changed_items_style = groupdocs_comparison_cloud.ItemsStyle()
        styling.changed_items_style.font_color = "14320170"
        styling.changed_items_style.italic = True

        # Assemble the comparison request and run it.
        comparison_options = groupdocs_comparison_cloud.ComparisonOptions()
        comparison_options.source_file = source_file
        comparison_options.target_files = [target_file]
        comparison_options.output_path = "output/result.docx"
        comparison_options.settings = styling

        result = compare_api.comparisons(groupdocs_comparison_cloud.ComparisonsRequest(comparison_options))
        print("Output file link: " + result.href)
7a2aa8ba12cfa2a053275609e192c321ba741cb7 | Python | hsstock/hsstock | /hsstock/vnpy/event/event_type.py | UTF-8 | 553 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | # encoding: UTF-8
'''
This file exists only to hold the definitions of event-type constants.

Python has no real notion of a constant, so ALL-UPPERCASE variable names
are used in their place.  The naming convention designed here is an
EVENT_ prefix, and each value is a string chosen to convey its actual
meaning (for readability).

Keeping every constant definition in this one file makes it easy to
check for duplicates.
'''
EVENT_TIMER = 'eTimer' # timer event, emitted once every second
EVENT_TIMER2 = 'eTimer2'
EVENT_TIMER3 = 'eTimer3'
| true |
3a49c13b8bd80ae724fa84b0ccb32c24f9278b06 | Python | WellersonPrenholato/Maratona-UFV | /machinelearning.py | UTF-8 | 390 | 3.234375 | 3 | [] | no_license |
p = ['capivara', 'capivaro','capivarista', 'capivaristo']
def resp(lines):
for line in lines:
for palavra in p:
if ( line.find(palavra) >= 0 ):
return 'YES'
return 'NO'
lines = []
while True:
try:
line = input()
line = line.lower()
lines.append(line)
except:
break
print(resp(lines)) | true |
b333b2f62a3e066c8c243e06d8e119c85fb77565 | Python | thaus03/Exercicios-Python | /Aula07/Desafio010.py | UTF-8 | 282 | 4.25 | 4 | [] | no_license | # Crie um programa que leia quanto dinheiro a pessoa tem na carteira e mostre quantos dólares ela pode comprar.
# Considere:
# US$ 1,00 = R$3,27
dinheiro = float(input('Informe quanto dinheiro você possui: '))
print(f'Você pode comprar \033[32m{dinheiro//3.27}\033[m dólares')
| true |
d7d212e90abc70e82a41902ab3f6c775f7684214 | Python | 15194779206/practice_tests | /education/B:oldBoy/2不懂知识点汇总/1文件的读与写/8:with.py | UTF-8 | 127 | 2.75 | 3 | [] | no_license | with open("yesterday2",'r',encoding="utf-8") as f:
    #equivalent to: f = open("yesterday", 'r', encoding="utf-8")
print(f.readline()) | true |
1866300f2b8eb3e29d30586fc0ec9a16d0738a39 | Python | porcpine1967/aoe2_comparisons | /utils/sample.py | UTF-8 | 3,230 | 2.75 | 3 | [
"CC0-1.0"
] | permissive | #!/usr/bin/env python
""" Build sample data sets. """
import argparse
import concurrent.futures
import csv
import pathlib
import random
import time
import utils.solo_models
import utils.team_models
ROOT_DIR = str(pathlib.Path(__file__).parent.parent.absolute())
def get_record(n):
return n.to_record()
def matches(module, chunksize=200):
""" Splits matches into model, verification, and test data sets and writes to separate files. """
matches = module.Match.all()
print('{} matches'.format(len(matches)))
random.shuffle(matches)
# First 80% go to model
model_edge = int(len(matches)*.8)
# Second 10% go to verification
verification_edge = int(len(matches)*.9)
start = time.time()
# Test
print('{} matches for test'.format(len(matches[verification_edge:])))
match_records = []
with concurrent.futures.ProcessPoolExecutor() as executor:
for match_record in executor.map(get_record, matches[verification_edge:], chunksize=chunksize):
match_records.append(match_record)
print('compiled {} match records for test'.format(len(match_records)))
with open('{}/match_test_data.csv'.format(module.DATA_DIR), 'w') as f:
writer = csv.writer(f)
for record in match_records:
if record[6] > 0:
writer.writerow(record)
print('Test took {} seconds with chunksize {}'.format(int(time.time() - start), chunksize))
# Model
print('{} matches for model'.format(len(matches[:model_edge])))
start = time.time()
match_records = []
with concurrent.futures.ProcessPoolExecutor() as executor:
for match_record in executor.map(get_record, matches[:model_edge], chunksize=chunksize):
match_records.append(match_record)
print('compiled {} match_records for model'.format(len(match_records)))
with open('{}/match_model_data.csv'.format(module.DATA_DIR), 'w') as f:
writer = csv.writer(f)
for record in match_records:
if record[6] > 0:
writer.writerow(record)
print('Model took {} seconds with chunksize {}'.format(int(time.time() - start), chunksize))
# Verification
print('{} matches for verification'.format(len(matches[model_edge:verification_edge])))
start = time.time()
match_records = []
with concurrent.futures.ProcessPoolExecutor() as executor:
for match_record in executor.map(get_record, matches[model_edge:verification_edge], chunksize=chunksize):
match_records.append(match_record)
print('compiled {} match_records for verification'.format(len(match_records)))
with open('{}/match_verification_data.csv'.format(module.DATA_DIR), 'w') as f:
writer = csv.writer(f)
for record in match_records:
if record[6] > 0:
writer.writerow(record)
print('Verification took {} seconds with chunksize {}'.format(int(time.time() - start), chunksize))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('klass', choices=('team', 'solo',), help="team or solo")
args = parser.parse_args()
if args.klass == 'team':
matches(utils.team_models)
else:
matches(utils.solo_models)
| true |
695996d2bd619a61bbbe2bb207e17d3d978cfbed | Python | keith-packard/altusmetrum | /packages/RN4678.py | UTF-8 | 4,586 | 2.5625 | 3 | [] | no_license | #!/usr/bin/python
# Copyright 2016 by Bdale Garbee <bdale@gag.com>. GPLv3+
#
# Program to emit PCB footprint for Microchip RN4678 Bluetooth LE module
#
# dimensions in mm from BM70/71 Data Sheet (part of the same family)
BodyWidth = 12.00    # module body width (mm)
BodyHeight = 22.00   # module body height (mm)
GndEdgeLine = 18.00  # distance used for the ground-plane edge marks (mm)
PinWidth = 0.7       # pad width (mm)
PinHeight = 1.5      # pad length (mm)
PinSpacing = 1.1     # pad pitch (mm)
PinOffset = 0.5      # pad offset past the body edge (mm) — per datasheet, confirm
import sys
# we're going to use the 1/100 of a mil fundamental unit form
def mm2mils100( mm ):
    """Convert millimetres to hundredths of a mil, rounded to the nearest unit."""
    hundredth_mils = mm / 25.4 * 1000.0 * 100.0
    return int(hundredth_mils + 0.5)
# Emit the footprint in pcb's Element file format on stdout.  All
# coordinates are in 1/100 mil (converted from mm by mm2mils100 above).
print '# author: Bdale Garbee'
print '# email: bdale@gag.com'
print '# dist-license: GPL 3+'
print '# use-license: unlimited'
print 'Element[0x0 "BM70BLES1FC2" "" "" 0 0 -10161 -12011 0 100 0x0]'
print "("
# Individually placed pads: pin 1 near the top of the right edge and
# pin 33 mirrored on the left edge.
print '  Pad[',\
    mm2mils100(BodyHeight - 21.2), \
    mm2mils100(BodyWidth + PinOffset - PinWidth/2), \
    mm2mils100(BodyHeight - 21.2), \
    mm2mils100(BodyWidth + PinOffset - PinHeight + PinWidth/2), \
    mm2mils100(PinWidth), \
    mm2mils100(PinSpacing - PinWidth), \
    mm2mils100(PinWidth)+600, \
    '"pin1" "1" 0x0100]'
print '  Pad[',\
    mm2mils100(BodyHeight - 21.2), \
    mm2mils100(-PinOffset + PinWidth/2), \
    mm2mils100(BodyHeight - 21.2), \
    mm2mils100(-PinOffset + PinHeight - PinWidth/2), \
    mm2mils100(PinWidth), \
    mm2mils100(PinSpacing - PinWidth), \
    mm2mils100(PinWidth)+600, \
    '"pin33" "33" 0x0100]'
# Pin 2 (right edge) and pin 32 (left edge), one pitch further down.
print '  Pad[',\
    mm2mils100(BodyHeight - 20.1), \
    mm2mils100(BodyWidth + PinOffset - PinWidth/2), \
    mm2mils100(BodyHeight - 20.1), \
    mm2mils100(BodyWidth + PinOffset - PinHeight + PinWidth/2), \
    mm2mils100(PinWidth), \
    mm2mils100(PinSpacing - PinWidth), \
    mm2mils100(PinWidth)+600, \
    '"pin2" "2" 0x0100]'
print '  Pad[',\
    mm2mils100(BodyHeight - 20.1), \
    mm2mils100(-PinOffset + PinWidth/2), \
    mm2mils100(BodyHeight - 20.1), \
    mm2mils100(-PinOffset + PinHeight - PinWidth/2), \
    mm2mils100(PinWidth), \
    mm2mils100(PinSpacing - PinWidth), \
    mm2mils100(PinWidth)+600, \
    '"pin32" "32" 0x0100]'
# Pins 3 and 4 on the right edge.
print '  Pad[',\
    mm2mils100(BodyHeight - 13.9), \
    mm2mils100(BodyWidth + PinOffset - PinWidth/2), \
    mm2mils100(BodyHeight - 13.9), \
    mm2mils100(BodyWidth + PinOffset - PinHeight + PinWidth/2), \
    mm2mils100(PinWidth), \
    mm2mils100(PinSpacing - PinWidth), \
    mm2mils100(PinWidth)+600, \
    '"pin3" "3" 0x0100]'
print '  Pad[',\
    mm2mils100(BodyHeight - 12.8), \
    mm2mils100(BodyWidth + PinOffset - PinWidth/2), \
    mm2mils100(BodyHeight - 12.8), \
    mm2mils100(BodyWidth + PinOffset - PinHeight + PinWidth/2), \
    mm2mils100(PinWidth), \
    mm2mils100(PinSpacing - PinWidth), \
    mm2mils100(PinWidth)+600, \
    '"pin4" "4" 0x0100]'
# Pins 5-14 down the right edge, each with its mirror pin (36 - pin,
# i.e. 31 down to 22) on the left edge, at PinSpacing pitch.
for pin in range (5,15):
    print '  Pad[',\
        mm2mils100(BodyHeight - 11.7 + ((pin - 5) * PinSpacing)), \
        mm2mils100(BodyWidth + PinOffset - PinWidth/2), \
        mm2mils100(BodyHeight - 11.7 + ((pin - 5) * PinSpacing)), \
        mm2mils100(BodyWidth + PinOffset - PinHeight + PinWidth/2), \
        mm2mils100(PinWidth), \
        mm2mils100(PinSpacing - PinWidth), \
        mm2mils100(PinWidth)+600, \
        '"pin%i"' % pin, '"%i"' % pin, '0x0100]'
    print '  Pad[',\
        mm2mils100(BodyHeight - 11.7 + ((pin - 5) * PinSpacing)), \
        mm2mils100(- PinOffset + PinWidth/2), \
        mm2mils100(BodyHeight - 11.7 + ((pin - 5) * PinSpacing)), \
        mm2mils100(- PinOffset + PinHeight - PinWidth/2), \
        mm2mils100(PinWidth), \
        mm2mils100(PinSpacing - PinWidth), \
        mm2mils100(PinWidth)+600, \
        '"pin%i"' % (36 - pin), '"%i"' % (36 - pin), '0x0100]'
# Pins 15-21 along the bottom edge (y past BodyHeight by PinOffset).
for pin in range (15,22):
    print '  Pad[',\
        mm2mils100(BodyHeight + PinOffset - PinWidth/2), \
        mm2mils100(BodyWidth - 2.7 + (-(pin - 15) * PinSpacing)), \
        mm2mils100(BodyHeight - PinHeight + PinOffset + PinWidth/2), \
        mm2mils100(BodyWidth - 2.7 + (-(pin - 15) * PinSpacing)), \
        mm2mils100(PinWidth), \
        mm2mils100(PinSpacing - PinWidth), \
        mm2mils100(PinWidth)+600, \
        '"pin%i"' % pin, '"%i"' % pin, '0x0100]'
# body outline
print '  ElementLine[',\
    0, \
    0, \
    0, \
    mm2mils100(BodyWidth), \
    '1000 ]'
print '  ElementLine[',\
    0, \
    mm2mils100(BodyWidth), \
    mm2mils100(BodyHeight), \
    mm2mils100(BodyWidth), \
    '1000 ]'
print '  ElementLine[',\
    mm2mils100(BodyHeight), \
    mm2mils100(BodyWidth), \
    mm2mils100(BodyHeight), \
    0, \
    '1000 ]'
print '  ElementLine[',\
    mm2mils100(BodyHeight), \
    0, \
    0, \
    0, \
    '1000 ]'
# hash marks where gnd plane should end
print '  ElementLine[',\
    mm2mils100(BodyHeight-GndEdgeLine), \
    mm2mils100(BodyWidth-1), \
    mm2mils100(BodyHeight-GndEdgeLine), \
    mm2mils100(BodyWidth-3), \
    '1000 ]'
print '  ElementLine[',\
    mm2mils100(BodyHeight-GndEdgeLine), \
    mm2mils100(1), \
    mm2mils100(BodyHeight-GndEdgeLine), \
    mm2mils100(3), \
    '1000 ]'
print ")"
| true |
957be96cead5e84ca005ea77e241a1739146d9af | Python | harveylabis/GTx_CS1301 | /codes/CHAPTER_4/Chapter_4.2_Strings/Lesson_5/Split-4.py | UTF-8 | 65 | 3.5 | 4 | [] | no_license | names = input("Enter a list of names: ")
print(names.split(","))
| true |
a501d5195f297d7141289e8fd7cdb3fdc3cc91df | Python | Jiao-Jia-Xiong/path-finding | /demo/visualization_for_nodes.py | UTF-8 | 4,753 | 3.1875 | 3 | [] | no_license | import pygame as pg
from demo_map import nodes_map, Node
from typing import Tuple, List
from random import randint
pg.init()
def get_nodes_position(node: Node,
                       ox: int,
                       oy: int,
                       sqr_len: int) -> Tuple[int, int]:
    """Map a node's grid coordinates to pixel coordinates in the window."""
    col, row = node.loc[0], node.loc[1]
    return ox + col * sqr_len, oy + row * sqr_len
def get_screen_size(ox: int,
                    oy: int,
                    nodes_map: List[List[Node]],
                    sqr_len: int) -> Tuple[int, int]:
    """Compute the (width, height) window size needed for the node grid
    drawn with its top-left tile at (ox, oy)."""
    columns = len(nodes_map)
    rows = len(nodes_map[0])
    return ox + columns * sqr_len, oy + rows * sqr_len
def which_node(mouse_x: int, mouse_y: int) -> Tuple[int, int]:
    """Translate a pixel position into the (column, row) of the node under it.

    Relies on the module-level grid origin (ox, oy) and tile size sqr_len.
    """
    return (mouse_x - ox) // sqr_len, (mouse_y - oy) // sqr_len
def get_quit_button_rect(ox: int,
                         screen_height: int) -> pg.Rect:
    """Place the quit button inside the left margin, near the window bottom.

    Horizontally it spans 1/5..4/5 of the margin width ox; vertically
    44/50..48/50 of the window height.
    """
    x0 = int(ox * (1 / 5))
    x1 = int(ox * (4 / 5))
    y0 = int(screen_height * (44 / 50))
    y1 = int(screen_height * (48 / 50))
    return pg.Rect(x0, y0, x1 - x0, y1 - y0)
# Grid origin (pixels) and tile edge length.
ox, oy = 100, 0
sqr_len = 40
# RGB color palette.
blue = (66, 135, 245)
tile_edge_color = (245, 173, 66)
tile_color = (245, 203, 66)
white = (255, 255, 255)
green = (172, 219, 121)
red = (245, 96, 66)
clock = pg.time.Clock()
fps = 10
scree_width, screen_height = get_screen_size(ox, oy, nodes_map, sqr_len)
quit_rect = get_quit_button_rect(ox, screen_height)
# Tile textures, scaled to exactly one grid square.
grass = pg.image.load('grass.jpg')
wall = pg.image.load('wall.jpg')
grass = pg.transform.scale(grass, (sqr_len, sqr_len))
wall = pg.transform.scale(wall, (sqr_len, sqr_len))
font = pg.font.SysFont(None, 25)
quit_msg = font.render('Quit', True, white)
def main():
    """Draw the node grid and run the interactive event loop.

    Left-click toggles a tile between passable (grass) and blocked (wall);
    clicking the red button in the left margin, or closing the window,
    quits the loop.
    """
    pg.display.set_caption('Demo Map')
    screen = pg.display.set_mode((scree_width, screen_height))
    screen.fill(blue)
    # Quit button with its centered label.
    screen.fill(red, quit_rect)
    screen.blit(quit_msg,
                [quit_rect.left + quit_rect.width / 2 - quit_msg.get_size()[
                    0] / 2,
                 quit_rect.top + quit_rect.height / 2 - quit_msg.get_size()[
                     1] / 2])
    # Blit one grass tile per node, remembering each tile's Rect for later
    # hit-testing / redrawing.
    node_rects = []
    for node_column in nodes_map:
        rect_column = []
        for node in node_column:
            x_pos, y_pos = get_nodes_position(node, ox, oy, sqr_len)
            rect = pg.Rect(x_pos, y_pos, sqr_len, sqr_len)
            # screen.fill(tile_color, rect)
            # rect_column.append(pg.draw.rect(screen, tile_color,
            #                                 rect))
            rect_column.append(screen.blit(grass, rect))
            pg.display.update()
            # pg.event.get()
            # clock.tick(10)
        node_rects.append(rect_column)
    assert type(node_rects) is list
    for column in node_rects:
        assert type(column) is list
        for rect in column:
            assert type(rect) is pg.Rect
    done = False
    while not done:
        for event in pg.event.get():
            if event.type == pg.QUIT:
                done = True
            if event.type == pg.MOUSEBUTTONDOWN:
                button = pg.mouse.get_pressed()
                mouse_x, mouse_y = pg.mouse.get_pos()
                # Clicks inside the grid toggle the tile; clicks in the
                # left margin are checked against the quit button.
                if mouse_x >= ox and mouse_y >= oy:
                    node_x, node_y = which_node(mouse_x, mouse_y)
                    if button[0]:
                        if nodes_map[node_x][node_y].can_pass:
                            nodes_map[node_x][node_y].can_pass = False
                            rect = node_rects[node_x][node_y]
                            screen.blit(wall, rect)
                            pg.display.update(rect)
                        elif not nodes_map[node_x][node_y].can_pass:
                            nodes_map[node_x][node_y].can_pass = True
                            rect = node_rects[node_x][node_y]
                            screen.blit(grass, rect)
                            pg.display.update(rect)
                else:
                    if quit_rect.left <= mouse_x <= quit_rect.left + quit_rect.width and \
                            quit_rect.top <= mouse_y <= quit_rect.top + quit_rect.height:
                        done = True
if __name__ == '__main__':
    x, y = 8, 8
    # Sanity-check that the grid's neighbour links are wired consistently.
    assert nodes_map[x][y].down is nodes_map[x][y + 1]
    assert nodes_map[x][y].up is nodes_map[x][y - 1]
    assert nodes_map[x][y].left is nodes_map[x - 1][y]
    assert nodes_map[x][y].right is nodes_map[x + 1][y]
    main()
    pg.quit()
| true |
591156bef70cf0d4574193311acb42c6efd45d1c | Python | GyuriKim12/CodingTestStudy | /choigoun/1week/1920.py | UTF-8 | 1,711 | 3.796875 | 4 | [] | no_license | # # binary search를 안 쓰면 런타임 에러
# class Stack:
# def __init__(self):
# self.list =[]
# def push(self,item):
# self.list.append(item)
# def pop(self):
# # 비어 있지 않다면
# if not self.isEmpty():
# return self.list.pop(-1)
# else:
# return -1
# def isEmpty(self):
# return len(self.list)==0
# def size(self):
# return len(self.list)
# def top(self):
# if not self.isEmpty():
# return list[len(self.list)]
# else:
# return -1
# num1 = int(input())
# stack = Stack()
# for i in range(num1-1):
# str = input()
# if str in 'push':
# push_num = int(str.split(' ')[1])
# stack.push(push_num)
# if str in 'top':
# print(stack.top())
# if str in 'size':
# print(stack.size())
# if str in 'empty':
# if stack.isEmpty == True:
# print("1")
# else:
# print("0")
# if str in 'pop':
# print(stack.pop())
#Binary Search 이용하기
def BinarySearch(arr, val, low, high):
    """Search the sorted slice arr[low..high] for val; True if present."""
    lo, hi = low, high
    while lo <= hi:
        mid = (lo + hi) // 2
        if arr[mid] > val:
            hi = mid - 1
        elif arr[mid] < val:
            lo = mid + 1
        else:
            return True
    return False
num1 = int(input()) # number of elements
numlist = list(map(int,input().split())) # the elements
M = int(input()) # number of queries
M_list = list(map(int,input().split()))
numlist = sorted(numlist) # sort ascending so binary search works
# For each query, print 1 if it is in the list, otherwise 0.
for m in M_list :
    if BinarySearch(numlist,m,0,num1-1):
        print(1)
    else:
        print(0)
8367b2b7185eaf1066cc850705f7397ced8c076a | Python | hernancardoso/p2p-file-sharing | /downloadHandler.py | UTF-8 | 7,267 | 2.609375 | 3 | [] | no_license | import socket
import time
import threading
import sys
import lib.common as utils
import lib.variables as variables
import settings.config as config
# Module-level listening socket; (re)bound inside init().
serverSocket = ""
# NOTE(review): declared global in downloadChunk but never read or written
# anywhere in this module — apparently unused; confirm before removing.
threadError = {}
def init():
    """Bind the TCP server socket and accept download requests forever.

    Each accepted connection is handed to serveClient() on its own thread.
    """
    global serverSocket
    serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serverSocket.bind((config.data["ip"], config.data["tcp_port"]))
    serverSocket.listen()
    print("TCP socket started")
    while True:
        print("Waiting for download request...")
        connectionSock, addr = serverSocket.accept()
        # A new thread is created for each download request
        threading.Thread(target=serveClient, args=(connectionSock, addr,),).start()
def serveClient(clientSock, addr):
    """Serve one download request on an accepted TCP connection.

    Expected request:  "DOWNLOAD\n<md5>\n<offset>\n<size>\n".
    Reply: "DOWNLOAD OK\n" followed by the requested bytes, or
    "DOWNLOAD FAILURE\n<reason>\n" on a bad/unknown request.

    Returns False when the request is rejected.  The socket is always
    closed on exit (the original leaked it on every early-return path),
    and the shared file handle is managed with a `with` block (the
    original leaked it if sending failed).
    """
    print("New download request from " + addr[0])
    try:
        read, remaining = utils.read_line(clientSock, "")
        if read == "DOWNLOAD":
            md5Code, remaining = utils.read_line(clientSock, remaining)
            if md5Code == "CLOSED": return False
            reqStart, remaining = utils.read_line(clientSock, remaining)
            if reqStart == "CLOSED": return False
            reqSize, remaining = utils.read_line(clientSock, remaining)
            if reqSize == "CLOSED": return False

            if md5Code not in variables.myFiles:
                # The requested file could not be found on this device
                clientSock.sendall("DOWNLOAD FAILURE\nMISSING\n".encode())
                return False

            file_size = int(variables.myFiles[md5Code]["fileSize"])
            if (
                not reqSize.isnumeric()
                or not reqStart.isnumeric()
                or int(reqStart) > file_size
            ):
                clientSock.sendall("DOWNLOAD FAILURE\nBAD REQUEST\n".encode())
                print("Error 1 - Bad request from client.")
                return False
            if int(reqStart) + int(reqSize) > file_size:
                clientSock.sendall("DOWNLOAD FAILURE\nBAD REQUEST\n".encode())
                print("Error 2 - Offset + requested size exceeds the original file size")
                return False

            # Read the requested slice, prefixed with the OK header.
            with open(config.data["shared_folder"] + "\\" + variables.myFiles[md5Code]["fileName"], "rb") as f:
                f.seek(int(reqStart))
                buffer = "DOWNLOAD OK\n".encode() + f.read(int(reqSize))

            # len(buffer), not sys.getsizeof(buffer): log the payload size,
            # not the Python object's memory footprint.
            print("Sending " + str(len(buffer)) + " bytes")
            clientSock.sendall(buffer)
            clientSock.shutdown(socket.SHUT_RDWR)
    finally:
        clientSock.close()
def startDownload(md5Code):
    """Download the file identified by md5Code in parallel chunks.

    Splits the file evenly across every server still considered alive
    (announced within the last 90 s), fetches one chunk per server on its
    own thread via downloadChunk, and joins all threads before returning.
    Sets variables.errorDownloading if no server remains available.
    """
    fileSize = int(variables.availableFiles[md5Code]["fileSize"])
    timeNow = int(round(time.time() * 1000))
    # Keep only servers whose last announcement is at most 90 s old.
    variables.availableFiles[md5Code]["servers"] = { k: v for (k, v) in variables.availableFiles[md5Code]["servers"].items() if timeNow - v <= 90000 }
    if len(variables.availableFiles[md5Code]["servers"]) == 0: # There are no servers for that file
        # If there are no servers to download from, the file must not be
        # listed as available any more.
        variables.availableFiles = {k: v for (k, v) in variables.availableFiles.items() if k != md5Code}
        variables.errorDownloading = "The requested file is no longer available"
        return
    # Shortcut for the surviving servers.
    availableServers = variables.availableFiles[md5Code]["servers"]
    totalServers = len(availableServers)
    # Base chunk size; the remainder of the division is charged to the
    # last server in the list.
    chunkSize = int(fileSize / totalServers)
    # One worker thread per server; kept for the fork-join below.
    threads = []
    for actualServer, ip in enumerate(availableServers):
        startingByte = chunkSize * actualServer
        if actualServer == totalServers - 1:
            chunkSize += fileSize % totalServers
        # BUG FIX: the original did `t[i] = threading.Thread(...)` on an
        # empty list, which raises IndexError on the first iteration.
        worker = threading.Thread(target=downloadChunk, args=(ip, md5Code, startingByte, chunkSize))
        threads.append(worker)
        worker.start()
        print("Chunk requested to " + ip + "\n")
    # Wait for all chunk threads (the original indexed the list with the
    # Thread objects themselves, a TypeError).
    for worker in threads:
        worker.join()
def downloadChunk(ip, md5Code, reqByte, reqSize):
    """Fetch reqSize bytes of file md5Code, starting at reqByte, from ip.

    Writes the received bytes into the shared copy of the file at the
    matching offset.  On any failure, sets variables.errorDownloading,
    closes the socket/file and returns early.
    """
    global threadError
    clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    clientSocket.settimeout(8)
    try:
        clientSocket.connect((ip, config.data["tcp_port"]))
    except:
        # Server unreachable: drop it, and drop the file if it was the last one.
        variables.errorDownloading = "Download failure, a server has disconnected, try again"
        del variables.availableFiles[md5Code]["servers"][ip]
        if len(variables.availableFiles[md5Code]["servers"]) == 0:
            del variables.availableFiles[md5Code]
        return
    print("Connected to " + ip)
    request = "DOWNLOAD\n" + md5Code + "\n" + str(reqByte) + "\n" + str(reqSize) + "\n"
    clientSocket.sendall(request.encode())
    fileName = variables.availableFiles[md5Code]["fileNames"][0]
    downloadedSize = 0
    f = open(config.data["shared_folder"] + "\\" + fileName, "wb+")
    f.seek(reqByte) # seek the pointer to start writing in starting byte
    # Repeat until header of DOWNLOAD PROTOCOL is obtained
    # (12 bytes = len("DOWNLOAD OK\n")).
    answer = clientSocket.recv(config.data["tcp_rcv_pkt_max_size"])
    while len(answer) < 12:
        try:
            answer += clientSocket.recv(config.data["tcp_rcv_pkt_max_size"])
        except:
            variables.errorDownloading = "Download failure, a server has disconnected, try again"
            f.close()
            clientSocket.close()
            return
    if answer[:12].decode().find("DOWNLOAD OK\n") != -1:
        # Success header: strip it, then keep receiving until the whole
        # requested chunk has been written.
        answer = answer[12:]
        downloadedSize += len(answer)
        print("Got", str(len(answer)), "-", str(downloadedSize), "/", str(reqSize), " (", str(int(downloadedSize * 100 / reqSize)), ")%")
        f.write(answer)
        while downloadedSize < reqSize:
            if variables.errorDownloading != "":
                # thread returned an error
                f.close()
                clientSocket.close()
                return
            try:
                answer = clientSocket.recv(config.data["tcp_rcv_pkt_max_size"])
            except:
                variables.errorDownloading = "Download failure, a server has disconnected, try again"
                f.close()
                clientSocket.close()
                return
            downloadedSize += len(answer)
            print("Got", str(len(answer)), "-", str(downloadedSize), "/", str(reqSize), " (", str(int(downloadedSize * 100 / reqSize)), ")%")
            f.write(answer)
        print("Download completed")
    else:
        # else if the message was not DOWNLOAD OK then the message received was DOWNLOAD FAILURE
        print(answer.decode())
        while answer.decode().count("\n") < 2:
            answer += clientSocket.recv(config.data["tcp_rcv_pkt_max_size"])
            # NOTE(review): recv() returns bytes, never the int 0, so this
            # comparison is always False — probably meant len(answer) == 0.
            if answer == 0:
                break
        # NOTE(review): answer is bytes here, so .split("\n") raises
        # TypeError; this needs b"\n" or a .decode() first — confirm.
        variables.errorDownloading = answer.split("\n")[1]
        f.close()
        clientSocket.close()
bc63c70b43442fe365866d0bcf511b7d7e8e32ab | Python | TakanoriHasebe/DeepLearning | /ManufactureDeepLearning/make-neural-network/make_trainer.py | UTF-8 | 3,909 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 21 09:57:36 2017
@author: Takanori
"""
"""
ニューラルネットワークの訓練を行うクラス
* 課題点
0. 初期化関数群
1. バッチ処理について忘れている
2. バッチ処理とミニバッチ処理について
3. バッチ処理の書き方
4. 勾配の最適化手法の初期化
5. 勾配の更新でパラメータをどこから持ってくるかについて
6. ミニバッチ学習の際の繰り返しの回数の設定について
7. ミニバッチ学習とバッチ学習
8. train_step関数とtrain関数
9. 勾配(optimizer)の書き方
参考url : https://github.com/oreilly-japan/deep-learning-from-scratch/blob/master/common/trainer.py
"""
import sys, os
sys.path.append('../') # 親ディレクトリのファイルをインポートするための設定
from common import optimizer
import numpy as np
from multi_layer_network import MultiLayerNetwork
# ニューラルネットワークの訓練を行うクラス
class Trainer:
    """Runs mini-batch SGD training of a neural network and records
    per-iteration loss and per-epoch train/test accuracy."""
    def __init__(self, network, x_train, t_train, x_test, t_test, mini_batch_size=100, epochs=20, verbose=True):
        self.network = network # MultiLayerNet
        self.x_train = x_train # training data
        self.t_train = t_train # training labels
        self.x_test = x_test # test data
        self.t_test = t_test # test labels
        self.train_size = self.x_train.shape[0] # number of training samples
        self.current_iter = 0 # current iteration count
        self.iter_per_epoch = int(max(self.train_size/mini_batch_size, 1)) # max(...) avoids 0; mini-batch iterations per epoch
        self.current_epoch = 0 # current epoch count
        self.max_iter = epochs * self.iter_per_epoch # total number of iterations
        self.batch_size = mini_batch_size # size of one mini-batch
        self.optimizer = optimizer.SGD() # SGD is used for optimization
        self.verbose = verbose # verbosity flag
        self.train_loss_list = list() # losses recorded during training
        self.train_acc_list = list() # accuracies on the training data
        self.test_acc_list = list() # accuracies on the test data
    # One step of the training flow.
    def train_step(self):
        # Sample the data for this mini-batch.
        batch_mask = np.random.choice(self.train_size, self.batch_size)
        x_batch = self.x_train[batch_mask]
        t_batch = self.t_train[batch_mask]
        # Compute the gradients.
        grads = self.network.gradient(x_batch, t_batch)
        # Update the parameters with the gradients.
        self.optimizer.update(self.network.params, grads)
        # Compute and record the loss.
        loss = self.network.loss(x_batch, t_batch)
        self.train_loss_list.append(loss)
        # Display the loss when verbose is True.
        if self.verbose : print('train loss:'+str(loss))
        # Once per epoch, record train/test accuracy.
        if self.current_iter % self.iter_per_epoch == 0:
            self.current_epoch += 1
            self.train_acc_list.append(self.network.accuracy(self.x_train, self.t_train))
            self.test_acc_list.append(self.network.accuracy(self.x_test, self.t_test))
            if self.verbose : print("=== epoch:" + str(self.current_epoch)+', train acc:'+str(self.train_acc_list[self.current_epoch - 1])+', test acc'+str(self.test_acc_list[self.current_epoch - 1]))
        self.current_iter += 1
    # Run the whole training loop, then report final test accuracy.
    def train(self):
        for i in range(self.max_iter):
            self.train_step()
        test_acc = self.network.accuracy(self.x_test, self.t_test)
        print("=============== Final Test Accuracy ===============")
        print("test acc:" + str(test_acc))
        return test_acc
| true |
66e77b04861a4045c314c5c2a7dd2191cb2f4f2d | Python | smart8099/Zaana | /static/css/Q2.py | UTF-8 | 326 | 4.28125 | 4 | [
"MIT"
] | permissive | #program to check if a string is palindrome
def check_palindrome(value):
    """Print whether `value` reads the same forwards and backwards."""
    reversed_value = value[::-1]
    if value == reversed_value:
        print('the string is palindrome')
    else:
        print('the string is not palindrome')
value = input('enter the string to check whether it is palindrome or not: ')
check_palindrome(value) | true |
e1000b2370e572314aac51b2ae0d163d32a202d9 | Python | Tiagoksio/estudandoPython | /exercicios004/conteA.py | UTF-8 | 502 | 4.40625 | 4 | [] | no_license | '''Faça um programa que leia uma frase pelo teclado e mostre:
Quantas vezes aparece a letra "A";
Em que posição ela aparece pela primeira vez;
Em que posição ela aparece pela última vez.'''
frase = " ".join(input('Informe uma frase: ').lower().split())
print('''A frase: "{}"...
Possui {} letras "A";
A primeira letra "A" aparece na {}ª posição;
A última letra "A" aparece na {}ª posição.'''.format(frase.capitalize(), frase.count("a"), frase.find("a") + 1, frase.rfind("a") + 1)) | true |
56731e7bf62011733f09d85c0dc7a3a302de4403 | Python | huangqing6/RUL-prediction | /codes/feature_selection.py | UTF-8 | 3,448 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score,confusion_matrix
from sklearn.metrics import accuracy_score
import seaborn as sns
# Data preprocessing
# NOTE(review): train.csv / test.csv must exist in the working directory.
training_data = pd.read_csv('train.csv', header=0)
testing_data = pd.read_csv('test.csv', header=0)
# Re-wrap as DataFrames (effectively a copy; read_csv already returns DataFrames).
training_frame = pd.DataFrame(training_data, columns=training_data.columns)
testing_frame = pd.DataFrame(testing_data, columns=testing_data.columns)
# select feature1-21 from the dataset for feature selection (columns 14..34)
X = training_frame.iloc[:, 14:35]
X_t = testing_frame.iloc[:, 14:35]
# target column: RUL (column index 8) — assumed layout of the CMAPSS-style CSV
y_train = training_frame.iloc[:,8]
y_test = testing_frame.iloc[:,8]
# perform different feature selection methods to get the most
# valuable features
# PCA
def perform_pca(samples, components=20):
    """
    Fit a PCA on *samples*, print the explained-variance ratios and component
    weights, and draw a scree plot (explained variance vs. component index).

    samples: training dataset to perform PCA on
    components: number of PCA components to keep. Default 20
    """
    from sklearn.decomposition import PCA
    pca = PCA(n_components=components)
    # fit_transform's return value is discarded; only the fitted model is used below.
    pca.fit_transform(samples)
    #principalDf = pd.DataFrame(data = pricipalComponents, columns=['PC-1', 'PC-2'])
    #percentiage of variance explained for each components
    print('Explained variance ratio(first two components): %s'%str(pca.explained_variance_ratio_))
    #print(principalDf)
    #finalDf = pd.concat([principalDf, training_frame[['RUL']]], axis = 1)
    print('PCA components:%s'%str(pca.components_))
    # Scree plot — the caller is responsible for plt.show().
    plt.figure(1, figsize=(8, 10))
    plt.clf()
    plt.axes([.2, .2, .7, .7])
    plt.plot(pca.explained_variance_ratio_, linewidth=5)
    plt.axis('tight')
    plt.xlabel('n_components')
    plt.ylabel('explained_variance_ratio_')
# by comparing the absolute value of the weight of each feature,
# feature 1, 6, 7, 8, 12, and 18 can be selected
#perform_pca(X)
# KBest using chi-square
def perform_kbest(x, y, feature_num):
    """
    Perform a filter method (SelectKBest with a chi-squared score) for
    feature selection, printing the per-feature scores and a sample of
    the selected data.

    x: training dataset
    y: target dataset
    feature_num: number of features to be selected.
    """
    from sklearn.feature_selection import SelectKBest
    from sklearn.feature_selection import chi2
    #feature extraction: Chi-squared test, select best features
    feature = SelectKBest(score_func=chi2, k=feature_num).fit(x, y)
    # #fit = test.fit(X, y)
    np.set_printoptions(precision=3)
    print('Score list:', feature.scores_)
    selected_features = feature.transform(x)
    #selected_features_test = feature.transform(X_t)
    # NOTE(review): the slice below takes the first feature_num *rows* of the
    # reduced matrix, not columns — verify this is the intended printout.
    print('Selected feature list:',selected_features[0:feature_num, :])
#perform_kbest(X, y_train, 6)# 3, 4, 7, 9, 12, 18 are selected
# REF
def perform_ref(x, y, feature_num):
    """
    Recursive feature elimination with cross-validation (RFECV).

    x: training dataset (DataFrame — its .columns are used for reporting)
    y: target dataset
    feature_num: minimum number of features RFECV may keep
                 (previously accepted but unused).
    """
    from sklearn.feature_selection import RFECV
    from sklearn.svm import SVC
    estimator = SVC(kernel="linear")
    # Eliminate one feature per step, scoring each subset with 5-fold CV;
    # min_features_to_select wires the formerly-dead feature_num parameter in.
    selector = RFECV(estimator, step=1, cv=5, min_features_to_select=feature_num)
    selector = selector.fit(x, y)
    # Bug fix: the original printed columns of the *global* X instead of the
    # `x` argument, which broke (or silently lied) for any other input frame.
    print('Chosen best feature by rfe:', x.columns[selector.support_])
| true |
6492c8add49cfa4f10228adebf3c2d54bcc3d555 | Python | victorltd/DEV_python | /Repeticao/loops.py | UTF-8 | 1,449 | 4.65625 | 5 | [] | no_license | # Exemplo das estruturas de repitções em Python
# First, let's see how FOR works
# If we have a list of names or numbers and want to access the values one by one, we do the following
nomes = ['Ramon', 'André', 'Leon', 'Victor', 'Matheus']
for i in nomes: # Each printed value is an element of the list
    print(i) # A list of numbers would work the same way, printing each number
# To iterate n times, the for loop can also be written like this
print() # Just to skip a line
print('Loop For')
n = 10
for j in range(0,n): # NB: range counts from the first value (0 here) up to n-1, i.e. 0 through 9
    print(j)
# If you want the for step to be negative
print() # Just to skip a line
print('For incremento negativo')
for k in range(n,1,-1): # Note it starts at 10 and counts down to 2
    print(k)
# The while loop needs a stop condition so the algorithm is not an infinite loop
# See below:
print() # Just to skip a line
print("Loop While")
contador = 0
while contador < 5:
    print(contador)
    contador += 1
# To enter the loop before checking a stop condition (similar to a "do while"), do this:
print('Outra maneira de usar o While')
cont = 0
while True:
    print(cont)
    cont += 1
    if cont == 10:
        break
| true |
602d5bb3ab45f4cd410b93bd5d3604f705cd0c7e | Python | yyfxm/pyblackhat | /chp3/sniffer.py | UTF-8 | 389 | 2.59375 | 3 | [] | no_license | import socket
import os
# target host / interface address to sniff on
host = "192.168.1.104"
# Create a raw socket.  On Windows we must sniff at the IP level (IPPROTO_IP);
# elsewhere ICMP is the lowest level a raw AF_INET socket can capture.
if os.name == "nt":
    # Bug fix: the original wrote `socket.protocol = ...`, which assigns an
    # attribute on the socket *module* and leaves socket_protocol undefined
    # on Windows, raising NameError at the socket.socket(...) call below.
    socket_protocol = socket.IPPROTO_IP
else:
    socket_protocol = socket.IPPROTO_ICMP
sniffer = socket.socket(socket.AF_INET,socket.SOCK_RAW,socket_protocol)
sniffer.bind((host,0))
# Include IP headers in the captured packets.
sniffer.setsockopt(socket.IPPROTO_IP,socket.IP_HDRINCL,1)
# Print one captured packet (raw sockets require admin/root privileges).
print(sniffer.recvfrom(65565))
| true |
595f5effb6a4834b48060ccafca7f49105fcec50 | Python | hybae430/Jungol | /LC/119_디버깅_형성평가4.py | UTF-8 | 154 | 3.125 | 3 | [] | no_license | from datetime import datetime
# All three printed values derive from today's date.
now = datetime.now()
a = 0
# First value printed is always 0 — NOTE(review): intent unclear (exercise scaffold?).
print(a, end=" ")
# Years elapsed since 1900.
a = now.year - 1900
print(a, end=" ")
# Add the zero-based month and the day of the month to the running total.
a += now.month - 1
a += now.day
print(a)
2ce18c2284afa79c390fd820aef449874867b8d2 | Python | ralsuwaidi/BotMother | /utils/common.py | UTF-8 | 214 | 3.296875 | 3 | [] | no_license | import random
def random_line(file) -> str:
    """Return a uniformly random line (without its newline) from *file*.

    Raises IndexError on an empty file (random.choice); the original raised
    ValueError from randint in the same situation — both are crashes on
    degenerate input, now with a clearer message.
    """
    with open(file) as f:
        lines = f.read().splitlines()
    # random.choice replaces manual randint indexing (same uniform distribution).
    return random.choice(lines)
| true |
b1945638159349d0cb109c53f17a4af7aa302114 | Python | dykesk/Plant_EnergySE | /src/plant_energyse/openwind/rwTurbXML.py | UTF-8 | 17,398 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | # rwTurbXML.py
# 2014 03 28
# Read and write turbine XML (*.owtg) files
# - created by merging rdTurbXML.py and wrtTurbXML.py
'''
Read/write XML tree that conforms to the OpenWind TurbineType XML format
G. Scott, NREL 2013 07 09
2014 03 24: updated documentation
USAGE (reading):
import rwTurbXML
fname = 'turbfile.owtg'
tree = rwTurbXML.parseOWTG(fname)
vels, pwrs = rwTurbXML.getPwrTbls(tree)
vels, thrusts = rwTurbXML.getThrustTbls(tree)
vels, rpms = rwTurbXML.getRPMTbls(tree)
USAGE (writing):
import rwTurbXML
turbtree = rwTurbXML.newTurbTree(trbname, desc,
vels, power, thrust, rpm, hubht, rdiam, rho=[1.225], percosts=[],
cutInWS=3.0, cutOutWS=25.0, nblades=3, machineRating=3000.0,
ttlCost=2000000, fndCost=100000):
turbXML = etree.tostring(turbtree,
xml_declaration=True,
doctype='<!DOCTYPE {:}>'.format(trbname),
pretty_print=True)
ofh = open('myTurbine.owtg', 'w')
ofh.write(turbXML)
ofh.close()
USAGE (modifying):
modTurbXML(oldTurbFile, newTurbFile, rotor_diameter=rdiam)
'''
#-------------------------------------------------------
import sys, os
from lxml import etree
import matplotlib.pyplot as plt
# ----------- READING -----------------
#---------------------------------------------------------
def parseOWTG(fname,debug=False):
    # Parse an OpenWind XML turbine (*.owtg) file and return the lxml etree,
    # or None (after a stderr message) on any open/read/parse failure.
    # NOTE(review): `debug` is accepted for API symmetry but unused here, and
    # the bare `except:` also swallows KeyboardInterrupt/SystemExit.
    try:
        tree = etree.parse(fname)
        return tree
    except:
        sys.stderr.write('\n*** ERROR in parseOWTG() opening/reading/parsing file {:}\n\n'.format(fname))
        return None
#---------------------------------------------------------
def getTurbParams(tree,debug=False):
    """Return (name, capacity_kW, hub_height_m, rotor_diameter_m) from a
    parsed OWTG tree.  Numeric fields that are missing or malformed stay 0
    and are reported on stderr."""
    name = ''
    capKW = 0
    hubHt = 0
    rtrDiam = 0
    root = tree.getroot()
    name = root.find('Name').text
    try:
        hubHt = float(root.find('HubHeight').get('value'))
    except:
        sys.stderr.write("getTurbParams: HubHeight not found\n")
    try:
        capKW = float(root.find('CapacityKW').get('value'))
    except:
        # bug fix: this message previously said "HubHeight not found"
        sys.stderr.write("getTurbParams: CapacityKW not found\n")
    try:
        rtrDiam = float(root.find('RotorDiameter').get('value'))
    except:
        # bug fix: this message previously said "HubHeight not found"
        sys.stderr.write("getTurbParams: RotorDiameter not found\n")
    return name, capKW, hubHt, rtrDiam
#---------------------------------------------------------
def getTbls(tree,tblname,debug=False):
    ''' extract and return tables from turbine XML structure
        Table must have <Type>TurbineTable</Type>
        getTbls is usually called from getPwrTbls(), getThrustTbls(), getRPMTbls()
        Returns (vels, pwrs): velocity values and the table's y-values.
        NOTE: only the last <RhoNNN> column is returned when several air
        densities are present; `vcnt` and `adens` below are computed but unused.
        (Python 2 file: the debug printouts use print statements.)
    '''
    vels = []
    pwrs = []
    # parse Velocities section
    vel = tree.xpath('/'.join([tblname,'Velocities']))[0]
    vcnt = int(tree.xpath('/'.join([tblname,'Velocities/Count']))[0].attrib['value'])
    ivel = 0
    vels = []
    for child in vel:
        ctg = child.tag
        # <Count> pre-sizes the velocity list; <VelocityN> entries fill it in order
        if ctg.startswith('Count'):
            nvel = int(child.attrib['value'])
            vels = [None for ii in range(nvel)]
        if ctg.startswith('Velocity'):
            vels[ivel] = float(child.attrib['value'])
            if debug:
                print '{:12s} {:5.1f}'.format(child.tag, vels[ivel] )
            ivel += 1
    # parse AirDensities section
    adens = tree.xpath('/'.join([tblname,'AirDensities']))[0]
    nrho = int(tree.xpath('/'.join([tblname,'AirDensities/Count']))[0].attrib['value'])
    if debug:
        print 'Found {:} air densities'.format(nrho)
    # parse Values section: one <RhoNNN> child per air density, each holding
    # <Rows> (count) followed by <vI-J> value entries
    ipwr = 0
    pwrs = []
    vals = tree.xpath('/'.join([tblname,'Values']))[0]
    for child in vals:
        ctg = child.tag
        if ctg.startswith('Rho'):
            rho = float(ctg[3:])
            if debug:
                print 'Rho {:5.3f}'.format(rho)
            for rchld in child:
                rctg = rchld.tag
                if rctg.startswith('Rows'):
                    npwr = int(rchld.attrib['value'])
                    pwrs = [None for ii in range(npwr)]
                if rctg.startswith('v'):
                    pwrs[ipwr] = float(rchld.attrib['value'])
                    if debug:
                        print '{:12s} {:5.1f}'.format(rctg, pwrs[ipwr] )
                    ipwr += 1
    return vels, pwrs
#---------------------------------------------------------
def getPwrTbls(tree,debug=False):
    ''' extract and return power tables from turbine XML structure
        NOTE(review): only the *last* Power_TableN parsed is returned, and
        vels/pwrs are unbound (NameError) if no //Power_Tables node exists.
    '''
    ptbls = '//Power_Tables'
    for record in tree.xpath(ptbls):
        cnts = record.xpath('/'.join([ptbls,'Count']))
        ntbl = int(cnts[0].attrib['value'])
        if debug:
            sys.stderr.write('Found {:} {:} tables\n'.format(ntbl, ptbls))
        for itbl in range(ntbl):
            tname = 'Power_Table{:}'.format(itbl)
            vels, pwrs = getTbls(record,tname,debug=debug)
    return vels, pwrs
#---------------------------------------------------------
def getThrustTbls(tree,debug=False):
    ''' Return (velocities, thrust_coefficients) parsed from the
        //Thrust_Table node of a turbine XML tree. '''
    return getTbls(tree, '//Thrust_Table', debug=debug)
#---------------------------------------------------------
def getRPMTbls(tree,debug=False):
    ''' Return (velocities, rpm_values) parsed from the //RPM_Table node
        of a turbine XML tree. '''
    return getTbls(tree, '//RPM_Table', debug=debug)
# ----------- WRITING -----------------
#---------------------------------------------------
def newTurbTree(trbname, desc,
    vels, power, thrust, rpm, hubht, rdiam, rho=[1.225], percosts=[],
    cutInWS=3.0, cutOutWS=25.0, nblades=3, machineRating=3000.0,
    ttlCost=2000000, fndCost=100000):
    '''
    Build and return an lxml Element tree describing one turbine in the
    OpenWind TurbineType (*.owtg) XML format.

    trbname  : short one-word turbine name (becomes the root element tag)
    desc : turbine description string
    vels[] : wind speeds in mps at 1.0 mps intervals, starting with 0.0 mps
    power[] : turbine power output in kW at 1.0 mps intervals, starting with 0.0 mps
    thrust[] : turbine thrust coefficient at 1.0 mps intervals, starting with 0.0 mps
    rpm[] : turbine rpm at 1.0 mps intervals, starting with 0.0 mps
    hubht : turbine hub height in m
    rdiam : turbine rotor diameter in m
    rho : air densities in kg/m^3 (only a single value is fully supported)
    percosts : array of PerCost objects representing periodic costs
    cutInWS/cutOutWS : cut-in/cut-out wind speeds in mps
    nblades : number of blades
    machineRating : rated capacity in kW (also written as PeakOutputKW)
    ttlCost/fndCost : total and foundation cost
    NOTE(review): rho=[1.225] and percosts=[] are mutable default arguments;
    harmless here since neither is mutated, but worth fixing upstream.
    '''
    turbtree = etree.Element(trbname)
    # Need to set these to actual values
    tname = etree.SubElement(turbtree, "Name")
    tname.text = desc
    #tname.text = 'OpenWind Test Turbine'
    hh = etree.SubElement(turbtree, "HubHeight", value='{:.0f}'.format(hubht))
    rd = etree.SubElement(turbtree, "RotorDiameter", value='{:.0f}'.format(rdiam))
    vv = etree.SubElement(turbtree, "VoltageV", value='690')
    xx = etree.SubElement(turbtree, "CapacityKW", value='{:.0f}'.format(machineRating))
    xx = etree.SubElement(turbtree, "IsPitchControlled", value="1")
    xx = etree.SubElement(turbtree, "LowCutIn", value='{:.0f}'.format(cutInWS)) # "3")
    xx = etree.SubElement(turbtree, "HighCutOut", value='{:.0f}'.format(cutOutWS)) # "25")
    xx = etree.SubElement(turbtree, "IEC_adjustment", value="0")
    xx = etree.SubElement(turbtree, "NumBlades", value='{:d}'.format(nblades)) # "3")
    xx = etree.SubElement(turbtree, "LowTemperatureShutDown", value="-30")
    xx = etree.SubElement(turbtree, "HighTemperatureShutDown", value="45")
    xx = etree.SubElement(turbtree, "LowTemperatureUnits", value="0")
    xx = etree.SubElement(turbtree, "HighTemperatureUnits", value="0")
    xx = etree.SubElement(turbtree, "LowTemperatureRestart", value="-20")
    xx = etree.SubElement(turbtree, "HighTemperatureRestart", value="30")
    xx = etree.SubElement(turbtree, "IsVariableSpeed", value="1")
    xx = etree.SubElement(turbtree, "TiltAngleDegrees", value="5")
    xx = etree.SubElement(turbtree, "PeakOutputKW", value='{:.0f}'.format(machineRating) ) #"3000")
    xx = etree.SubElement(turbtree, "SpeedClass", value="1")
    xx = etree.SubElement(turbtree, "TiClass", value="1")
    xx = etree.SubElement(turbtree, "SpeedMax", value="10")
    xx = etree.SubElement(turbtree, "TiMax", value="14")
    xx = etree.SubElement(turbtree, "Comments")
    xx.text = 'This is not a warrantied power curve.'
    # Tables - Power, Thrust, RPM
    ptbls = etree.SubElement(turbtree, "Power_Tables")
    ptc = etree.SubElement(ptbls, "Count", value='1')
    ptbls.append( makeTable("Power_Table0", vels, rho, power) )
    turbtree.append( makeTable("Thrust_Table", vels, rho, thrust) )
    turbtree.append( makeTable("RPM_Table", vels, rho, rpm) )
    # Costs
    tc = etree.SubElement(turbtree, "TotalCost", value='{:d}'.format(ttlCost))
    fc = etree.SubElement(turbtree, "FoundationCost", value='{:d}'.format(fndCost))
    # PeriodicCosts
    if len(percosts) > 0:
        pcst = etree.SubElement(turbtree, "PeriodicCosts")
        pc = etree.SubElement(pcst, "Count", value='{:d}'.format(len(percosts)))
        for ipc in range(len(percosts)):
            pcst.append( percosts[ipc].makeXML(ipc) )
    # noise: placeholder octave-band levels (all zero) with total noise 100
    hz = [63,125,250,500,1000,2000,4000,8000]
    nhz = [0,0,0,0,0,0,0,0]
    addNoiseRows(turbtree, 100, 100, hz, nhz)
    return turbtree
#---------------------------------------------------
class PerCost:
    ''' class for OpenWind Periodic Costs (one recurring maintenance cost
        item attached to a turbine definition). '''
    def __init__(self, compname='None', cost=0, pyrs=0, cvbl=0, pvbl=0, isvp=0, isvc=0, cexp=1, pexp=1, cfct=1, pfct=1):
        # compname: component name; cost: cost per period; pyrs: period in years.
        # The remaining flags/exponents/factors mirror the OpenWind XML fields
        # (CostVariable, PeriodVariable, IsVariablePeriod, IsVariableCost,
        # CostExponent, PeriodExponent, CostFactor, PeriodFactor).
        self.compname = compname
        self.cost = cost
        self.pyrs = pyrs
        self.cvbl = cvbl
        self.pvbl = pvbl
        self.isvp = isvp
        self.isvc = isvc
        self.cexp = cexp
        self.pexp = pexp
        self.cfct = cfct
        self.pfct = pfct
    #---------------
    def makeXML(self,ipc):
        ''' returns a tree representing a PerCost (periodic cost) item;
            ipc is the 0-based index used in the PeriodicCostN tag name. '''
        pc = etree.Element('PeriodicCost{:d}'.format(ipc))
        pcv = etree.SubElement(pc, 'Type')
        pcv.text = 'PeriodicCost'
        pcv = etree.SubElement(pc, 'Component')
        pcv.text = self.compname
        pcv = etree.SubElement(pc, 'Cost', value='{:d}'.format(self.cost))
        pcv = etree.SubElement(pc, 'PeriodYears', value='{:d}'.format(self.pyrs))
        pcv = etree.SubElement(pc, 'CostVariable', value='{:d}'.format(self.cvbl))
        pcv = etree.SubElement(pc, 'PeriodVariable', value='{:d}'.format(self.pvbl))
        pcv = etree.SubElement(pc, 'IsVariablePeriod', value='{:d}'.format(self.isvp))
        pcv = etree.SubElement(pc, 'IsVariableCost', value='{:d}'.format(self.isvc))
        pcv = etree.SubElement(pc, 'CostExponent', value='{:d}'.format(self.cexp))
        pcv = etree.SubElement(pc, 'PeriodExponent', value='{:d}'.format(self.pexp))
        pcv = etree.SubElement(pc, 'CostFactor', value='{:d}'.format(self.cfct))
        pcv = etree.SubElement(pc, 'PeriodFactor', value='{:d}'.format(self.pfct))
        return pc
#---------------------------------------------------
def makeTblRows(parent, x, xname, cr):
    ''' Append a count element plus one value element per entry of x to parent.
        parent : the element (or tree) to append to
        x : an array of numeric values (written with 3 decimals)
        xname : tag-name prefix for the value elements (e.g. 'Velocity' -> Velocity0..)
        cr : tag name of the count element holding len(x) (e.g. 'Count' or 'Rows')
    '''
    vc = etree.SubElement(parent, cr, value='{:d}'.format(len(x)))
    for i in range(len(x)):
        vi = etree.SubElement(parent, '{:}{:}'.format(xname,i), value='{:.3f}'.format(x[i]))
#---------------------------------------------------
def addNoiseRows(parent, ttl, tnl, hz, nhz):
    ''' Append turbine noise elements to parent: a TotalNoise value (ttl)
        and one NoiseNNNhz element per octave band in hz with level nhz[i].
        tnl (tonal noise) is currently unused — see the commented line below. '''
    nr = etree.SubElement(parent, 'TotalNoise', value='{:.0f}'.format(ttl))
    #nr = etree.SubElement(parent, 'TonalNoise', value='{:.0f}'.format(tnl))
    # 2014 03 24 : tonal commented out - where/why is it needed?
    for i in range(len(hz)):
        vi = etree.SubElement(parent, 'Noise{:.0f}hz'.format(hz[i]), value='{:.0f}'.format(nhz[i]))
#---------------------------------------------------
def makeTable(tblName, vels, rho, y):
    '''
    Build an XML TurbineTable element named tblName.
    vels : velocity values (x-axis)
    rho : array of air densities
          - tables with multiple densities NOT YET IMPLEMENTED: the single
            y-vector is duplicated for every rho (a warning is printed)
    y : table values - should have as many columns as rho has values
    Returns tbl, which the caller appends to the appropriate parent element.
    '''
    tbl = etree.Element(tblName)
    ttype = etree.SubElement(tbl, "Type")
    ttype.text = 'TurbineTable'
    timin = etree.SubElement(tbl, "TI_min", value='0')
    timax = etree.SubElement(tbl, "TI_max", value='60')
    # x - velocities
    velocities = etree.SubElement(tbl, "Velocities")
    makeTblRows(velocities, vels, 'Velocity', 'Count')
    # air densities
    rhos = etree.SubElement(tbl, "AirDensities")
    rc = etree.SubElement(rhos, "Count", value='{:}'.format(len(rho)))
    for i in range(len(rho)):
        rc = etree.SubElement(rhos, 'Rho{:}'.format(i), value='{:.3f}'.format(rho[i]))
    if len(rho) > 1:
        sys.stderr.write("\n*** WARNING - found {:} values of rho (air-density)\n".format(len(rho)))
        sys.stderr.write("    Currently, only one value is allowed. y-vector will be duplicated for all rho values\n")
        sys.stderr.write("    writeTurbXML.py::makeTable({:},,)\n\n".format(tblName))
    # y - values: one RhoN.NNNNNN column per density, filled from the same y
    vals = etree.SubElement(tbl, "Values")
    rc = etree.SubElement(vals, "Columns", value='{:}'.format(len(rho)))
    for i in range(len(rho)):
        rv = etree.SubElement(vals, 'Rho{:.6f}'.format(rho[i]))
        makeTblRows(rv, y, 'v{:}-'.format(i), 'Rows')
    return tbl
# ----------- MODIFYING -----------------
def modTurbXML(oldTurbFile, newTurbFile, rotor_diameter=None):
    ''' Read a turbine OWTG file, optionally modify parameters (currently
        only rotor_diameter), and write the result to newTurbFile.
        NOTE(review): parseOWTG returns None on failure, which would make
        turbtree.getroot() raise AttributeError here — consider guarding. '''
    turbtree = parseOWTG(oldTurbFile)
    trbname = turbtree.getroot().tag
    if rotor_diameter is not None:
        turbtree.find('RotorDiameter').set('value', '{:.2f}'.format(rotor_diameter))
    # ... modify other params here
    # write new OWTG file
    turbXML = etree.tostring(turbtree,
                xml_declaration=True,
                doctype='<!DOCTYPE {:}>'.format(trbname),
                pretty_print=True)
    ofh = open(newTurbFile, 'w')
    ofh.write(turbXML)
    ofh.close()
# ----------- TESTING -----------------
def main():
    # main() only for testing module: builds a 3 MW test turbine, writes it
    # to testTurbine.owtg, then reads it back and prints what was recovered.
    # (Python 2 file: several bare print statements below.)
    nvel = 26
    rho = [1.225]
    thrst = [0.000, 0.000, 0.000, 0.878, 0.880, 0.881, 0.881, 0.882, 0.882, 0.843,
        0.764, 0.544, 0.390, 0.297, 0.235, 0.190, 0.156, 0.131,
        0.111, 0.096, 0.083, 0.073, 0.064, 0.057, 0.051, 0.046 ]
    pwr = [0,0,0,50,100,150,300,1000,1500,2000,2500,
        3000, 3000, 3000, 3000, 3000,
        3000, 3000, 3000, 3000, 3000,
        3000, 3000, 3000, 3000, 3000,
        ]
    rpm = [i for i in range(nvel)]
    vels = [float(i) for i in range(nvel)]
    hh = 100
    rdiam = 126.0
    ttlCost = 2000000
    fndCost = 100000
    percosts = []
    #percosts.append(PerCost(compname='Drive Train', cost=300000, pyrs=7))
    #percosts.append(PerCost(compname='Blades', cost=200000, pyrs=15))
    trbname = "OWTestTurb"
    desc = 'OpenWind Test Turbine'
    power = pwr
    thrust = thrst
    hubht = hh
    rtrdiam = rdiam
    # generate tree structure and convert to XML string
    turbtree = newTurbTree(trbname, desc, vels, power, thrust, rpm, hubht, rtrdiam, rho, percosts)
    turbXML = etree.tostring(turbtree,
        xml_declaration=True,
        doctype='<!DOCTYPE {:}>'.format(trbname),
        pretty_print=True)
    # --- save to file
    owtgFile = 'testTurbine.owtg'
    ofh = open(owtgFile, 'w')
    ofh.write(turbXML)
    ofh.close()
    sys.stderr.write('Wrote turbine file {:}\n'.format(owtgFile))
    # --- read back file and check values
    tree = parseOWTG(owtgFile)
    root = tree.getroot()
    print tree
    rdiamOut = float(root.find('RotorDiameter').get('value'))
    print 'RotorDiam: IN {:.1f} OUT {:.1f}'.format(rdiam, rdiamOut)
    # manual walk of the velocity rows (superseded by getPwrTbls below)
    ptable = root.find('.//Power_Table0')
    vels = ptable.find('.//Velocities')
    ws = []
    for v in vels:
        #print v.tag, v.get('value')
        if v.tag.startswith('Velocity'):
            ws.append(float(v.get('value')))
    vels = ptable.find('.//Velocities')
    tpwr = []
    #tree.xpath('/'.join([tblname,'Velocities']))[0]
    ws, tpwr = getPwrTbls(root)
    for i in range(len(ws)):
        print '{:4.1f} {:7.1f}'.format(ws[i], tpwr[i])
    name, capKW, hubHt, rtrDiam = getTurbParams(tree)
    print '{:} {:} {:} {:} '.format(name, capKW, hubHt, rtrDiam)
if __name__ == "__main__":
main()
| true |
329f1a2e664c79d11ce1bf12435a35ffbdf0ccc4 | Python | shen-huang/selfteaching-python-camp | /exercises/1901010167/1001S02E03_calculator.py | UTF-8 | 1,203 | 3.59375 | 4 | [] | no_license | #<<<<<<< master
def calculator():
    """Prompt for two integers and an operator (+ - * /); return the result.

    Loops until a recognized operator is entered.  NOTE(review): int() raises
    ValueError on non-numeric input and '/' raises ZeroDivisionError — the
    author's comment below acknowledges the program is incomplete.  This
    whole function is duplicated below by an unresolved merge (see the
    commented <<<<<<< / ======= / >>>>>>> markers).
    """
    while True:
        x=int(input('x='))
        opo=str(input('输入运算符'))
        y=int(input('y='))
        if opo == '+':
            return (x+y)
        elif opo == '-':
            return (x-y)
        elif opo == '*':
            return (x*y)
        elif opo == '/':
            return (x/y)
        else:
            print('输入有误,请重输')
print(calculator())
#知道这个程序不够完善,若要运算多个数,则会出错,但考虑这些情况进去,以目
#=======
def calculator():
    """Prompt for two integers and an operator (+ - * /); return the result.

    Robustness fixes vs. the original: non-numeric input and division by
    zero no longer crash — the user is re-prompted instead.
    """
    while True:
        try:
            x = int(input('x='))
        except ValueError:
            print('输入有误,请重输')
            continue
        opo = str(input('输入运算符'))
        try:
            y = int(input('y='))
        except ValueError:
            print('输入有误,请重输')
            continue
        if opo == '+':
            return (x+y)
        elif opo == '-':
            return (x-y)
        elif opo == '*':
            return (x*y)
        elif opo == '/':
            if y == 0:
                print('输入有误,请重输')
                continue
            return (x/y)
        else:
            print('输入有误,请重输')
print(calculator())
#知道这个程序不够完善,若要运算多个数,则会出错,但考虑这些情况进去,以目
#>>>>>>> master
#前水平还写不出来,且Day3的任务耗的时间有点多,所以先写了简陋版的计算器 | true |
678e2aab8eed9254325aa5acc36a8ec4be683114 | Python | NancyHebert/tssbe | /data/models/researcher.py | UTF-8 | 2,073 | 2.515625 | 3 | [] | no_license | from sqlalchemy import *
from data.models.utils.postgres_mixin import PGModel
class Model(PGModel):
    """Data-access model for the `researchers` table (availability dates)."""

    def __init__(self, *args, **kwargs):
        # NOTE(review): args/kwargs are forwarded as two positional values,
        # not unpacked (i.e. not PGModel.__init__(self, *args, **kwargs));
        # kept as-is because PGModel's signature is not visible here.
        PGModel.__init__(self, args, kwargs)
        self.researchers_table = Table('researchers', self.metadata, autoload = True)

    def get(self, uniweb_number):
        """Return all available_date rows for the given uniweb_number."""
        # Bug fix: connect *before* entering try.  Previously, if connect()
        # raised on the first call, the finally clause referenced the
        # never-set self.connection and raised AttributeError, masking the
        # real connection error.
        self.connection = self.db_engine.connect()
        try:
            stmt = select([self.researchers_table.c.available_date]).\
                where(self.researchers_table.c.uniweb_number == uniweb_number)
            results = self.connection.execute(stmt)
            return results.fetchall()
        finally:
            self.connection.close()

    def update(self, uniweb_number, researchers_json):
        """Insert or update a researcher's available_date; return the result proxy.

        researchers_json must contain an 'available_date' key; blank or None
        values are stored as NULL on update.
        """
        res = self.get(uniweb_number)
        self.connection = self.db_engine.connect()
        transaction = None
        try:
            transaction = self.connection.begin()
            if len(res) > 0: # Researcher exists in this table
                avail_date = researchers_json['available_date']
                if not (avail_date and avail_date.strip()): # Check for null or blank
                    avail_date = None
                stmt = self.researchers_table.update().\
                    where(self.researchers_table.c.uniweb_number == uniweb_number).\
                    values(
                        available_date = avail_date
                    )
            else: # Create the researcher in this table
                stmt = self.researchers_table.insert().\
                    values(
                        uniweb_number = uniweb_number,
                        available_date = researchers_json['available_date']
                    )
            results = self.connection.execute(stmt)
            transaction.commit()
            return results
        except:
            # Bug fix: guard the rollback — when begin() itself failed,
            # `transaction` was unbound and the old code raised NameError
            # here, hiding the original exception.
            if transaction is not None:
                transaction.rollback()
            raise
        finally:
            self.connection.close()
# Instantiated once when the odin app loads rather than on every call.
researcher = Model()
| true |
cdfa59d7f806a19ce668f8269d5acdf79539326b | Python | StrongWind001/MagicAuto | /MagicAuto/common/operateYaml.py | UTF-8 | 371 | 2.515625 | 3 | [
"MIT"
] | permissive | #! -*- coding:utf-8 -*-
import yaml
def getyaml(fileName):
    """Load and return the parsed contents of a YAML file.

    Returns None (after printing a message) when the file does not exist.
    Security/deprecation fix: yaml.load() without an explicit Loader is
    deprecated and can construct arbitrary Python objects from untrusted
    files; yaml.safe_load() parses plain data only.
    """
    try:
        with open(fileName,'r',encoding='utf-8') as f:
            ret = yaml.safe_load(f)
            print(ret)
            return ret
    except FileNotFoundError:
        print(u"找不到文件")
if __name__ == "__main__":
getyaml(r"D:\AutoEnvironment\MagicAuto\devices\devices.yaml") | true |
7b4f84ae9cb817223f3c46121a4c585bb6927d35 | Python | Aboostrom/Blackjack-study | /dealer_hand.py | UTF-8 | 186 | 2.875 | 3 | [] | no_license | from deck import Deck
class Dealer:
    """Holds the dealer's two-card starting hand and the remaining deck."""

    def __init__(self, card):
        remaining = Deck().cards_as_array()
        remaining.remove(card)
        self.deck = remaining
        # Hand starts with the given card plus the top card of the deck.
        self.hand = [card, self.deck.pop()]
| true |
1318bfeeb9610ec618afcfc24ecd7ec61e9f7d4a | Python | krnorris65/keahua-arboretum | /actions/feed_animals/feed_animal.py | UTF-8 | 1,533 | 3.875 | 4 | [] | no_license | import os
from .choose_animal import choose_animal
def feed_animal(arboretum):
    '''Presents a list of animals a user can feed.

    Arguments:
        arboretum that animal will be in
    '''
    os.system('cls' if os.name == 'nt' else 'clear')

    # Menu number -> canonical animal name passed on to choose_animal().
    # (Option 8 is displayed with a shorter label than its canonical name.)
    animals_by_choice = {
        "1": "River Dolphin",
        "2": "Gold Dust Day Gecko",
        "3": "Nene Goose",
        "4": "Kīkākapu",
        "5": "Pueo",
        "6": "'Ulae",
        "7": "Ope'ape'a",
        "8": "Hawaiian Happy-Face Spider",
    }

    print("1. River Dolphin")
    print("2. Gold Dust Day Gecko")
    print("3. Nene Goose")
    print("4. Kīkākapu")
    print("5. Pueo")
    print("6. 'Ulae")
    print("7. Ope'ape'a")
    print("8. Happy-Face Spider")
    print("\nChoose type of animal to feed.")
    choice = input(" > ")

    selected_animal = animals_by_choice.get(choice)
    if selected_animal is not None:
        choose_animal(selected_animal, arboretum)
| true |
831cff05f7ea267cb32aad4b6de460bbcaa7c132 | Python | chauhanmahavir/Python-Basics | /4.py | UTF-8 | 221 | 3 | 3 | [
"MIT"
] | permissive | example=10;
print(example);
example="hii"+"hello";
print(example)
exa=print("hello")
print(exa)
x,y=(3,5) #(x,y)=(3,5) , x,y=3,5
print(x)
print(y)
'''
x,y=(3,5,6) error :- too many value to unpack
'''
| true |
18306f50ae8377818d5415b5a76f5b69479770cd | Python | gracie524/comp5349 | /workload1.py | UTF-8 | 1,344 | 2.65625 | 3 | [] | no_license | from pyspark import SparkContext
import argparse
if __name__ == "__main__":
    sc = SparkContext(appName="work1")
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", help="the input path",
                        default='~/assignment/')
    parser.add_argument("--output", help="the output path",
                        default='work1')
    args = parser.parse_args()
    input_path = args.input
    output_path = args.output

    # Split the CSV into fields and drop the header row.
    df = sc.textFile(input_path + "AllVideos_short.csv")
    alldata = df.map(lambda line: line.split(","))
    def filter_header(line):
        # The header row starts with the literal column name "video_id".
        if line[0] == "video_id":
            return False
        else:
            return True
    data = alldata.filter(lambda line: filter_header(line))

    # Bug fixes vs. the original script:
    #  * map((lambda x: x[3], x[0])) built a (function, value) tuple and
    #    raised NameError — the lambdas must *return* tuples;
    #  * .distict() is a typo for .distinct();
    #  * RDD.collect() takes no argument (take(n) samples instead);
    #  * countByValue() returns a local dict, which cannot be join()ed with
    #    an RDD — counts are kept as pair RDDs via reduceByKey instead.

    # (category, number of distinct videos in that category)
    num_id = data.map(lambda x: (x[3], x[0])).distinct() \
                 .map(lambda p: (p[0], 1)).reduceByKey(lambda a, b: a + b)
    # (video_id, number of distinct countries the video trended in)
    num_co = data.map(lambda x: (x[0], x[11])).distinct() \
                 .map(lambda p: (p[0], 1)).reduceByKey(lambda a, b: a + b)
    # (video_id, (category, country_count)) -> keep only (category, country_count)
    cata_id = data.map(lambda x: (x[0], x[3])).distinct().join(num_co)
    num_idco = cata_id.values()
    # Total country appearances per category, divided by the number of
    # videos in the category: average countries per video.
    sum_country = num_idco.reduceByKey(lambda a, b: a + b)
    cata_result = sum_country.join(num_id)
    finalresult = cata_result.mapValues(lambda v: v[0] / v[1])
    finalresult.saveAsTextFile(output_path)
54a5d2f849cde12116652daa7ceed7dd652d988b | Python | FilipaNunes/CrossDocking | /cross.py | UTF-8 | 1,173 | 3.046875 | 3 | [] | no_license | import os
import pandas as pd
import jedi
data = pd.read_excel('data.xlsx')
# define data as a matrix
data.as_matrix()
i=0
j=0
same_client = [1] * len(data)
# the following cycle determines the number of packages to be delivered to a specific city
while i < len(data) - 1:
# if the next iteration is verified it means that the packages are to be delivered to the same city
if (data.iloc[i,1] == data.iloc[i+1,1]):
# if the next iteration is verified it means that the packages are to be delivered to the same client
if (data.iloc[i,2] == data.iloc[i+1,2]):
# if the next iteration is verified then the arrival times of the packages are arriving with more than one day difference.
# if this happen then the one with earliest EDD needs to have its dispatch delayed (a package can never spend more than 24 hours in the cross-dock)
if(abs(data.iloc[i,3] - data.iloc[i+1,3])) > 1:
#???????????????
print(i)
else:
same_client[j] = same_client[j] + 1
else:
j = j + 1
i = i + 1
else:
i = i + 1
print(same_client[:])
| true |
765f4f2f79d7144c33a224830dce6ff622755835 | Python | mak705/Python_interview | /oops/class7_1.py | UTF-8 | 10,556 | 3.96875 | 4 | [] | no_license | def outer_function(): #outer function doesnt take any params
message = 'Hi' # Locat variable
def inner_function(): #Inner function will print the result
print message
return inner_function()
outer_function()
>> Hi
-------------------------------------------------------------------
def outer_function(): #outer function doesnt take any params
message = 'Hi' # Locat variable
def inner_function(): #Inner function will print the result
print message
return inner_function
my_func = outer_function()
my_func()
>> Hi
-------------------------------------------------------------------
#DECORATOR
#A decorator takes another function as an argument
def decorator_function(orginal_function):
def wrapper_function(): #Inner function will print the result
return orginal_function()
return wrapper_function
def display():
print 'display function ran'
decorated_dispaly = decorator_function(display) #dispaly function = orginal function with no decorator and return
#the orginal function in this case its dislay function
#decorator function waiting wrapper function to be executed will
#return the orginal function
decorated_dispaly() #actuallly executing wrapper function which then execting display function and print
#display function ran
>> display function ran
---------------------------------------------------------------------
#A decorator adds functionality to an existing function by putting that functionality inside the wrapper
def decorator_function(orginal_function):
def wrapper_function():
print ('wrapper executed this before {}'.format(orginal_function.__name__))#We can see this message
#print first before the orginal function
return orginal_function()
return wrapper_function
def display():
print 'display function ran'
decorated_dispaly = decorator_function(display)
decorated_dispaly()
>>wrapper executed this before display
display function ran
---------------------------------------------------------------------
#A decorator adds functionality to an existing function by putting that functionality inside the wrapper
def decorator_function(orginal_function):
def wrapper_function():
print ('wrapper executed this before {}'.format(orginal_function.__name__))#We can see this message
#print first before the orginal function
return orginal_function()
return wrapper_function
@decorator_function
def display():
print 'display function ran'
decorated_dispaly = decorator_function(display)
display()
#@decorator_function => display = decorator_function(display)
#decorated_dispaly()
>> wrapper executed this before display
display function ran
------------------------------------------------------------------------
def decorator_function(orginal_function):
def wrapper_function():
print ('wrapper executed this before {}'.format(orginal_function.__name__))#We can see this message
#print first before the orginal function
return orginal_function()
return wrapper_function
@decorator_function
def display():
print 'display function ran'
decorated_dispaly = decorator_function(display)
def display_info(name,age):
print ('display info ran with the args({},{})'.format(name,age))
display_info('mak',28)
display()
>> display info ran with the args(mak,28)
wrapper executed this before display
display function ran
---------------------------------------------------------------------------
def decorator_function(orginal_function):
def wrapper_function(*args,**kwargs):
print ('wrapper executed this before {}'.format(orginal_function.__name__))#We can see this message
#print first before the orginal function
return orginal_function(*args,**kwargs)
return wrapper_function
@decorator_function
def display():
print 'display function ran'
decorated_dispaly = decorator_function(display)
@decorator_function
def display_info(name,age):
print ('display info ran with the args({},{})'.format(name,age))
display_info('mak',28) # if we dont give *args and **kwargs it will throw error as argument 2 given
display()
>>wrapper executed this before display_info
display info ran with the args(mak,28)
wrapper executed this before display
display function ran
-------------------------------------------------------------------------
def decorator_function(orginal_function):
def wrapper_function(*args,**kwargs):
print ('wrapper executed this before {}'.format(orginal_function.__name__))#We can see this message
#print first before the orginal function
return orginal_function(*args,**kwargs)
return wrapper_function
class decorator_class(object):
def __init__(self,orginal_function):
self.orginal_function = orginal_function
def __call__(self, *args,**kwargs):
print ('Call executed this before {}'.format(self.orginal_function.__name__))
return self.orginal_function(*args,**kwargs)
@decorator_class
def display():
print 'display function ran'
decorated_dispaly = decorator_function(display)
@decorator_class
def display_info(name,age):
print ('display info ran with the args({},{})'.format(name,age))
display_info('mak',28)
display()
>>Call executed this before display_info
display info ran with the args(mak,28)
Call executed this before display
display function ran
------------------------------------------------------------------------
def my_logger(orig_func):
import logging
logging.basicConfig(filename='{}.log'.format(orig_func.__name__), level=logging.INFO)
def wrapper(*args, **kwargs):
logging.info('Rans with args:{}, and kwargs {}'.format(args,kwargs))
return orig_func(*args, **kwargs)
return wrapper
@my_logger
def display_info(name,age):
print 'display info ran with 2 argument ({},{})'.format(name,age)
display_info('mak',28)
#run as python file , you will login report as display_info.log
>> display info ran with 2 argument (mak,28)
---------------------------------------------------------------------------
def my_logger(orig_func):
import logging
logging.basicConfig(filename='{}.log'.format(orig_func.__name__), level=logging.INFO)
def wrapper(*args, **kwargs):
logging.info('Ran with args:{}, kwargs:{}'.format(args,kwargs))
return orig_func(*args, **kwargs)
return wrapper
@my_logger
def display_info(name,age):
print 'display info ran with 2 argument ({},{})'.format(name,age)
display_info('mak', 28)
>> display info ran with 2 argument (mak,28)
---------------------------------------------------------------------------
def my_logger(orig_func):
import logging
logging.basicConfig(filename='{}.log'.format(orig_func.__name__), level=logging.INFO)
def wrapper(*args, **kwargs):
logging.info('Ran with args:{}, kwargs:{}'.format(args,kwargs))
return orig_func(*args, **kwargs)
return wrapper
def my_timer(orig_func):
import time
def wrapper(*args, **kwargs):
t1 = time.time()
result = orig_func(*args, **kwargs)
t2 = time.time() -t1
print ('{} ran in : {} sec'.format(orig_func.__name__,t2))
return result
return wrapper
import time
@my_timer
def display_info(name,age):
time.sleep(1)
print 'display info ran with 2 argument ({},{})'.format(name,age)
display_info('mak', 28)
>>display info ran with 2 argument (mak,28)
display_info ran in : 1.00041294098 sec
-------------------------------------------------------------------
def my_logger(orig_func):
import logging
logging.basicConfig(filename='{}.log'.format(orig_func.__name__), level=logging.INFO)
def wrapper(*args, **kwargs):
logging.info('Ran with args:{}, kwargs:{}'.format(args,kwargs))
return orig_func(*args, **kwargs)
return wrapper
def my_timer(orig_func):
import time
def wrapper(*args, **kwargs):
t1 = time.time()
result = orig_func(*args, **kwargs)
t2 = time.time() -t1
print ('{} ran in : {} sec'.format(orig_func.__name__,t2))
return result
return wrapper
import time
@my_timer
@my_logger
def display_info(name,age):
time.sleep(1)
print 'display info ran with 2 argument ({},{})'.format(name,age)
display_info('mak', 28) #Some random output which we dont want, we want display in ran 1.0 sec in the
#the timing output is not added to the log file
>> display info ran with 2 argument (mak,28)
wrapper ran in : 1.00123000145 sec
------------------------------------------------------------------------
def my_logger(orig_func):
import logging
logging.basicConfig(filename='{}.log'.format(orig_func.__name__), level=logging.INFO)
def wrapper(*args, **kwargs):
logging.info('Ran with args:{}, kwargs:{}'.format(args,kwargs))
return orig_func(*args, **kwargs)
return wrapper
def my_timer(orig_func):
import time
def wrapper(*args, **kwargs):
t1 = time.time()
result = orig_func(*args, **kwargs)
t2 = time.time() -t1
print ('{} ran in : {} sec'.format(orig_func.__name__,t2))
return result
return wrapper
import time
#@my_logger
#@my_timer
def display_info(name,age):
time.sleep(1)
print 'display info ran with 2 argument ({},{})'.format(name,age)
display_info = my_timer(display_info)
print (display_info.__name__)
display_info('mak',28)
>>wrapper
display info ran with 2 argument (mak,28)
display_info ran in : 1.00207591057 sec
------------------------------------------------------------------------
from functools import wraps
def my_logger(orig_func):
import logging
logging.basicConfig(filename='{}.log'.format(orig_func.__name__), level=logging.INFO)
@wraps(orig_func)
def wrapper(*args, **kwargs):
logging.info('Ran with args:{}, kwargs:{}'.format(args,kwargs))
return orig_func(*args, **kwargs)
return wrapper
def my_timer(orig_func):
import time
@wraps(orig_func)
def wrapper(*args, **kwargs):
t1 = time.time()
result = orig_func(*args, **kwargs)
t2 = time.time() -t1
print ('{} ran in : {} sec'.format(orig_func.__name__,t2))
return result
return wrapper
import time
#@my_logger
#@my_timer
def display_info(name,age):
time.sleep(1)
print 'display info ran with 2 argument ({},{})'.format(name,age)
display_info = my_timer(display_info)
print (display_info.__name__)
display_info('mak',28)
>> display_info
display info ran with 2 argument (mak,28)
display_info ran in : 1.00178098679 sec
| true |
fcb102f306b40277035ca441338cc5a7b33318e6 | Python | JunjieZhouwust/Coronavirus-Estimation | /Coronavirus Estimationv1.1.py | UTF-8 | 5,347 | 2.9375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
plt.rcParams['font.sans-serif'] = ['SimHei']  # use a CJK font so the Chinese plot labels render
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with the CJK font
L = 60  # simulation horizon in days, counted from Jan 1 (original comment said 50 days; the code uses 60)
Today = 31  # today is day 31, i.e. Jan 31
def dShift(lst, k):
    """Rotate ``lst`` left by ``k`` positions and return the rotated copy."""
    head, tail = lst[:k], lst[k:]
    return tail + head
def dPPNum(n, k, rate, ShutDay=24, StartDay=16, StartNum=100):
    """Simulate the daily infected count over L days with a delay-difference model.

    n        -- length of the sliding history window (days kept in ``lst``)
    k        -- transmission coefficient
    rate     -- daily decay factor applied to k after the lockdown day
    ShutDay  -- day index of the lockdown (Jan 24 by default)
    StartDay -- day index after which the history window starts advancing
    StartNum -- seed number of infected people

    Returns a list of L daily values.
    """
    # y[i] = y[i-1] + k * (y[i-3] - y[i-9]) - y[i-n]: new infections are driven
    # by people infected 3-9 days ago; the oldest cohort v[0] drops out
    # (presumably recovered after n days -- TODO confirm the model intent).
    dFun = lambda v, n, k: v[n - 1] + k * (v[n - 3] - v[n - 9]) - v[0]
    Y = [0 for i in range(L)]
    lst = [0 for i in range(n)]
    lst[-1] = StartNum
    for i, y in enumerate(Y):
        y = dFun(lst, n, k)
        y = np.maximum(y, 0)  # the infected count can never go negative
        if i > ShutDay:  # lockdown around Jan 24: transmission decays by factor `rate` each day after
            k = k * rate
        if i > StartDay:  # simulation starts on Jan 1 by default
            lst = dShift(lst, 1)
            lst[-1] = y
        Y[i] = y
    return Y
def dPPNum0():
    """Return the officially reported confirmed-case series (days 20-30) plus derived series.

    Returns a 7-tuple:
      Y00 national, Y01 Hubei, Y02 Wuhan (reported confirmed cases),
      Y03 other provinces (national - Hubei),
      Y04 Hubei outside Wuhan (Hubei - Wuhan),
      Y05/Y06 back-extrapolated estimates from Y03/Y04.
    """
    Y00 = [0 for i in range(Today)]  # national series
    Y00[20] = 291  # source: National Health Commission
    Y00[21] = 440  # source: National Health Commission
    Y00[22] = 571  # source: National Health Commission
    Y00[23] = 830  # source: National Health Commission
    Y00[24] = 1287  # source: National Health Commission
    Y00[25] = 1975  # source: National Health Commission
    Y00[26] = 2744  # source: National Health Commission
    Y00[27] = 4515  # source: National Health Commission
    Y00[28] = 5974  # source: National Health Commission
    Y00[29] = 7711  # source: National Health Commission
    Y00[30] = 9692  # source: National Health Commission
    Y01 = [0 for i in range(Today)]  # Hubei province series
    Y01[20] = 270  # source: Hubei Health Commission
    Y01[21] = 375  # source: Hubei Health Commission
    Y01[22] = 444  # source: Hubei Health Commission
    Y01[23] = 549  # source: Hubei Health Commission
    Y01[24] = 729  # source: Hubei Health Commission
    Y01[25] = 1052  # source: Hubei Health Commission
    Y01[26] = 1423  # source: Hubei Health Commission
    Y01[27] = 2714  # source: Hubei Health Commission
    Y01[28] = 3554  # source: Hubei Health Commission
    Y01[29] = 4586  # source: Hubei Health Commission
    Y01[30] = 5806  # source: Hubei Health Commission
    Y02 = [0 for i in range(Today)]  # Wuhan city series
    Y02[20] = 258  # source: Wuhan Health Commission
    Y02[21] = 363  # source: Wuhan Health Commission
    Y02[22] = 425  # source: Wuhan Health Commission
    Y02[23] = 495  # source: Wuhan Health Commission
    Y02[24] = 572  # source: Wuhan Health Commission
    Y02[25] = 618  # source: Wuhan Health Commission
    Y02[26] = 698  # source: Wuhan Health Commission
    Y02[27] = 1590  # source: Wuhan Health Commission
    Y02[28] = 1905  # source: Wuhan Health Commission
    Y02[29] = 2261  # source: Wuhan Health Commission
    Y02[30] = 2639  # source: Wuhan Health Commission
    Y03 = [Y00[i] - Y01[i] for i in range(Today)]  # other provinces = national - Hubei
    Y04 = [Y01[i] - Y02[i] for i in range(Today)]  # Hubei outside Wuhan = Hubei - Wuhan
    Y05 = [Y03[i] / (500 * 0.35) * (1400) for i in range(Today)]  # other provinces, back-extrapolated (assumes 5M left Wuhan, 35% to other provinces; 1400 appears to be a population scale -- TODO confirm)
    Y06 = [Y04[i] / (500 * 0.65) * (1400) for i in range(Today)]  # Hubei outside Wuhan, back-extrapolated (65% stayed in province)
    return Y00, Y01, Y02, Y03, Y04, Y05, Y06
def dDrawPlot(N, rate, ShutDay, StartDay, StartNum):
    """Plot reported data against model estimates for Wuhan / rest of Hubei / other provinces.

    N        -- recovery time in days (history-window length fed to dPPNum)
    rate     -- daily decay factor of the transmission coefficient after lockdown
    ShutDay  -- lockdown day index
    StartDay -- day index at which transmission starts
    StartNum -- seed number of infected people
    """
    # Per-region transmission coefficients (K[0] unused).
    K = [0 for i in range(4)]
    K[1] = 0.72
    K[2] = 0.72
    K[3] = 0.31
    StartNum1 = StartNum  # 5M people left Wuhan; 35% went to other provinces
    StartNum2 = StartNum * 2  # 5M people left Wuhan; 65% stayed within Hubei
    StartNum3 = StartNum * 4  # initial infected count inside Wuhan
    StartDay1 = StartDay + 2  # other provinces lag by 2 days
    StartDay2 = StartDay + 5  # rest of Hubei lags by 5 days
    StartDay3 = StartDay  # transmission start inside Wuhan
    P00, P01, P02, P03, P04, P05, P06 = dPPNum0()
    P1 = dPPNum(N, K[1], rate, ShutDay, StartDay1, StartNum1)
    P2 = dPPNum(N, K[2], rate, ShutDay, StartDay2, StartNum2)
    P3 = dPPNum(26, K[3], rate, 24, StartDay3, StartNum3)
    P0 = [P1[i] + P2[i] + P3[i] for i in range(L)]
    # Solid lines: reported data; dashed lines: model estimates.
    plt.plot(P00, "c-o", lw=2, label="@全国数据(20-%d日)" % (Today))
    plt.plot(P03, "r-", lw=2, label="@外省数据(全国减湖北)")
    plt.plot(P04, "g-", lw=2, label="@外地数据(湖北减武汉)")
    plt.plot(P02, "b-", lw=2, label="@武汉数据")
    plt.plot(P0, "c--", lw=1, label="@全国估计: 外省+外地+武汉")
    plt.plot(P1, "r--", lw=1, label="@外省估计:传播系数k:%.4f 初始人数:%d" % (K[1], StartNum1))
    plt.plot(P2, "g--", lw=1, label="@外地估计:传播系数k:%.4f 初始人数:%d" % (K[2], StartNum2))
    plt.plot(P3, "b--", lw=1, label="@武汉估计:传播系数k:%.4f 初始人数:%d" % (K[3], StartNum3))
    plt.legend()
    plt.ylabel(r"$Num$")
    des = r"人数估计曲线图(科研用途)--%d天治愈+%d号封城模型: $y_{[i]}=y_{[i-1]}+k(y_{[i-3]}-y_{[i-9]})-y_{[i-22]}$" % (N, ShutDay)
    des = des + r"$(\ if\ i\geq%d\ k_{i}=%.2f*k_{i-1}$)" % (ShutDay, rate)
    plt.title(des, fontsize=12, fontweight='heavy', color='blue')
    # Build "mm-dd" x tick labels for L consecutive days starting 2020-01-01.
    dti = pd.date_range('20200101', periods=L, freq='1D', name='dt')
    pydate_array = dti.to_pydatetime()
    date_only_array = np.vectorize(lambda s: s.strftime('%m-%d'))(pydate_array)
    date_only_series = pd.Series(date_only_array)
    plt.xticks(np.arange(0, L, 1), date_only_series, rotation=45)
    plt.grid(axis="x", ls='-.', c='#111111')
    plt.grid(axis="y", ls='-.', c='#111111')
    plt.xlim((0, L - 1))  # display range only
    plt.ylim((0, np.max(P0) * 1.02))  # display range only
if __name__ == "__main__":
    plt.subplot(111)
    # 22-day recovery, k decays by factor 0.7/day after day 27,
    # spread starts on day 14 with 100 seed cases.
    dDrawPlot(22, 0.7, 27, 14, 100)
    des = r"人数估计曲线图(科研用途)1"  # output file name stem (kept verbatim; used by savefig below)
    plt.get_current_fig_manager().window.showMaximized()
    fig = plt.gcf()
    plt.show()
    fig.savefig("{}.png".format(des), dpi=200, bbox_inches='tight')
| true |
d1ccc3da439a96c7b25af277ac166685882eb264 | Python | BinaryBurger/Cron-o-graph-Nagios | /binaryburger-cronograph-nagios.py | UTF-8 | 2,573 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
binaryburger-cronograph-nagios.py: Nagios plugin to monitor servers executing tasks managed by the BinaryBurger Cron-o-graph
Author: Jens Nistler <loci@binaryburger.com>
License: GPL
Version: 1.0
"""
import argparse, sys, urllib2, base64, json
# Nagios plugin return codes (per the Nagios plugin development guidelines)
EXIT_OK = 0
EXIT_WARN = 1
EXIT_CRIT = 2
EXIT_UNKNOWN = 3
class cronograph_check:
uri = "http://www.binaryburger.com/cronograph/api/"
message = False
def _set_message(self, message):
self.message = message
def _exit(self, status):
if self.message:
print self.message
sys.exit(status)
def check(self):
"""Parse command line arguments and get status
"""
parser = argparse.ArgumentParser(
description="BinaryBurger Cron-o-graph Nagios plugin"
)
parser.add_argument(
"--server",
dest="Server",
help="The server name as shown on the Cron-o-graph web interface",
required=True
)
parser.add_argument(
"--secret",
dest="Secret",
help="The server secret as shown on the Cron-o-graph web interface",
required=True
)
parser.add_argument(
"--api",
dest="API",
help="Set different API URI",
required=False,
default="http://www.binaryburger.com/cronograph/api/"
)
args = parser.parse_args()
if args.API:
self.uri = args.API
request = urllib2.Request(self.uri + "status")
request.add_header("Authorization", "Basic %s" % base64.encodestring("%s:%s" % (args.Server, args.Secret))[:-1])
try:
response_handle = urllib2.urlopen(request)
response_json = json.loads(response_handle.read())
# check response
if not response_json["status"] or response_json["status"] not in [EXIT_OK, EXIT_WARN, EXIT_CRIT]:
self._set_message("API response did not contain a status code")
self._exit(EXIT_UNKNOWN)
# set status message
if response_json["message"]:
self._set_message(response_json["message"])
self._exit(response_json["status"])
except IOError, e:
message = "API request failed"
if hasattr(e, "code") and e.code != 0:
message += " (error code " + str(e.code) + ")"
self._set_message(message)
self._exit(EXIT_UNKNOWN)
except ValueError, e:
message = "Failed to decode API response"
if hasattr(e, "message") and e.message.strip():
message += "( " + e.message.strip() + ")"
self._set_message(message)
self._exit(EXIT_UNKNOWN)
except Exception, e:
self._set_message("An unexpected error happened (" + str(type(e)) + ")")
self._exit(EXIT_UNKNOWN)
# Run the check when executed as a script (exits via sys.exit with a Nagios code).
if __name__ == "__main__":
    check = cronograph_check()
    check.check()
| true |
3486ff970216445c00c70301df48c8586717cf21 | Python | lordzizzy/leet_code | /04_daily_challenge/2021/03-mar/week2/swapping_nodes_linked_list.py | UTF-8 | 4,992 | 3.546875 | 4 | [] | no_license | # https://leetcode.com/explore/challenge/card/march-leetcoding-challenge-2021/589/week-2-march-8th-march-14th/3671/
# You are given the head of a linked list, and an integer k.
# Return the head of the linked list after swapping the values of the kth node
# from the beginning and the kth node from the end (the list is 1-indexed).
# Example 1:
# Input: head = [1,2,3,4,5], k = 2
# Output: [1,4,3,2,5]
# Example 2:
# Input: head = [7,9,6,6,7,8,3,0,9,5], k = 5
# Output: [7,9,6,6,8,7,3,0,9,5]
# Example 3:
# Input: head = [1], k = 1
# Output: [1]
# Example 4:
# Input: head = [1,2], k = 1
# Output: [2,1]
# Example 5:
# Input: head = [1,2,3], k = 2
# Output: [1,2,3]
# Constraints:
# The number of nodes in the list is n.
# 1 <= k <= n <= 10⁵
# 0 <= Node.val <= 100
from typing import Callable, List, Optional
from termcolor import colored
from shared.linked_list import ListNode, build_linked_list, build_node_data_list
class Solution:
    """LeetCode 1721: swap the k-th node from the front with the k-th node
    from the back of a singly linked list (1-indexed).  Several
    interchangeable implementations are kept for comparison; ``swapNodes``
    is the entry point.
    """

    def swapNodes(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
        # Delegate to the O(1)-space value-swapping variant.
        return self.swapNodes_only_values(head, k)

    def swapNodes_only_values(
        self, head: Optional[ListNode], k: int
    ) -> Optional[ListNode]:
        # Two pointers, O(1) space: advance `runner` k-1 steps to reach the
        # k-th node from the front; then move both pointers in lockstep until
        # `runner` falls off the end -- `walker` then sits on the k-th node
        # from the back.
        walker = runner = head
        for _ in range(k - 1):
            runner = runner.next
        first, runner = runner, runner.next
        while runner:
            walker = walker.next
            runner = runner.next
        # Swap the stored values only; node links stay untouched.
        walker.val, first.val = first.val, walker.val
        return head

    def swapNodes_only_values_list(
        self, head: Optional[ListNode], k: int
    ) -> Optional[ListNode]:
        # O(n)-space variant: materialize the nodes into a list and index the
        # k-th node from each end directly.
        if head is None:
            return None
        nodes = [head]
        cur = head
        while cur:
            cur = cur.next
            if cur:
                nodes.append(cur)
        n1, n2 = nodes[k - 1], nodes[-k]
        n1.val, n2.val = n2.val, n1.val
        return head

    def swapNodes_2ptrs(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
        # Swaps the actual nodes by relinking, not just the values.  A dummy
        # head removes the first-node special case; `first` and `walker` end
        # up as the predecessors of the two nodes being swapped.
        dummy = ListNode(val=0, next=head)
        walker = runner = first = dummy
        for _ in range(k):
            first = runner
            runner = runner.next
        while runner.next:
            walker = walker.next
            runner = runner.next
        left, right = first.next, walker.next
        # Normalize so that `left` precedes `right` when the two are adjacent.
        if right.next is left:
            left, right = right, left
            first, walker = walker, first
        left_next, right_next = left.next, right.next
        if left_next is right:
            # Adjacent nodes: relink the three-node chain directly.
            first.next = right
            right.next = left
            left.next = right_next
        else:
            first.next, walker.next = right, left
            right.next, left.next = left_next, right_next
        return dummy.next

    # todo: index-based node relinking -- the body of an earlier draft was
    # removed; still unimplemented.
    def swapNodes_list(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
        raise NotImplementedError()
# Plain-int representation of a list's node values, and the signature shared
# by all Solution.swapNodes_* variants.
NodeDataList = List[int]
SolutionFunc = Callable[[Optional[ListNode], int], Optional[ListNode]]
def test_solution(nodes: NodeDataList, k: int, expected: NodeDataList) -> None:
    """Run every swapNodes_* variant on a fresh linked list built from
    ``nodes`` and print a colored PASSED/FAILED line against ``expected``.
    """

    def test_impl(
        func: SolutionFunc, nodes: NodeDataList, k: int, expected: NodeDataList
    ) -> None:
        # Each variant mutates the list, so rebuild it for every call.
        head = build_linked_list(nodes)
        r = func(head, k)
        r_nodes = build_node_data_list(r)
        if r_nodes == expected:
            print(
                colored(
                    f"PASSED {func.__name__} => {nodes} swapped at {k}th element from the beginning and the {k}th node from the end is {r_nodes}",
                    "green",
                )
            )
        else:
            # Bug fix: the failure branch used to print "PASSED" (in red) too,
            # so a failing variant looked like a pass in the transcript.
            print(
                colored(
                    f"FAILED {func.__name__} => {nodes} swapped at {k}th element from the beginning and the {k}th node from the end is {r_nodes} but expected {expected}",
                    "red",
                )
            )

    sln = Solution()
    test_impl(sln.swapNodes_only_values, nodes, k, expected)
    test_impl(sln.swapNodes_only_values_list, nodes, k, expected)
    test_impl(sln.swapNodes_2ptrs, nodes, k, expected)
if __name__ == "__main__":
    # Cases mirror LeetCode 1721 examples 1-5 (see the header comment block).
    test_solution(nodes=[1, 2, 3, 4, 5], k=2, expected=[1, 4, 3, 2, 5])
    test_solution(
        nodes=[7, 9, 6, 6, 7, 8, 3, 0, 9, 5],
        k=5,
        expected=[7, 9, 6, 6, 8, 7, 3, 0, 9, 5],
    )
    test_solution(nodes=[1], k=1, expected=[1])
    test_solution(nodes=[1, 2], k=1, expected=[2, 1])
    test_solution(nodes=[1, 2, 3], k=2, expected=[1, 2, 3])
| true |
5f20fe776c15b6cb5f8c24c9bab47ff86f12bcf8 | Python | nilearn/nilearn | /examples/01_plotting/plot_haxby_masks.py | UTF-8 | 1,872 | 2.59375 | 3 | [
"BSD-3-Clause"
] | permissive | """
Plot Haxby masks
================
Small script to plot the masks of the Haxby dataset.
"""
#########################################################################
# Load Haxby dataset
# ------------------
from nilearn import datasets

haxby_dataset = datasets.fetch_haxby()

# print basic information on the dataset
print(
    f"First subject anatomical nifti image (3D) is at: {haxby_dataset.anat[0]}"
)
print(
    f"First subject functional nifti image (4D) is at: {haxby_dataset.func[0]}"
)

# Build the mean image because we have no anatomic data
func_filename = haxby_dataset.func[0]
from nilearn import image

mean_img = image.mean_img(func_filename)

# Axial slice (in mm) on which the masks are drawn.
z_slice = -14

#########################################################################
# Plot the masks
# --------------
import matplotlib.pyplot as plt

from nilearn.plotting import plot_anat, show

fig = plt.figure(figsize=(4, 5.4), facecolor="k")

display = plot_anat(
    mean_img, display_mode="z", cut_coords=[z_slice], figure=fig
)
mask_vt_filename = haxby_dataset.mask_vt[0]
mask_house_filename = haxby_dataset.mask_house[0]
mask_face_filename = haxby_dataset.mask_face[0]
masks = [mask_vt_filename, mask_house_filename, mask_face_filename]
colors = ["red", "blue", "limegreen"]
# Draw each binary mask as a single contour in its own color.
for mask, color in zip(masks, colors):
    display.add_contours(
        mask,
        contours=1,
        antialiased=False,
        linewidth=4.0,
        levels=[0],
        colors=[color],
    )

# We generate a legend using the proxy-artist trick described in
# https://matplotlib.org/stable/users/explain/axes/legend_guide.html
from matplotlib.patches import Rectangle

p_v = Rectangle((0, 0), 1, 1, fc="red")
p_h = Rectangle((0, 0), 1, 1, fc="blue")
p_f = Rectangle((0, 0), 1, 1, fc="limegreen")
plt.legend([p_v, p_h, p_f], ["vt", "house", "face"], loc="lower right")

show()

# sphinx_gallery_dummy_images=1
| true |
821e6db208ae14d86e3d86ebe577870239a462c6 | Python | shiyoung77/6DoF_Pose_Estimation_with_Particle_Filtering | /pf_pose_estimation/preprocess.py | UTF-8 | 1,698 | 2.5625 | 3 | [
"BSD-2-Clause"
] | permissive | import os
import time
import numpy as np
import trimesh
from mesh_to_sdf import mesh_to_voxels # pip install mesh-to-sdf; https://github.com/marian42/mesh_to_sdf
def mesh_to_tsdf(mesh_path, vol_dim=101, save_path=None):
    """Convert a mesh file into a TSDF voxel volume and save it as a .npz file.

    mesh_path -- path to a mesh file loadable by trimesh
    vol_dim   -- number of voxels per axis of the cubic output volume
    save_path -- output .npz path; defaults to "tsdf.npz" next to the mesh

    Returns the path the volume was saved to.
    """
    mesh = trimesh.load(mesh_path)
    # mesh_to_voxels samples the SDF on a (vol_dim-2)^3 grid and pads one
    # voxel on each side, yielding vol_dim^3 samples; x/z axes are swapped,
    # presumably to match the consuming pipeline's layout -- TODO confirm.
    voxels = np.swapaxes(mesh_to_voxels(mesh, vol_dim - 2, pad=True), 0, 2)
    # mesh_to_sdf works in a normalized cube of extent 2; recover the metric
    # voxel size and the world-space origin of voxel (0, 0, 0) from it.
    scale = 2 / np.max(mesh.bounding_box.extents)
    voxel_size = 2 / (vol_dim - 1) / scale
    vol_origin = mesh.bounding_box.centroid - voxel_size * (vol_dim - 1) / 2
    tsdf_vol = voxels.reshape(-1)
    # Every voxel starts fully observed (weight 100) with a dummy color.
    weight_vol = np.ones_like(tsdf_vol) * 100
    color_vol = np.ones_like(tsdf_vol)
    if save_path is None:
        save_path = os.path.join(os.path.dirname(mesh_path), "tsdf.npz")
    np.savez_compressed(save_path,
                        vol_dim=np.array([vol_dim, vol_dim, vol_dim], dtype=np.int32),
                        vol_origin=np.ascontiguousarray(vol_origin, dtype=np.float32),
                        voxel_size=voxel_size,
                        trunc_margin=0.015,
                        tsdf_vol=tsdf_vol,
                        weight_vol=weight_vol,
                        color_vol=color_vol
                        )
    print(f"tsdf volume has been saved to: {save_path}")
    return save_path
if __name__ == '__main__':
    print("Start generating TSDF from mesh model. This may take a few minutes...")
    # Hard-coded local dataset layout; adjust both paths for your machine.
    models = "/home/lsy/dataset/YCB_Video_Dataset/models_16k/"  # CHANGE THIS PATH
    objects = ['001_pringles']  # CHANGE THIS PATH
    for obj in objects:
        print(f"start processing {obj}")
        tic = time.time()
        mesh_path = os.path.join(models, obj, "google_16k", "textured.obj")  # CHANGE THIS PATH
        obj_tsdf_path = mesh_to_tsdf(mesh_path, vol_dim=101)
        print(f"Convert mesh to TSDF in {time.time() - tic}s.")
| true |
1bfc4853ada992122a852826f989dc45f9c7c350 | Python | zhosoft/python_learn | /lesson003/readme.py | UTF-8 | 644 | 2.875 | 3 | [] | no_license | # 模块定位顺序
# 当导入一个模块,python解析器对模块位置的搜索顺序是:
# 1、当前目录
# 2、如果不在当前目录,python则搜索在shell变量的PYTHONPATH下的每个目录
# 3、如果都找不到,python会查看默认路径,unix下,默认路径一般是/usr/local/python/
# 注意事项:
# 自己的文件名不要和已有的模块名重复,否则导致模块功能无法使用
# 使用from 模块名 import 功能的时候,如果功能名字重复,调用到的是最后定义或者导入的功能
# --------------------------------------------------------------------------------
| true |
f19dab35f971baf9a8a62a0c92c36d775ab73365 | Python | kgolezardi/simulation-project | /pqueue.py | UTF-8 | 893 | 3.328125 | 3 | [] | no_license | import heapq
import itertools
class PriorityQueue:
    """Min-priority queue with O(log n) push/pop and lazy O(1) removal.

    Follows the heapq documentation recipe: each item maps to a mutable heap
    entry ``[priority, count, item]``; removing an item marks its entry's
    item slot ``None`` instead of re-heapifying, and ``pop`` skips such dead
    entries.  The insertion counter breaks priority ties in FIFO order.
    Items must be hashable and must not be ``None`` (reserved as the
    removed-entry sentinel).
    """

    def __init__(self):
        self.heap = []                     # heap of [priority, count, item] entries
        self.counter = itertools.count()   # monotonically increasing tie-breaker
        self.entry_finder = {}             # item -> its live heap entry
        self._size = 0                     # number of live (non-removed) entries

    def size(self):
        """Return the number of live items in the queue."""
        return self._size

    def push(self, priority, x):
        """Add item ``x`` with ``priority``; re-pushing updates its priority.

        Bug fix: the original left the old heap entry live when ``x`` was
        pushed again, so a stale duplicate could later be popped and the size
        count drifted.  The old entry is now lazily invalidated first.
        """
        if x in self.entry_finder:
            self.remove(x)
        count = next(self.counter)
        entry = [priority, count, x]
        self.entry_finder[x] = entry
        heapq.heappush(self.heap, entry)
        self._size += 1

    def empty(self):
        """Return True when the queue holds no live items."""
        return self._size == 0

    def pop(self):
        """Remove and return ``(priority, item)`` for the smallest priority.

        Raises KeyError when the queue is empty.
        """
        while self.heap:
            priority, count, x = heapq.heappop(self.heap)
            if x is not None:  # skip entries invalidated by remove()/re-push
                del self.entry_finder[x]
                self._size -= 1
                return priority, x
        raise KeyError("pop from an empty priority queue")

    def remove(self, x):
        """Lazily remove item ``x``; raises KeyError if it is not present."""
        entry = self.entry_finder.pop(x)
        self._size -= 1
        entry[-1] = None  # mark the heap entry dead; pop() will discard it
| true |
62d293f6ef145e4ef92cb4ef29b8d1fceccdea09 | Python | npkhang99/Competitive-Programming | /Codeforces/703A.py | UTF-8 | 277 | 3.5625 | 4 | [] | no_license | n = int(input())
# Codeforces 703A (Mishka and Game): tally the rounds each player wins
# over n rounds read from stdin (n is read on the line above).
a = [0, 0]  # a[0]: Mishka's round wins, a[1]: Chris's round wins
for i in range(n):
    inp = input().split()
    # Lexicographic string comparison is safe here: dice values are the
    # single digits 1-6.
    if inp[0] > inp[1]:
        a[0] += 1
    elif inp[0] < inp[1]:
        a[1] += 1
if a[0] > a[1]:
    print("Mishka")
elif a[0] < a[1]:
    print("Chris")
else:
    print("Friendship is magic!^^")
| true |
71518af222145936935b41ddbc01dcdf4a212d12 | Python | vinsmokemau/Imaging | /gamma_correction.py | UTF-8 | 2,632 | 3.65625 | 4 | [
"MIT"
] | permissive | """Histogram Equalization of an Image."""
from skimage import data, color, io
import numpy as np
import matplotlib.pyplot as plt
def get_histogram(img):
    """Return the histogram of a grayscale image.

    img: numpy uint8 array [n, m] with intensities in 0-255
    return: numpy float array [256] where entry i counts pixels of value i
    """
    # np.bincount counts all pixels in one C-level pass instead of the
    # original O(n*m) Python double loop; minlength pads the result to all
    # 256 intensity levels and astype keeps the float dtype that the
    # original np.zeros-based accumulator produced.
    return np.bincount(img.ravel(), minlength=256).astype(np.float64)
def get_cumulative_distribution(img):
    """Return the cumulative distribution of a grayscale image.

    img: numpy uint8 array [n, m] with intensities in 0-255
    return: numpy float array [256]; entry i is the fraction of pixels <= i

    Bug fix: the original read the module-level ``histogram`` variable
    instead of computing the histogram of ``img`` itself, so every call
    after the first silently returned the CDF of the wrong image (and the
    initial ``probability = np.zeros(256)`` was immediately overwritten).
    """
    rows, columns = img.shape
    # Histogram of *this* image, counted in one C-level pass.
    histogram = np.bincount(img.ravel(), minlength=256)
    probability = histogram / (rows * columns)
    return probability.cumsum()
def gamma_correction(img, gamma):
    """Return the image after a gamma correction.

    img: numpy uint8 array with intensities in 0-255
    gamma: correction exponent; gamma < 1 brightens, gamma > 1 darkens
    return: numpy uint8 array of the same shape

    Bug fix: the original computed ``pixel ** gamma * 255`` on the raw
    0-255 values, which exceeds 255 for essentially every pixel (e.g.
    255 ** 0.05 * 255 ~= 338) and overflows the uint8 output.  The
    standard transform normalizes intensities to [0, 1] first:
    out = 255 * (in / 255) ** gamma.
    """
    # Vectorized over the whole array; np.rint avoids the systematic
    # darkening bias a plain truncating uint8 cast would introduce.
    normalized = img / 255.0
    return np.rint(normalized ** gamma * 255.0).astype(np.uint8)
# Original data: load the image, convert to 8-bit grayscale, and compute
# its histogram and cumulative distribution.
img = np.uint8(color.rgb2gray(io.imread('img/img1.jpeg')) * 255)
histogram = get_histogram(img)
cumulative_dist = get_cumulative_distribution(img)
# Normalize cumulative distribution so it shares the histogram's y scale
cumulative_dist *= (histogram.max() / cumulative_dist.max())

# Gamma Correction data (gamma = 0.05)
gc_img = gamma_correction(img, 0.05)
gc_histogram = get_histogram(gc_img)
gc_cumulative_dist = get_cumulative_distribution(gc_img)
# Normalize cumulative distribution so it shares the histogram's y scale
gc_cumulative_dist *= (gc_histogram.max() / gc_cumulative_dist.max())

# Plot images
fig1, axs1 = plt.subplots(1, 2)
# Plot the original image
axs1[0].imshow(img, cmap="gray")
axs1[0].set_title('Original Image')
# Plot the gamma-corrected image
axs1[1].imshow(gc_img, cmap="gray")
axs1[1].set_title('GC Image')

# Plot histograms and cumulative distributions
fig2, axs2 = plt.subplots(1, 2)
# Plot the histogram and cumulative distribution of the original image
axs2[0].plot(histogram, color='r')
axs2[0].plot(cumulative_dist, color='b')
axs2[0].set_title('Original Image')
axs2[0].legend(('histogram', 'cdf'), loc='upper left')
# Plot the histogram and cumulative distribution of the gamma-corrected image
axs2[1].plot(gc_histogram, color='r')
axs2[1].plot(gc_cumulative_dist, color='b')
axs2[1].set_title('GC Image')
axs2[1].legend(('histogram', 'cdf'), loc='upper left')

plt.show()
| true |
f64948c4293e6e1e1581a3fff4662432b4b4b929 | Python | AbzGtz/PythonGames | /games/tic-tac-toe.py | UTF-8 | 8,257 | 4.03125 | 4 | [] | no_license | ########################################################################
# Global Variables
########################################################################
player_setup = {'PL1':['token','turn'],'PL2':['token','turn']} # Keeps track of a player's token - player_setup['PL1'][0] - and whether it is its turn - player_setup['PL1'][1]
standings = {'PL1':0,'PL2':0,'TIE':0}  # wins per player plus tie count, kept across games
# Winning box triples; the board is laid out 7 8 9 / 4 5 6 / 3 2 1 (see
# display_board), so these cover its rows, columns and diagonals.
win_combinations = ((1,2,3),(4,5,6),(7,8,9),(7,5,1),(9,5,3),(7,4,3),(8,5,2),(9,6,1))
board = list([' ']*10)  # index 0 unused; boxes are addressed 1-9
box_number = tuple(list('123456789'))  # the valid keyboard entries
tokens = ('X','O')
game_count = 0
winner = ""  # "", the winner's token, or 'TIE'
########################################################################
# Packages
########################################################################
from random import randint
########################################################################
# TOP-Level functions
########################################################################
def check_game_status():
    """
    Checks if the game is over by examining the board entries and checking
    them against the known winning combinations.
    If the game is over, it will set the winner to X, O, or TIE.
    """
    global player_setup
    global win_combinations
    global board
    global winner
    for box1,box2,box3 in win_combinations:
        if board[box1] == board[box2] and board[box2] == board[box3] and board[box2] != " ":
            winner = board[box1]
            break
    else:
        # for/else: no winning line was found; it is a tie once all 9 boxes are taken.
        if (board.count('X') + board.count('O')) == 9:
            winner = 'TIE'
def display_board():
    """
    Displays the Tick-Tack-Toe board and also a numbered pad so the user can remember which
    tick-tack-toe box is mapped to which number in their keyboard.
    The pad is laid out 7 8 9 / 4 5 6 / 3 2 1 (mirroring a numeric keypad).
    """
    # NOTE(review): the runs of spaces inside these literals look collapsed by
    # whitespace mangling -- verify the board art against the original file.
    global board
    global box_number
    print"\n"*100
    print" | | " + " "*20 + " | | "
    print " " + board[7] +" | "+ board[8] +" | " + board[9] + " "*20 + " " + box_number[6] +" | "+ box_number[7] +" | " + box_number[8]
    print" | | " + " "*20 + " | | "
    print "---|---|---" + " "*20 + "---|---|---"
    print" | | " + " "*20 + " | | "
    print " " + board[4] +" | "+ board[5] +" | " + board[6] + " "*20 + " " + box_number[3] +" | "+ box_number[4] +" | " + box_number[5]
    print" | | " + " "*20 + " | | "
    print "---|---|---" + " "*20 + "---|---|---"
    print" | | " + " "*20 + " | | "
    print " " + board[3] +" | "+ board[2] +" | " + board[1] + " "*20 + " " + box_number[2] +" | "+ box_number[1] +" | " + box_number[0]
    print" | | " + " "*20 + " | | "
    print"\n"*5
def make_a_move(player_token, box_selected = "none"):
    """
    Recursive function to ask users to make a move and select a numbered-box to take. This function
    also ensures the integrity of the input by the user with the below:
    1) If the player enters anything other than a valid number (e.g. 1-9), it will ask again for input.
    2) If the player enters a box already taken, it will tell so to the user and request for another entry.
    After a successful move by a player, it calls check_game_status() and gives turn to the next player
    """
    global board
    global player_setup
    display_board()
    # box_selected is only set on the recursive retry after an occupied box.
    if box_selected != "none":
        print "Square {} already has a {}.".format( box_selected, board[int(box_selected)])
    # The below if/else is used to keep track of the player that just made a move and assign a turn to the next player.
    if player_token == player_setup['PL1'][0]:
        player = "Player 1"
        player_setup['PL1'][1] = ""
        player_setup['PL2'][1] = "GO"
    else:
        player = "Player 2"
        player_setup['PL1'][1] = "GO"
        player_setup['PL2'][1] = ""
    # In the below 8 lines we make sure a player enters a valid number for a box (1-9), and checks that the box is empty.
    # If not, it will recursively ask the current player to make an acceptable move. Once done, it will update the board
    # accordingly and check if the game is over.
    move = raw_input( "{} ({}). Enter the number of the box you want to take :".format(player, player_token))
    while move not in box_number:
        move = raw_input( "{} ({}). {} is not a valid entry. Enter the number of the box you want to take :".format(player, player_token, move))
    if board[int(move)] in tokens:
        make_a_move(player_token, move)
    else:
        board[int(move)] = player_token
        check_game_status()
def quit_game():
    """
    Displays the final message once the users decide to quit the game. It will display a few stats regarding the game:
    Games played, number of wins per user and ties.
    """
    global game_count
    global standings
    print"\n"*100
    print "Thank you for playing !"
    print "Here are the final Stats:"
    print "************************************"
    print "# #"
    print "# Games Played: {} #".format(game_count)
    print "# Player 1: {} wins #".format(standings['PL1'])
    print "# Player 2: {} wins #".format(standings['PL2'])
    print "# Ties : {} #".format(standings['TIE'])
    print "# #"
    print "************************************"
    print "\n"
    # Terminates the interpreter; nothing after this call runs.
    quit()
def reset_game ():
    """
    Resets the game after the players decide they want to play another game.
    Note: standings and game_count deliberately persist across games.
    """
    global player_setup
    global board
    global winner
    player_setup = {'PL1':['token','turn'],'PL2':['token','turn']}
    board = list([' ']*10)
    winner = ""
def select_player():
    """
    Assigns tokens to both players and designates which player will start first.
    Player 1 picks X or O (or q to quit); Player 2 gets the remaining token
    and Player 1 always takes the first turn.
    """
    global player_setup
    token1 = " "
    while token1 not in ['X','O','Q']:
        token1 = raw_input( "Player 1: select your token ( O, X or q to quit): " ).upper()
    if token1 == 'Q':
        quit_game()
    elif token1 == 'X':
        player_setup['PL1'] = [token1,'GO']
        player_setup['PL2'] = ['O','']
    else:
        player_setup['PL1'] = [token1,'GO']
        player_setup['PL2'] = ['X','']
def welcome_display():
    """
    Displays the 'Welcome' message when the game first starts or after every new subsequent game.
    The first game shows a plain banner; later games also show the standings.
    """
    global game_count
    global standings
    print"\n"*100
    print "************************************"
    print "# #"
    if game_count == 0:
        print "# Welcome to Tic Tac Toe Game ! #"
        print "# #"
    else:
        print "# Welcome to Game # {} #".format((game_count + 1))
        print "# Standings so far: #"
        print "# Player 1: {} wins #".format(standings['PL1'])
        print "# Player 2: {} wins #".format(standings['PL2'])
        print "# Ties : {} #".format(standings['TIE'])
        print "# #"
    print "************************************"
    print "\n"*5
########################################################################
# main_game_fuction controls the flow of the game
########################################################################
def main_game_fuction ():
    """
    Controls the flow of one game: welcome screen, token selection, the move
    loop, result announcement with standings update, and finally an offer to
    play again (implemented as recursion into itself).
    """
    global winner
    global player_setup
    global game_count
    welcome_display()
    select_player()
    game_count +=1
    # Alternate moves until check_game_status() sets a winner or a tie.
    while winner == "":
        if player_setup['PL1'][1] == 'GO':
            make_a_move(player_setup['PL1'][0])
        else:
            make_a_move(player_setup['PL2'][0])
    # Show the final board and announce the result.
    display_board()
    if winner == 'TIE':
        print " WE HAVE A TIE GAME !!! "
        standings['TIE']+=1
    elif winner == player_setup['PL1'][0]:
        print " PLAYER 1 WINS !!!\n"
        standings['PL1']+=1
    else:
        print " PLAYER 2 WINS !!!\n"
        standings['PL2']+=1
    result = raw_input('Would you like to play again: [y] for YES: ')
    if result.upper() == 'Y':
        reset_game ()
        main_game_fuction ()
    else:
        quit_game()
########################################################################
# main_game_fuction call -- program entry point, starts the first game
########################################################################
main_game_fuction()
| true |
9b140687711e4e6f9afcd8607b4324d35ffaa3d5 | Python | MinuraSilva/superdict | /other/original_code.py | UTF-8 | 2,644 | 3.71875 | 4 | [] | no_license | import re
# Only for reference
# This is the original code written for a scraping project.
def extract_val_re(obj, keys):
    """
    Recursively extract values from a nested dict/list structure by key.

    Input:
        obj: a (possibly nested) dict, or a list containing such structures.
        keys: a string, a compiled regex, or a list mixing both.
            A plain string ``s`` matches key names exactly (it is compiled
            to ``^s$``); a compiled regex is searched against each key name.

    If a single key is given, every value stored under a matching key at
    any nesting level is collected. If a list of keys is given they are
    applied iteratively: the first key filters ``obj``, the second key
    filters that result, and so on.

    Output: a list of the matching values.

    Raises:
        TypeError: if a key is neither a string nor a compiled regex object.
        (The original used ``assert False`` for this, which silently
        disappears under ``python -O``.)
    """
    def extract_single(node, pattern):
        # Collect values for one already-compiled pattern, descending into
        # nested dicts and lists. Recursing directly (instead of through
        # extract_val_re) avoids re-validating the pattern on every level.
        found = []
        if isinstance(node, dict):
            for k, v in node.items():
                if re.search(pattern, k):
                    found.append(v)
                # Descend into containers regardless of whether the key matched,
                # so deeper matches are still collected.
                if isinstance(v, dict):
                    found.extend(extract_single(v, pattern))
                elif isinstance(v, list):
                    for item in v:
                        found.extend(extract_single(item, pattern))
        elif isinstance(node, list):
            for item in node:
                found.extend(extract_single(item, pattern))
        return found

    def as_pattern(key):
        # Normalize one key to a compiled regex; exact-match plain strings.
        if isinstance(key, str):
            return re.compile(f"^{key}$")
        if isinstance(key, re.Pattern):
            return key
        raise TypeError("key is not either a string or a compiled regex object")

    if isinstance(keys, list):
        # Apply each key to the result of the previous one.
        filtered = obj
        for key in keys:
            filtered = extract_single(filtered, as_pattern(key))
        return filtered
    return extract_single(obj, as_pattern(keys))
| true |
72af059a2c6a337a82a87c9d69f3ab64e0316c50 | Python | crim-ca/RACS | /jassrealtime/document/interval.py | UTF-8 | 1,360 | 3.890625 | 4 | [] | no_license | # coding: utf-8
class Interval:
    """A numeric interval whose two endpoints can independently be open or
    closed, plus a flag controlling how containment is interpreted."""

    def __init__(self, begin, end, openBegin=False, openEnd=False, isFullyInclusif=True):
        """
        Create a new interval. ``begin`` must be < ``end``.

        Endpoint conventions follow mathematics:
            open, open   -> (a, b) = {a < x < b}
            close, close -> [a, b] = {a <= x <= b}

        :param begin: start of the interval.
        :param end: end of the interval.
        :param openBegin: if True, the begin endpoint is open (excluded).
        :param openEnd: if True, the end endpoint is open (excluded).
        :param isFullyInclusif: if True, containment tests performed
            elsewhere should require an interval to fit completely inside
            this one; if False, edge contact is tolerated (e.g. [4, 5]
            would be accepted against a [1, 4] bound).
        """
        self.isFullyInclusif = isFullyInclusif
        self.openBegin = openBegin
        self.openEnd = openEnd
        self.begin = begin
        self.end = end
| true |
adb4997a590302b524a7033e2440b36ee2f1a93f | Python | optionalg/cracking-the-coding-interview-3 | /1-3.py | UTF-8 | 253 | 3.296875 | 3 | [] | no_license | # Time: O(n^2)
# Space: O(1)
def is_permutation(str1, str2):
    """Return True if *str2* is a permutation of *str1*, else False.

    Bug fix: the original called ``str2.replace(char, '', 1)`` and
    discarded the result — Python strings are immutable, so ``str2`` never
    shrank and the function could only return True for two empty strings
    (returning None otherwise). Two strings are permutations of each other
    exactly when their character multisets match.
    """
    from collections import Counter  # local import keeps the module's top untouched
    if len(str1) != len(str2):
        return False
    return Counter(str1) == Counter(str2)
a66dd49368db25a1d82eba58e960d3383878920f | Python | NewMike89/Python_Stuff | /Ch.4/4-10slices.py | UTF-8 | 902 | 4.375 | 4 | [] | no_license | # Michael Schorr
# 4/3/19
# using PLAYERS.PY to print some lines with certain sections of the list
# Demonstrates Python list slicing on a roster of player names.
players = ['charles', 'martina', 'michael', 'florence', 'eli']

# Basic slice forms: [start:stop], open-ended starts/ends, negative indices.
print(players[0:3])   # indices 0..2
print(players[1:4])   # indices 1..3
print(players[:4])    # from the beginning through index 3
print(players[2:])    # from index 2 through the end
print(players[-3:])   # the last three entries

# Loop over selected slices, printing a label and each name title-cased.
for label, section in (
    ("Here are the first three players in the list: ", players[:3]),
    ("Here are the three players in the middle of the list: ", players[1:4]),
    ("Here are the three players from the end of the list: ", players[-3:]),
):
    print(label)
    for member in section:
        print(member.title())
| true |
a6bb93c2538f9cbd7fbb2060508b2bdd786a18da | Python | gritjz/Python_Crawler | /7_Dynamic Loading Data Process/03_selenium自动化操作.py | UTF-8 | 626 | 2.65625 | 3 | [] | no_license | from selenium import webdriver
from time import sleep
# Launch a local Chrome instance through the bundled chromedriver binary.
bro = webdriver.Chrome(executable_path='./chromedriver')
bro.get('https://world.taobao.com/')
# locate the search box by its element id
search_input = bro.find_element_by_id('mq')
# type the search query into it
search_input.send_keys('iPhone 12')
# run a JS snippet that scrolls the page to the bottom
bro.execute_script('window.scrollTo(0, document.body.scrollHeight)')
sleep(2)
# locate the search button by XPath
button = bro.find_element_by_xpath('//*[@id="J_PopSearch"]/div[1]/div/form/input[2]')
# click to submit the search
button.click()
bro.get('https://www.baidu.com')
sleep(2)
# exercise browser history navigation: back, then forward
bro.back()
bro.forward()
sleep(5)
bro.quit()
4d517490bc182816206ecd8bd5315dd41b143280 | Python | MinjeongSuh88/python_workspace | /20200731/win4.py | UTF-8 | 979 | 3.625 | 4 | [] | no_license | # 구구단 3단 출력하는 클릭 버튼 만들기
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLineEdit
class MyApp(QWidget):
    """Small PyQt5 window: the user types a number into a line edit,
    clicks the button, and that number's multiplication table (gugudan)
    is printed to stdout."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        # Window title is user-facing Korean text ("print multiplication table").
        self.setWindowTitle('구구단 출력하기')
        self.resize(800,600)
        # Line edit where the user types the table's base number.
        self.le = QLineEdit(self)
        self.le.resize(100,100)
        self.le.move(350,100)
        font = self.le.font()
        font.setPointSize(20)
        self.le.setFont(font)
        # Button labelled "click" (Korean); wired to self.print below.
        btn = QPushButton('클릭',self)
        btn.resize(100,100)
        btn.move(350,300)
        btn.clicked.connect(self.print)
        self.show()

    def print(self):
        # NOTE(review): this method name shadows the builtin ``print`` as an
        # attribute of the instance; the calls below still resolve to the
        # builtin at module scope.
        n = int(self.le.text())
        for i in range(1,10):
            print(n,'*',str(i),'=',str(n*i))
if __name__ == "__main__":
app = QApplication(sys.argv)
ex = MyApp()
sys.exit(app.exec_()) | true |
431e9926c59c7e1e11e4ec60a336d18723f70ec6 | Python | JustinWayneOlson/Air-Traffic-Analysis | /application/src/Routing.py | UTF-8 | 7,093 | 2.921875 | 3 | [] | no_license | # Referenced http://theory.stanford.edu/~amitp/GameProgramming/AStarComparison.html for this implementation
import numpy
import sys
import matplotlib.pyplot as plt
# Lat lon of Portal ND 48.9959° N, 102.5496° W
# Lat lon of Eureka calif 40.8021° N, 124.1637° W
# Homestead fl lat lon 25.4687° N, 80.4776° W
# cutler maine lat lon 44.6576° N, 67.2039° W
# Specify the dimensions of the entire grid in miles. x,y, and altitude.
TOP_LAT = 48.9959 #Portal ND
BOT_LAT = 25.4687 #Homestead FL
LEFT_LON = 124.1637 #Eureka CA
RIGHT_LON = 67.2039 #Cutler ME
LAT_DIM = TOP_LAT - BOT_LAT
LON_DIM = LEFT_LON - RIGHT_LON
ALT_DIM = 8000
#http://stackoverflow.com/questions/28242917/numpy-array-to-graph
def coordsValid(lat_coords, lon_coords):
    """Return True when both coordinate lists have equal length and every
    point lies inside the bounding box defined by the module constants
    TOP_LAT / BOT_LAT / LEFT_LON / RIGHT_LON (longitudes stored positive).

    Bug fixes: the original combined comparisons with bitwise ``|`` —
    which binds tighter than ``>``/``<`` and raises TypeError on floats —
    and started the loop at index 1, never validating the first point.
    """
    if len(lat_coords) != len(lon_coords):
        return False
    for i in range(len(lat_coords)):
        # latitude must stay between the bottom and top edges
        if lat_coords[i] > TOP_LAT or lat_coords[i] < BOT_LAT:
            return False
        # longitude must stay between the right and left edges
        if lon_coords[i] > LEFT_LON or lon_coords[i] < RIGHT_LON:
            return False
    return True
def getCoords():
    """Return a fixed ``(lat_list, lon_list)`` pair of sample coordinates.

    Cleanup: the original also built two numpy random-integer arrays that
    were never used and printed a stray debug string ("hola"); both were
    dead weight and have been removed. Presumably this is a placeholder
    for real coordinate input — TODO confirm.
    """
    lat = [30, 35, 40]
    lon = [70, 100, 120]
    return lat, lon
def set_squares_to_inf(graph, lat_coords, lon_coords, latlon_gridsize):
    """Mark the grid cells containing the given coordinates as impassable.

    Each (lat, lon) pair is shifted into box-relative coordinates (offset
    from the module constants BOT_LAT / RIGHT_LON), quantized to a cell
    index at ``latlon_gridsize`` resolution, and that cell's weight is set
    to +inf. Mutates *graph* in place and also returns it.

    Fix: dropped the leftover debug ``print(m, n)``.
    """
    for i in range(len(lat_coords)):
        # shift into box-relative coordinates, then quantize to a cell index
        row = lat_coords[i] - BOT_LAT
        col = lon_coords[i] - RIGHT_LON
        row = int(numpy.floor(row / latlon_gridsize))
        col = int(numpy.floor(col / latlon_gridsize))
        graph[row, col] = numpy.inf
    return graph
# could change add nofly zones and add weather data functions into one function and add an additional flag..
# should take in the coordinates of no fly data and map that to our grid indices
def add_nofly_zones(graph, xy_gridsize):
    """Overlay no-fly zones onto *graph*: fetch the no-fly coordinates,
    validate them, and mark the matching cells impassable. Returns the
    (in-place mutated) graph.

    Bug fix: ``coordsValid`` was called with no arguments, which raises
    TypeError at runtime; it now receives the coordinate lists it is
    supposed to validate.
    """
    x_coords, y_coords = getCoords()
    if coordsValid(x_coords, y_coords):
        set_squares_to_inf(graph, x_coords, y_coords, xy_gridsize)
    return graph
# should take in the coordinates of weather data and map that to our grid indices
def add_weather_data(graph, xy_gridsize):
    """Overlay (placeholder) weather obstacles onto *graph*: fetch the
    weather coordinates and mark their cells impassable. Returns the
    (in-place mutated) graph."""
    lat_pts, lon_pts = getCoords()
    set_squares_to_inf(graph, lat_pts, lon_pts, xy_gridsize)
    return graph
def remove_lowest(graph, nodes_to_visit):
    """Pop the cheapest pending node.

    Scans *nodes_to_visit* (an array of [row, col] index pairs), finds the
    entry whose weight in *graph* is smallest, removes that row from the
    pending array, and returns ``[graph_index, nodes_to_visit]`` where
    ``graph_index`` is the winning (row, col) tuple.
    """
    best_pos = 0
    best_cost = graph[nodes_to_visit[0][0], nodes_to_visit[0][1]]
    for pos in range(nodes_to_visit.shape[0]):
        cost = graph[nodes_to_visit[pos][0], nodes_to_visit[pos][1]]
        if cost < best_cost:
            best_cost = cost
            best_pos = pos
    print(best_pos)
    graph_index = (nodes_to_visit[best_pos][0], nodes_to_visit[best_pos][1])
    nodes_to_visit = numpy.delete(nodes_to_visit, best_pos, 0)
    print(nodes_to_visit)
    return [graph_index, nodes_to_visit]
def get_neighbor_index(neigh, currLoc, dims):
    """Return ``(neighbour coordinates, step distance)`` for neighbour
    number *neigh* (0-7, clockwise starting at up-left) of *currLoc* in a
    grid of shape *dims* = (rows, cols). Diagonal steps cost sqrt(2),
    orthogonal steps cost 1.

    NOTE(review): when the requested neighbour lies off the grid edge (or
    ``neigh`` is outside 0-7) no branch assigns ``neigh_coords`` and the
    final ``return`` raises UnboundLocalError — callers must only request
    in-bounds neighbours, or this needs an explicit sentinel return.
    NOTE(review): ``currLoc`` is unpacked as (y, x) but the result is
    built as [x, y] — confirm the intended axis order against callers.
    """
    y = currLoc[0]
    x = currLoc[1]
    end_y = dims[0] - 1
    end_x = dims[1] - 1
    dist = numpy.inf
    # up and left
    if neigh == 0 and x != 0 and y != 0:
        neigh_coords = [x-1, y-1]
        dist = numpy.sqrt(2)
    # up
    elif neigh == 1 and y != 0:
        neigh_coords = [x, y - 1]
        dist = 1
    # up and right
    elif neigh == 2 and y != 0 and x != end_x:
        neigh_coords = [x + 1, y - 1]
        dist = numpy.sqrt(2)
    # right
    elif neigh == 3 and x != end_x:
        neigh_coords = [x + 1, y]
        dist = 1
    # right and down
    elif neigh == 4 and x != end_x and y != end_y:
        neigh_coords = [x + 1, y + 1]
        dist = numpy.sqrt(2)
    # down
    elif neigh == 5 and y != end_y:
        neigh_coords = [x, y + 1]
        dist = 1
    # down and left
    elif neigh == 6 and y != end_y and x != 0:
        neigh_coords = [x - 1, y + 1]
        dist = numpy.sqrt(2)
    # left
    elif neigh == 7 and x != 0:
        neigh_coords = [x - 1, y]
        dist = 1
    return neigh_coords, dist
def get_index_num(neigh_index, graph_dims):
    """Flatten a (row, col) pair into a row-major linear index for a grid
    with *graph_dims* = (rows, cols)."""
    row, col = neigh_index[0], neigh_index[1]
    return row * graph_dims[1] + col
def a_star(graph, origin, dest):
    """Intended A* search from *origin* to *dest* over the cost grid.

    NOTE(review): this function cannot run as written; kept verbatim as
    work-in-progress. Observed problems:
      * ``graph.shape()`` — numpy's ``shape`` is an attribute, not callable;
      * ``{[origin[0], origin[1]]}`` puts an unhashable list into a set;
      * ``remove_lowest`` returns 2 values but 4 are unpacked, and
        ``visited`` / ``to_neigh_dist`` are never defined before use;
      * the heuristic ``h`` defined below is never consulted.
    """
    dims = graph.shape()
    parent_graph = numpy.zeros(dims)
    #set the distance at the origin node to zero
    graph[origin[0], origin[1]] = 0
    currLoc = origin
    nodes_to_visit = {[origin[0], origin[1]]}
    nodes_done = {[origin[0], origin[1]]}
    nodes_done.remove(origin)
    # need to store the cost of getting to a node somewhere
    # there will be one extra loop here that removes the origin and sets it to our current location
    while(currLoc != dest):
        (currLoc, nodes_to_visit, visited, to_neigh_dist) = remove_lowest(graph, nodes_to_visit, visited)
        for i in range(0, 8):
            currCost = graph[currLoc] + to_neigh_dist
            neigh_index = get_neighbor_index(i, currLoc, dims)
            if (numpy.any(nodes_to_visit == neigh_index) and currCost < graph[neigh_index]):
                nodes_to_visit.remove(neigh_index)
            if (numpy.any(nodes_done == neigh_index) and currCost < graph[neigh_index]):
                nodes_done.remove(neigh_index)
            if (numpy.all(nodes_to_visit != neigh_index) and numpy.all(nodes_done != neigh_index)):
                graph[neigh_index] = currCost
                nodes_to_visit.add(neigh_index)
                parent_graph[currLoc] = get_index_num(neigh_index, dims)
    return graph
# huristic for A*
def h(graph, curr_node, end_node):
    """A* heuristic: straight-line (Euclidean) distance between two nodes.

    ``graph[node]`` must yield a ``(lat, lon)`` pair for each node.

    Bug fixes: ``numpy.sqare`` was a typo (AttributeError at runtime), and
    the second squared term subtracted ``lat_curr`` instead of ``lon_end``.
    """
    lat_curr, lon_curr = graph[curr_node]
    lat_end, lon_end = graph[end_node]
    eucl_dist = numpy.sqrt(numpy.square(lat_curr - lat_end) +
                           numpy.square(lon_curr - lon_end))
    return eucl_dist
# grid-size in miles
def generate_graph(xy_gridsize, use_weather, use_nofly):
    """Build the 2-D cost grid covering the lat/lon bounding box at
    *xy_gridsize*-mile resolution, optionally overlaying weather and
    no-fly obstacles, show it with matplotlib, and return it.

    NOTE(review): ``plt.show()`` blocks until the plot window is closed —
    a side effect worth removing for non-interactive use.
    """
    # create empty 2-D matrix (create altitude as a later feature)
    x_dim = int(numpy.ceil(LAT_DIM/xy_gridsize))
    y_dim = int(numpy.ceil(LON_DIM/xy_gridsize))
    print('x_dim:', x_dim)
    print('y_dim:', y_dim)
    graph_2d = numpy.zeros((x_dim, y_dim))
    # populate graph with weather data
    if use_weather:
        graph_2d = add_weather_data(graph_2d, xy_gridsize)
    # populate graph with no-fly zones
    if use_nofly:
        graph_2d= add_nofly_zones(graph_2d, xy_gridsize)
    #plt.show(block=True)
    #plt.plot(2,3)
    plt.matshow(graph_2d)
    plt.show()
    #
    return graph_2d
'''
def find_optimal_path(origin, dest, xy_gridsize, use_weather, use_nofly):
gridmap = generate_graph(xy_gridsize, use_weather, use_nofly)
# x and y grid square side size are same so don't need to pass in those
generated_optimal_path = a_star(gridmap, origin, dest)
diff_from_direct(generated_optimal_path)
return 9
generate_graph(1, 1, 0)
'''
| true |
c91e3a97bbb7ccc201855a343c491874d53fd6ae | Python | Ackermannn/MyLeetcode | /src/edu/neu/xsz/leetcode/algorithms/easy/066_加一.py | UTF-8 | 341 | 3.375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
leetcode 66.加一
"""
def add(x):
    """Increment the digit list *x* (most-significant digit first) by one,
    mutating it in place and returning it — LeetCode 66 "Plus One".

    Walks the digits from least to most significant; a 9 rolls over to 0
    and the carry moves left, any other digit absorbs the carry and we are
    done. If every digit was 9, the list grows by one and becomes 1 0...0.
    """
    for idx in range(len(x) - 1, -1, -1):
        if x[idx] + 1 == 10:
            x[idx] = 0          # carry propagates to the next digit left
        else:
            x[idx] += 1
            return x            # carry absorbed; nothing left to do
    # Every digit rolled over (all nines): extend and set the leading 1.
    x.append(0)
    x[0] = 1
    return x
x = [9,8,9]
print(add(x)) | true |
9b177952c79da4aded9db10b902370af9a2eadcd | Python | DouglasBzzz/problemas_e_solucoes_python | /bytecode_behind/main.py | UTF-8 | 108 | 3.234375 | 3 | [] | no_license | def saudacao(name):
return "Olá, "+name+" !"
print(saudacao("Douglas"))
import dis
dis.dis(saudacao) | true |
4d5e823ac441493d9130cb460ec196fc3ac9ea9b | Python | dbetm/crash-course-python | /intro/excepciones02.py | UTF-8 | 440 | 4.15625 | 4 | [] | no_license | # Capturar varias excepciones
# Demonstrates catching several exception types from one try block.
def divide():
    """Prompt for two numbers and print their quotient.

    Non-numeric input and division by zero are each reported with their
    own (Spanish-language) message; the ``finally`` block always prints
    "Hecho" (done), whether or not an exception occurred.
    """
    try:
        op1 = float(input("Número 1: "))
        op2 = float(input("Número 2: "))
        print("La división es: " + str(op1/op2))
    except ValueError:
        print("El valor introducido es erróneo")
    except ZeroDivisionError:
        print("No se puede dividir entre 0")
    # except:
    # print("Error genérico")
    finally: # Runs always, whether or not the exception occurred
        print("Hecho")
divide() | true |
149290f56501785fa781eec46e587a233a2872ec | Python | EchoChloe/hdf5examples | /hdf5examples/low_level/h5ex_t_float.py | UTF-8 | 1,787 | 3.5625 | 4 | [] | no_license | """
This example shows how to read and write float datatypes to a dataset. The
program first writes floats to a dataset with a dataspace of DIM0xDIM1, then
closes the file. Next, it reopens the file, reads back the data, and outputs
it to the screen.
"""
import sys
import numpy as np
import h5py
FILE = "h5ex_t_float.h5"
DATASET = "DS1"
# Strings are handled very differently between python2 and python3.
if sys.hexversion >= 0x03000000:
FILE = FILE.encode()
DATASET = DATASET.encode()
DIM0 = 4
DIM1 = 7
def run():
    """Write a DIM0 x DIM1 array of doubles to an HDF5 file via the
    low-level h5py API, reopen the file, read the data back, and print it."""
    # Initialize the data.
    wdata = np.zeros((DIM0, DIM1), dtype=np.float64)
    for i in range(DIM0):
        for j in range(DIM1):
            wdata[i][j] = i / (j + 0.5) + j
    # Create a new file using the default properties.
    fid = h5py.h5f.create(FILE)
    # Create the dataspace. No maximum size parameter needed.
    dims = (DIM0, DIM1)
    space = h5py.h5s.create_simple(dims)
    # Create the dataset and write the floating point data to it. In this
    # example, we will save the data as 64 bit little endian IEEE floating
    # point numbers, regardless of the native type. The HDF5 library
    # automatically converts between different floating point types.
    dset = h5py.h5d.create(fid, DATASET, h5py.h5t.IEEE_F64LE, space)
    dset.write(h5py.h5s.ALL, h5py.h5s.ALL, wdata)
    # Explicitly close and release resources.
    del space
    del dset
    del fid
    # Open file and dataset using the default properties.
    fid = h5py.h5f.open(FILE)
    dset = h5py.h5d.open(fid, DATASET)
    rdata = np.zeros((DIM0, DIM1), dtype=np.float64)
    dset.read(h5py.h5s.ALL, h5py.h5s.ALL, rdata)
    # Output the data to the screen.
    print("%s:" % DATASET)
    print(rdata)
if __name__ == "__main__":
run()
| true |
69ab2d5d6e4e695413a8f4c60aa412e3d71f1512 | Python | iampramodyadav/FEA | /test2.py | UTF-8 | 459 | 3.125 | 3 | [
"MIT"
] | permissive | from sympy import *
def SHAPE(p,z):
    '''
    SHAPE(p,z)
    p: order (p) of approximation
    z: value of the natural coordinate

    Returns the order-p Lagrange shape functions as a list of symbolic
    expressions in the SymPy symbol ``z``.

    NOTE(review): the ``z`` argument is immediately shadowed by
    ``z = Symbol('z')`` below, so the value passed in is ignored and the
    result is always symbolic — confirm whether numeric evaluation at the
    given ``z`` was intended.
    '''
    z=Symbol('z')
    n=[]
    # equally spaced nodes on the natural interval [-1, 1]
    for i in range(0, p+1):
        point=-1
        point=point+2*i/p
        n.append(point)
    shape=[1]*(p+1)
    # Lagrange basis: product over j != i of (z - n_j) / (n_i - n_j)
    for i in range(0,p+1):
        for j in range(0,p+1):
            if i!=j:
                shape[i]=shape[i]*((z-n[j])/(n[i]-n[j]))
    return shape
| true |
a8f9ededbf2e1620f33e32b8c843a0d95654f5b8 | Python | cnbcloud/mjserver | /majiang2/src/majiang2/win_loose_result/table_results.py | UTF-8 | 1,849 | 2.515625 | 3 | [] | no_license | # -*- coding=utf-8
'''
Created on 2016年9月23日
本桌的输赢结果
1)陌生人桌,打完后直接散桌,有一个round_results
2)自建桌,SNG,打几把,有几个round_results
@author: zhaol
'''
from freetime.util import log as ftlog
class MTableResults(object):
    """Win/lose results for one mahjong table.

    (Translated from the original Chinese header:) a stranger table is
    torn down after a single game and holds one round result; a
    self-created SNG table holds one result per round played.
    """
    def __init__(self):
        super(MTableResults, self).__init__()
        self.__results = []
        self.__score = None

    def reset(self):
        # Drop all accumulated round results and the running total.
        self.__results = []
        self.__score = None

    @property
    def score(self):
        # Running per-seat total score (None until a result is added).
        return self.__score

    @property
    def results(self):
        # Per-round result objects, in the order they were added.
        return self.__results

    def addResult(self, result):
        """Append a round result and fold its per-seat scores into the
        running total."""
        self.__results.append(result)
        # Initialize or accumulate the total score.
        # NOTE(review): the seat count comes from len(result.delta) but the
        # values summed come from result.score — confirm both lists always
        # have the same length.
        if self.__score is None:
            lenCount = len(result.delta)
            self.__score = [0 for _ in range(lenCount)]
            for index in range(lenCount):
                self.__score[index] = result.score[index]
        else:
            for index in range(len(self.__score)):
                self.__score[index] += result.score[index]
        ftlog.debug('MTableResults.addResult deltaScore:', result.score
                    , ' totalScore:', self.__score)

    def addGangResult(self, result):
        """Fold a gang (kong) result's per-seat deltas into the running
        total score; unlike addResult, no round result is appended and
        ``result.delta`` (not ``result.score``) is summed."""
        if self.__score is None:
            lenCount = len(result.delta)
            self.__score = [0 for _ in range(lenCount)]
            for index in range(lenCount):
                self.__score[index] = result.delta[index]
        else:
            for index in range(len(self.__score)):
                self.__score[index] += result.delta[index]
        ftlog.debug('MTableResults.addGangResult deltaScore:', result.score
                    , ' totalScore:', self.__score)
| true |
25d0ef8a47cb530092f3255109b40b285b7a1276 | Python | jichenqing/MapQuest | /mapquest_interface.py | UTF-8 | 2,499 | 2.796875 | 3 | [] | no_license | #Sue Ji 33337876
import mapquest_APIs
import mapquest_output
import json
def _user_request()->list:
    '''
    Reads the number of addresses from stdin, then reads that many
    address lines and returns them as a list.

    NOTE(review): fewer than 2 addresses terminates the whole program via
    ``exit()`` instead of raising or returning an empty result.
    '''
    user_input=int(input())
    if user_input>=2:
        locations=[]
        for address in range(user_input):
            locations.append(input())
        return locations
    else:
        exit()
def _request_info()->list:
    '''
    Reads the number of requested info items (must be 1..5) from stdin,
    then reads that many info keywords and returns them as a list.

    NOTE(review): an out-of-range count terminates the whole program via
    ``exit()``.
    '''
    info=[]
    info_num=int(input())
    if info_num>5 or info_num<1:
        exit()
    else:
        for asked_data in range(info_num):
            info.append(input())
    return info
def _run_class(output:dict,classes:['class'])->dict:
    '''
    Feed *output* to each handler's ``result`` method in turn (the
    handlers print/display their slice of the route data), then return
    the output dict unchanged.
    '''
    data = output
    for handler in classes:
        handler.result(data)
    return data
def _commands(userinput:list)->list:
    '''
    Map the user's requested info keywords to freshly constructed
    mapquest_output handler instances, preserving input order.

    Improvement: the original ran five independent ``if`` tests for every
    keyword; a dispatch table does a single lookup per keyword and is
    easier to extend. Unknown keywords are (as before) silently skipped.
    '''
    dispatch = {
        'LATLONG': mapquest_output.Latlong,
        'STEPS': mapquest_output.Steps,
        'TOTALTIME': mapquest_output.TotalTime,
        'TOTALDISTANCE': mapquest_output.TotalDistance,
        'ELEVATION': mapquest_output.Elevations,
    }
    return [dispatch[info]() for info in userinput if info in dispatch]
def _execute()->None:
    '''
    Drives one full query: read addresses and requested info from stdin,
    call the MapQuest API, and print either the results or an error line.

    NOTE(review): the route is fetched twice — once to check for
    'routeError' and again inside the _run_class call; and the bare
    ``except`` maps every failure (including programming errors) to the
    generic 'MAPQUEST ERROR' message.
    '''
    try:
        locations=_user_request()
        classes=_request_info()
        route_info=mapquest_APIs.get_result(
            mapquest_APIs.build_locations_url(locations))
        for key in route_info['route']:
            if key=='routeError':
                print('\nNO ROUTE FOUND')
            else:
                _run_class(mapquest_APIs.get_result(
                    mapquest_APIs.build_locations_url(locations)),_commands(classes))
                print("\nDirections Courtesy of MapQuest; Map Data Copyright OpenStreetMap Contributors")
                # stop after the first non-error key: results already shown
                break
    except:
        print('\nMAPQUEST ERROR')
if __name__=="__main__":
'''
RUNS THE WHOLE PROGRAM WHEN THE MODULE IS EXECUTED THE FIRST TIME
'''
_execute()
| true |
83119fc71e56ece523985c4ea46e1fad2d0a065d | Python | pranjay01/leetcode_python | /LongestCommonPrefix.py | UTF-8 | 791 | 2.984375 | 3 | [] | no_license | strs=["flower","flow","flight"]
def longest_common_prefix(strs):
    """Return the longest common prefix of the strings in *strs*
    (LeetCode 14); returns '' for an empty list or no common prefix.

    Bug fixes: the original was a flat script using ``return`` at module
    level (a SyntaxError) and tested ``len(strs==0)`` (a TypeError); the
    logic is wrapped into a function with a shrinking-prefix scan.
    """
    if not strs:
        return ''
    prefix = strs[0]
    for s in strs[1:]:
        # Shrink the candidate prefix until the current string starts with it.
        while not s.startswith(prefix):
            prefix = prefix[:-1]
            if not prefix:
                return ''
    return prefix


# Demo matching the original script's sample input.
strs = ["flower", "flow", "flight"]
print(longest_common_prefix(strs))
26e547cee19ebf092bac35563d6a6fcf6d4a31f7 | Python | moshegplay/moshegplay | /LABs/variables.py | UTF-8 | 235 | 2.765625 | 3 | [] | no_license | name="moshe hazan"
age=29
mail="moshe@gmail.com"
print("full name: " + name +"\nage: " + str(age) +"\nmail:" + mail)
print("full name: " + name[::-1] +"\nage: " + str(age*3))
print("moshe" in "idan ben dudu moshe shimon yeal gal adam shahar yana")
| true |
33ffdea2b86660cdb23f8f43d95661c87622477f | Python | suriyadeepan/PyroDemystified-PyCon2019 | /getorix/data.py | UTF-8 | 1,539 | 2.65625 | 3 | [] | no_license | import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch
import pandas as pd
import torchvision.transforms.functional as TF
from PIL import Image
import os
DATA = 'data/'
def mnist(batch_size=128, one_hot=False):
    """Download MNIST into data/ (if absent) and return
    ``(train_loader, test_loader)`` DataLoaders with ToTensor applied;
    the train loader shuffles, the test loader does not.

    NOTE(review): the ``one_hot`` flag is accepted but never used.
    """
    root = 'data/'
    download = True
    trans = transforms.ToTensor()
    train_set = dset.MNIST(root=root, train=True, transform=trans,
                           download=download)
    test_set = dset.MNIST(root=root, train=False, transform=trans)
    train_loader = torch.utils.data.DataLoader(dataset=train_set,
                                               batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_set,
                                              batch_size=batch_size, shuffle=False)
    return train_loader, test_loader
def anscombe(group=2):
    """Load one group (1-4) of Anscombe's quartet from data/anscombe.csv,
    center its x column around 0, and return a 2 x N float tensor stacking
    (x, y)."""
    df_ans = pd.read_csv('data/anscombe.csv')
    groups = [ 'I', 'II', 'III', 'IV' ]
    # ``group`` is 1-based; index into the roman-numeral group labels
    df = df_ans[df_ans.group == groups[group - 1]]
    df.x = df.x - df.x.mean()
    return torch.stack(
        [ torch.tensor(df.x.values).float(), torch.tensor(df.y.values).float() ]
    )
def read_image_from_file(filepath):
    """Open the image at *filepath* with PIL and return it as a
    channels-first float tensor (``to_tensor`` scales values to [0, 1])."""
    image = Image.open(filepath)
    x = TF.to_tensor(image)
    return x
def notmnist():
    """Load every .png under data/notmnist and return them stacked into a
    single tensor (one 2-D image per slice; the channel dim is squeezed
    away, so grayscale inputs are assumed — TODO confirm)."""
    notmnist_dir = os.path.join(DATA, 'notmnist')
    return torch.stack([
        read_image_from_file(os.path.join(notmnist_dir, f)).squeeze()
        for f in os.listdir(notmnist_dir) if '.png' in f ])
def bap_lr():
    """Load data/bap_linear_regression.csv and return a 2 x N float tensor
    stacking its x and y columns."""
    df = pd.read_csv('data/bap_linear_regression.csv')
    return torch.stack(
        [ torch.tensor(df.x).float(), torch.tensor(df.y).float() ]
    )
| true |
90a97443b306375d0101b19e850d02536fd63c1c | Python | Jason003/Interview_Code_Python | /uber/24game.py | UTF-8 | 583 | 2.859375 | 3 | [] | no_license | import itertools as it
class Solution:
def judgePoint24(self, nums) -> bool:
if len(nums) == 1:
return round(nums[0], 4) == 24
else:
for (i, m), (j, n) in it.combinations(enumerate(nums), 2):
new_nums = [x for t, x in enumerate(nums) if i != t != j]
inter = {m + n, abs(m - n), n * m}
if n != 0: inter.add(m / n)
if m != 0: inter.add(n / m)
if any(self.judgePoint24(new_nums + [x]) for x in inter):
return True
return False
| true |
8cdf3f49d798c59227158472bcf39e7bd6cce366 | Python | kimsup10/octopus | /octopus/ml/naive_bayes.py | UTF-8 | 1,322 | 3.296875 | 3 | [
"MIT"
] | permissive | import numpy as np
class NaiveBayes:
    """Predicts an article's expected like count from per-token prior
    probabilities, naive-Bayes style."""

    # total number of users (taken as the max like count observed)
    total_user_cnt = None
    # prior probability per token
    pre_prob = None
    # mean like count over the training articles
    mean_likes_cnt = None

    def __init__(self, articles):
        self.prepare(articles)

    def prepare(self, articles):
        """Compute per-token prior probabilities from *articles*.

        Each article contributes likes_count / total_user_cnt for every
        token it contains; a token's prior is the mean of its
        contributions.
        """
        self.pre_prob = {}
        like_counts = [article.likes_count for article in articles]
        self.total_user_cnt = np.max(like_counts)
        self.mean_likes_cnt = np.mean(like_counts)
        for article in articles:
            ratio = article.likes_count / self.total_user_cnt
            for token in article.tokens:
                self.pre_prob.setdefault(token, []).append(ratio)
        for token, ratios in self.pre_prob.items():
            self.pre_prob[token] = np.mean(ratios)

    def predict(self, article):
        """Return the predicted like count for *article*.

        Uses the geometric mean of the priors of its known tokens, scaled
        by the user total; with no known tokens, falls back to the mean
        like count.
        """
        product = 1.0
        matched = 0
        for token in article.tokens:
            if self.pre_prob.get(token, 0) > 0:
                product *= self.pre_prob[token]
                matched += 1
        if matched == 0:
            return self.mean_likes_cnt
        return product ** (1.0 / matched) * self.total_user_cnt
| true |
d1455ed5e365c5e1c1cfe473e4c620b81b2e6e02 | Python | CodeTest-StudyGroup/Code-Test-Study | /JJangSungWon/삼성 기출/14502_연구소.py | UTF-8 | 2,106 | 3.09375 | 3 | [] | no_license | # boj 14502
# blog : jjangsungwon.tistory.com
import sys, copy
import itertools
from collections import deque
def bfs():
    """Flood-fill virus spread over the global ``temp_arr`` grid (boj 14502).

    Starts from every seed in the global ``virus`` list and marks each
    reachable empty cell (0) as infected (2), tracking visits in a fresh
    ``visited`` grid. Mutates ``temp_arr`` in place; grid bounds come from
    the globals ``N`` (rows) and ``M`` (cols).

    Improvement: the four copy-pasted up/down/left/right branches are
    collapsed into a direction-delta table; the final infected region is
    identical.
    """
    q = deque(virus)
    visited = [[0] * M for _ in range(N)]
    while q:
        row, col = q.popleft()
        for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nr, nc = row + dr, col + dc
            if 0 <= nr < N and 0 <= nc < M and temp_arr[nr][nc] == 0 and visited[nr][nc] == 0:
                visited[nr][nc] = 1
                temp_arr[nr][nc] = 2
                q.append([nr, nc])
if __name__ == "__main__":
    N, M = map(int, input().split())
    arr = [list(map(int, sys.stdin.readline().split())) for _ in range(N)]
    virus = []
    # candidate cells where a wall could be built
    temp = []
    for i in range(N):
        for j in range(M):
            if arr[i][j] == 0:
                temp.append([i, j])
            elif arr[i][j] == 2:
                virus.append([i, j])  # virus seed positions
    # every way of placing exactly 3 walls
    result = list(itertools.combinations(temp, 3))
    min_area = -1
    # try each wall combination
    for k in range(len(result)):
        temp_arr = copy.deepcopy(arr)
        for i in range(3):
            temp_arr[result[k][i][0]][result[k][i][1]] = 1  # build the walls
        # spread the virus over the walled grid
        bfs()
        cnt = 0
        for i in range(N):
            for j in range(M):
                if temp_arr[i][j] == 0:
                    cnt += 1
        # NOTE(review): despite its name, min_area tracks the MAXIMUM safe area
        min_area = max(min_area, cnt)
    print(min_area)
| true |
1b67df272b5f8fb9875e6dd4a41a3063b1df7fcb | Python | MayankR/cmr | /demo.py | UTF-8 | 5,344 | 2.515625 | 3 | [
"MIT"
] | permissive | """
Demo of CMR.
Note that CMR assumes that the object has been detected, so please use a picture of a bird that is centered and well cropped.
Sample usage:
python -m cmr.demo --name bird_net --num_train_epoch 500 --img_path cmr/demo_data/img1.jpg
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
from absl import flags, app
import numpy as np
import skimage.io as io
import pickle
import torch
from .nnutils import test_utils
from .nnutils import predictor as pred_util
from .utils import image as img_util
from .utils import mesh
from .utils import geometry as geom_utils
flags.DEFINE_string('img_path', 'data/im1963.jpg', 'Image to run')
# flags.DEFINE_integer('img_size', 224, 'image size the network was trained on.')
flags.DEFINE_integer('use_train', -1, 'Use data from CUB train data')
opts = flags.FLAGS
def preprocess_image(img_path, img_size=256):
    """Load *img_path*, scale pixels to [0, 1], and return a CxHxW float
    array.

    NOTE(review): ``[:, :, :-1]`` drops the image's last channel —
    presumably stripping alpha from RGBA input; confirm inputs always have
    4 channels, otherwise this discards a color channel. ``img_size`` is
    unused while the resize/crop code below stays commented out.
    """
    img = io.imread(img_path)[:,:,:-1] / 255.
    # Scale the max image size to be img_size
    # scale_factor = float(img_size) / np.max(img.shape[:2])
    # img, _ = img_util.resize_img(img, scale_factor)
    # Crop img_size x img_size from the center
    # center = np.round(np.array(img.shape[:2]) / 2).astype(int)
    # img center in (x, y)
    # center = center[::-1]
    # bbox = np.hstack([center - img_size / 2., center + img_size / 2.])
    # img = img_util.crop(img, bbox, bgval=1.)
    # Transpose the image to 3xHxW
    img = np.transpose(img, (2, 0, 1))
    print(img.shape)
    return img
def visualize(img, outputs, renderer):
    """Render the predicted mesh from several viewpoints next to the input
    image and save the montage to a PNG; also pickles the mesh vertices
    and saves a 2-D scatter of them.

    Side effects: writes mesh_verts.pkl, figure_<name>__<epoch>__<train>.png
    and 3dfigure.png, and prints several debug tensors.
    """
    vert = outputs['verts'][0]
    print("vert:", vert.shape, vert[:2])
    faces_car = outputs['faces']
    print("faces_car:", faces_car.shape, faces_car[:2])
    # sp_verts, sp_faces = mesh.create_sphere(4)
    # print("created sphere")
    # verts_dense = geom_utils.project_verts_on_mesh(sp_verts, vert, faces_car)
    # print("dense vert:", verts_dense.shape)
    # print("dense faces:", sp_faces.shape)
    vert_np = (torch.autograd.Variable(vert).data).cpu().numpy()
    # faces_np = (torch.autograd.Variable(faces_car).data).cpu().numpy()
    print("vertnp: ", vert_np[:3])
    # Dump vertices + faces so the mesh can be inspected offline.
    with open('mesh_verts.pkl', 'wb') as handle:
        pickle.dump([vert_np, faces_car], handle)
    print("pickled out mesh")
    print("vert shape", outputs['verts'].shape)
    cam = outputs['cam_pred'][0]
    print("cam", cam)
    texture = outputs['texture'][0]
    # cam[-1] = 0
    # cam[-2] = 0
    # cam[-3] = 0
    # cam[-4] = 0
    # shape_pred = renderer(vert, cam)
    # Untextured render at the predicted camera, plus a textured one.
    shape_pred = renderer.diff_vp(
        vert, cam, angle=0, axis=[0, 1, 0], texture=None, extra_elev=True)
    img_pred = renderer(vert, cam, texture=texture)
    # Different viewpoints.
    vp1 = renderer.diff_vp(
        vert, cam, angle=30, axis=[0, 1, 0], texture=texture, extra_elev=True)
    vp2 = renderer.diff_vp(
        vert, cam, angle=60, axis=[0, 1, 0], texture=texture, extra_elev=True)
    vp3 = renderer.diff_vp(
        vert, cam, angle=60, axis=[1, 0, 0], texture=texture)
    # Back to HxWxC for matplotlib display.
    img = np.transpose(img, (1, 2, 0))
    import matplotlib.pyplot as plt
    plt.ion()
    plt.figure(1)
    plt.clf()
    plt.subplot(231)
    plt.imshow(img)
    plt.title('input')
    plt.axis('off')
    plt.subplot(232)
    plt.imshow(shape_pred)
    plt.title('pred mesh')
    plt.axis('off')
    plt.subplot(233)
    plt.imshow(img_pred)
    plt.title('pred mesh w/texture')
    plt.axis('off')
    plt.subplot(234)
    plt.imshow(vp1)
    plt.title('different viewpoints')
    plt.axis('off')
    plt.subplot(235)
    plt.imshow(vp2)
    plt.axis('off')
    plt.subplot(236)
    plt.imshow(vp3)
    plt.axis('off')
    plt.draw()
    #plt.show()
    plt.savefig("figure_" + opts.name + "__" + str(opts.num_train_epoch) + "__" + str(opts.use_train) + ".png")
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    # ax = Axes3D(fig)#fig.add_subplot(111, projection='3d')
    pts = vert.cpu().numpy()
    # ax.scatter(pts[:,0], pts[:,1], pts[:,2], c='r', marker='o')
    # 2-D projection of the vertex cloud (the 3-D path above is disabled).
    plt.scatter(pts[:,0], pts[:,1], c='r', marker='o')
    plt.xlim([-1, 2])
    plt.ylim([-1, 2])
    plt.savefig("3dfigure.png")
    # import ipdb
    # ipdb.set_trace()
def main(_):
    """Entry point: fetch an input image (a CUB train sample when
    --use_train >= 0, otherwise the file at --img_path), run the mesh
    predictor on it, and visualize the result."""
    if opts.use_train >= 0:
        print("using train dataset")
        from .data import cub as cub_data
        data_module = cub_data
        dataloader = data_module.data_loader(opts, shuffle=False)
        # walk the loader until the requested sample index
        for i, b in enumerate(dataloader):
            if i == opts.use_train:
                batch = b
        img = batch['img'].numpy().reshape([3, 224, 224])
        print("img shape", img.shape)
        # batch = dataloader[opts.use_train]
    else:
        # NOTE(review): the img_size flag definition above is commented
        # out, so opts.img_size may not exist on this branch — confirm.
        img = preprocess_image(opts.img_path, img_size=opts.img_size)
        batch = {'img': torch.Tensor(np.expand_dims(img, 0))}
    predictor = pred_util.MeshPredictor(opts)
    outputs = predictor.predict(batch)
    # This is resolution
    renderer = predictor.vis_rend
    renderer.set_light_dir([0, 1, -1], 0.4)
    # NOTE(review): both branches below are identical.
    if opts.use_train >= 0:
        visualize(img, outputs, predictor.vis_rend)
    else:
        visualize(img, outputs, predictor.vis_rend)
if __name__ == '__main__':
opts.batch_size = 1
app.run(main)
| true |
2d361f33028aa74c6c99a84893bcd5316039c118 | Python | odacremjorge/pryecto_taller_ENDESYC | /app/controllers/HistorialController.py | UTF-8 | 2,351 | 2.59375 | 3 | [] | no_license | import os
import time
from app import db
from app import app
from flask import render_template, request, redirect, url_for, flash
from app.models.Historial import Historial
from PIL import Image #pip install pillow
import urllib.request
from werkzeug.utils import secure_filename
class HistorialController():
    """Flask controller for the Historial views: list all records, render
    the creation form, and handle the create POST with an image upload."""
    def __init__(self):
        pass

    def index(self):
        # List every Historial record.
        historiales = Historial.query.all()
        return render_template('historiales/index.html', title='Historial', historiales=historiales)

    def crearHistorial(self):
        # Render the empty "new Historial" form.
        return render_template('historiales/create.html', title='Nuevo Historial')

    """ ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
    def allowed_file(filename):
        return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS """

    def guardarHistorial(self):
        """Handle the create POST: validate the uploaded file, re-encode
        it as a timestamp-named PNG under app/static/uploads/, and persist
        the new Historial row."""
        if request.method == 'POST':
            titulo = request.form['titulo']
            descripcion = request.form['descripcion']
            if 'file' not in request.files:
                flash('No file part')
                return redirect(request.url)
            file = request.files['file']
            if file.filename == '':
                flash('No image selected for uploading')
                return redirect(request.url)
            if file: #and allowed_file(file.filename):
                filename = secure_filename(file.filename)
                # save under the sanitized original name first
                file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
                # re-encode as PNG named by timestamp, then drop the original
                fecha = time.strftime("%Y%m%d%H%M%S")
                imgPath = app.config['UPLOAD_FOLDER'] + filename
                img = Image.open(imgPath)
                img.save('app/static/uploads/'+fecha+'.png')
                os.remove('app/static/uploads/'+filename)
                newfilename = fecha+'.png'
                historial = Historial(titulo=titulo, descripcion=descripcion, urlimage=newfilename)
                db.session.add(historial)
                db.session.commit()
                flash('Historial creado exitosamente')
                return redirect(url_for('historial_router.main'))
            else:
                flash('Allowed image types are -> png, jpg, jpeg, gif')
                return redirect(request.url)

# Module-level singleton used by the router.
historialcontroller = HistorialController()
| true |
b5eb56d47db776f067a247ad86fd0653d5e37413 | Python | 7Aishwarya/Data-Structures-and-Algorithms | /Dynamic-Programming/MaximumSubarray-KadensAlgorithm.py | UTF-8 | 374 | 2.84375 | 3 | [] | no_license | class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
cur_best = nums[0]
overall_best = nums[0]
for i in range(1, len(nums)):
cur_best = max(nums[i], cur_best + nums[i])
overall_best = max(overall_best, cur_best)
return overall_best
| true |
e49a8948105d02ba89f50dade6f756c20f3c8402 | Python | 988dengwenbo/meeting | /comnon/time_module.py | UTF-8 | 920 | 2.96875 | 3 | [] | no_license | import math
def pc_time_module_week(num):
    # NOTE(review): this mapping looks buggy -- `num == 2+1` compares against
    # 3 (not 2), so num == 2 matches no branch and the function falls through
    # returning None, while the later `elif num == 3` branch is unreachable.
    # This whole definition is also shadowed by the redefinition of
    # pc_time_module_week further down in this module, so it is dead code.
    if num == 0:
        return num+1
    elif num == 1:
        return num
    elif num == 2+1:
        return num
    elif num == 3:
        return num+1
    elif num == 4:
        return num+1
    elif num == 5:
        return num+1
    elif num == 6:
        return num+1
def get_time_module_day(hours, min):
    """Map a clock time to a ``[row_index, row_span]`` pair for a grid.

    ``row_index`` is ``hour * 2 + 2`` (two rows per hour, offset by 2);
    ``row_span`` is 2 when the minutes fall strictly inside the first or
    third quarter of the hour (0 < min < 15 or 30 < min < 45), otherwise 1.

    NOTE: the ``min`` parameter shadows the builtin ``min``; the name is
    kept for backward compatibility with existing callers.
    Fix: removed the redundant self-assignments (``hours = hours`` etc.).
    """
    li = int(hours) * 2 + 2
    if 0 < int(min) < 15 or 30 < int(min) < 45:
        span = 2
    else:
        span = 1
    return [li, span]
# Quick manual smoke test of the helpers above (runs on import).
print(pc_time_module_week(1))
print(get_time_module_day(0,15))
def pc_time_module_week(num):
    """Identity mapping for weekday indices 0-6; any other value -> None.

    (This redefinition shadows the earlier pc_time_module_week above.)
    """
    if num in (0, 1, 2, 3, 4, 5, 6):
        return num
    return None
def get_time_module_day(num):
    # NOTE(review): stub redefinition -- it shadows the real implementation
    # of get_time_module_day above and always returns None (body is ``...``).
    ...
| true |
3a2d0153f0736ac85051cdcdfc83c01940dd8938 | Python | csvchicago/BlackWinter | /stop.py | UTF-8 | 309 | 2.65625 | 3 | [] | no_license | from gpiozero import Robot
from time import sleep
#from gpiozero import Buzzer
#bz = Buzzer(14)
#bz.on()
from gpiozero import TonalBuzzer
from gpiozero.tones import Tone
# Sound an A4 tone on the buzzer wired to GPIO 14 while the robot runs.
b = TonalBuzzer(14)
b.play(Tone("A4"))
# Motor wiring: left motor on GPIO 7/8, right motor on GPIO 9/10.
blackWinter = Robot(left=(7,8), right=(9,10))
# Drive forward for one second; stop() is issued on the line that follows.
blackWinter.forward()
sleep(1)
blackWinter.stop() | true |
f8e36a76e3ba8dd799926bd92bb04587ddffae09 | Python | Isdaril/python | /Hackerrank/towerbreakersrev.py | UTF-8 | 921 | 3.484375 | 3 | [] | no_license | import math
class Calculator:
    """Memoized computation of Omega(n): the number of prime factors of n
    counted with multiplicity (e.g. 12 = 2*2*3 -> 3)."""
    def __init__(self):
        # Cache of previously computed results, shared across calls.
        self.alreadyFound = dict()
    def findPrimeCount(self, n):
        """Return the number of prime factors of ``n`` with multiplicity.

        Fix: the recursive step now uses integer division (``n // i``);
        the original ``n / i`` is true division in Python 3, which passed
        floats down the recursion, polluted the memo with float keys, and
        could lose precision for large n.
        """
        if n in self.alreadyFound:
            return self.alreadyFound[n]
        if n == 1:
            self.alreadyFound[1] = 0
            return 0
        # Default: n itself is prime unless a smaller divisor is found.
        result = 1
        lim = math.floor(math.sqrt(n))
        # range(2, lim + 2) covers every candidate up to and including
        # floor(sqrt(n)) + 1, which is safe for floor rounding.
        for i in range(2, lim + 2):
            if n % i == 0:
                # i is the smallest prime factor; recurse on the cofactor.
                result = self.findPrimeCount(n // i) + 1
                break
        self.alreadyFound[n] = result
        return result
# Reads T test cases from stdin.  For each tower height k the number of
# available moves is Omega(k) (prime factors with multiplicity); the XOR of
# those counts appears to decide the winner, Nim/Sprague-Grundy style --
# TODO confirm against the problem statement.
T = int(input().strip())
calc = Calculator()
for i in range(T):
    N = int(input().strip())
    res = 0
    array = [int(x) for x in input().split()]
    for k in array:
        a = calc.findPrimeCount(k)
        print(k, a)  # NOTE(review): debug output mixed into the answer stream
        res ^= a
    if res == 0:
        print("2")
else:
print("1") | true |
4d326ab9348ef3eb59b08857cbc5e5d3f146c2dd | Python | julianpistorius/ds-playbooks | /irods/library/irods_user | UTF-8 | 15,880 | 2.515625 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Provides an ansible module for creating, updating and removing iRODS users.
"""
import ssl
from ansible.module_utils.basic import AnsibleModule
# Standard Ansible module metadata (consumed by ansible-doc tooling).
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community"
}
DOCUMENTATION = """
---
module: irods_user
short_description: Create/Update/Remove iRODS user
version_added: "2.4"
description:
- Create iRODS user.
- Update iRODS user's type or password or info.
- Remove iRODS user.
options:
name:
description:
- Username of user
required: true
type: str
state:
description:
- Desired state to achieve
- Either present or absent
default: present
choices:
- present
- absent
type: str
type:
description:
- User's type
- Only meaningful when state is 'present'
- Type to change into if user exist with a different type
required: false
default: rodsuser
type: str
info:
description:
- User's info
- Only meaningful when state is 'present'
required: false
type: str
password:
description:
- User's password
- Only meaningful when state is 'present'
required: false
type: str
host:
description:
- Hostname of the iRODS server
required: true
type: str
port:
description:
- Port of the iRODS server
required: true
type: int
admin_user:
description:
- Username of the admin user
required: true
type: str
admin_password:
description:
- Password of the admin user
required: true
type: str
zone:
description:
- Zone of the admin user
- Zone of the user that the module operate on
required: true
type: str
requirements:
- python-irodsclient>=0.8.2
author:
- John Xu
"""
EXAMPLES = '''
# Create iRODS user of type rodsuser
- name: create user
irods_user:
name: test_user1
state: present
host: cyverse.org
port: 1247
admin_user: rods
admin_password: 1234
zone: tempZone
# Update password of test_user1
- name: update password
irods_user:
name: test_user1
password: foo
state: present
host: cyverse.org
port: 1247
admin_user: rods
admin_password: 1234
zone: tempZone
# Update info of test_user1
- name: update info
irods_user:
name: test_user1
info: some information
state: present
host: cyverse.org
port: 1247
admin_user: rods
admin_password: 1234
zone: tempZone
# Update type of test_user1
- name: update user type
irods_user:
name: test_user1
type: other_type
state: present
host: cyverse.org
port: 1247
admin_user: rods
admin_password: 1234
zone: tempZone
# Remove iRODS user
- name: remove user
irods_user:
name: test_user1
type: other_type
state: absent
host: cyverse.org
port: 1247
admin_user: rods
admin_password: 1234
zone: tempZone
'''
RETURN = '''
user:
description: user that has been changed
type: str
returned: always
exc:
description:
- type of last iRODS exception thrown
- empty string when none is thrown
type: str
returned: always
exc_msg:
description:
- message of the last iRODS exception thrown
- empty string when none is thrown
type: str
returned: always
'''
# python-irodsclient is optional at import time; record availability so the
# module can fail with a clear message inside sanity_check() instead of
# crashing at import.
try:
    USE_IRODS_CLIENT = True
    from irods.session import iRODSSession
    from irods.models import User
except ImportError:
    USE_IRODS_CLIENT = False
class IRODSUserModule:
    """
    Implementation of the irods_user Ansible module: ensure a single iRODS
    user is present (with the requested type/info/password) or absent.
    """
    def __init__(self):
        """
        Initialize the module: declare arguments, the result skeleton, and
        the AnsibleModule wrapper.
        """
        # define argument
        self.module_args = dict(
            name=dict(type="str", required=True),
            state=dict(type="str", choices=["present", "absent"],
                       default="present"),
            type=dict(type="str", default="rodsuser", required=False),
            info=dict(type="str", required=False),
            password=dict(type="str", no_log=True, required=False),
            host=dict(type="str", required=True),
            port=dict(type="int", required=True),
            admin_user=dict(type="str", no_log=True, required=True),
            admin_password=dict(type="str", no_log=True, required=True),
            zone=dict(type="str", required=True),
        )
        # result
        self.result = dict(
            changed=False,
            user="",
            exc="",
            exc_msg=""
        )
        # init module
        self.module = AnsibleModule(
            argument_spec=self.module_args,
            supports_check_mode=True
        )
        # Admin iRODS session; created lazily by init_session().
        self.session = None
    def run(self):
        """
        Entry point for module class, method to be called to run the module
        """
        # check param and env
        self.sanity_check()
        # only-check mode
        if self.module.check_mode:
            self.module.exit_json(**self.result)
        self.init_session()
        action = self.select_action()
        action()
    def _fail(self, msg, err=None):
        """
        Failure routine, called when the operation failed.  Cleans up the
        admin session and exits the module via fail_json.
        """
        if self.session:
            self.session.cleanup()
        if err:
            self.module.fail_json(msg=msg + "\n" + str(err), **self.result)
        else:
            self.module.fail_json(msg=msg, **self.result)
    def _success(self, msg=""):
        """
        Success routine, called when the operation succeeds
        """
        if msg:
            self.result["message"] = msg
        self.module.exit_json(**self.result)
    def sanity_check(self):
        """
        Check if python-irodsclient is installed
        """
        # python-irodsclient is required at this point
        if not USE_IRODS_CLIENT:
            self._fail("python-irodsclient not installed")
    def init_session(self):
        """
        Initialize the (admin) iRODS session with the iRODS server
        """
        ssl_context = ssl.create_default_context(
            purpose=ssl.Purpose.SERVER_AUTH, cafile=None, capath=None,
            cadata=None)
        ssl_settings = {"ssl_context": ssl_context}
        self.session = iRODSSession(
            host=self.module.params["host"],
            port=self.module.params["port"],
            user=self.module.params["admin_user"],
            password=self.module.params["admin_password"],
            zone=self.module.params["zone"],
            **ssl_settings)
    def select_action(self):
        """
        Dispatch action according to the argument passed to the module
        """
        if self.module.params["state"] == "present":
            return self.user_present
        return self.user_absent
    def user_present(self):
        """
        Ensure the user specified in the parameters is present, with the
        requested type, info, and password.
        """
        # get username
        username = self.module.params["name"]
        if not username:
            self._success("empty username")
        user_type = self.module.params["type"]
        info = self.module.params["info"]
        password = self.module.params["password"]
        # check if user exist
        if not self._user_exist(username):
            # create user
            self._create_user(username, user_type)
            self.result["user"] = username
            if password is not None:
                self._update_user_password(username, password)
                if not self._check_password(username, password):
                    self._fail("inconsistent password after updating password")
            if info is not None:
                self._update_user_info(username, info)
        else:
            if user_type and self._user_type(username) != user_type:
                # update user_type
                self._update_user_type(username, user_type)
                self.result["user"] = username
            if info and self._user_info(username) != info:
                # update info
                self._update_user_info(username, info)
                self.result["user"] = username
            # check if password is consistent
            if password is not None and not self._check_password(username, password):
                self._update_user_password(username, password)
                self.result["user"] = username
                if not self._check_password(username, password):
                    self._fail("inconsistent password after updating password")
        # check if user is present
        if not self._user_exist(username):
            self._fail("user disappear after creation, {}".format(username))
        self._success()
    def user_absent(self):
        """
        Ensure the user specified in the parameters is absent
        """
        # get username
        username = self.module.params["name"]
        # check if user exist
        if not self._user_exist(username):
            self._success("user already absent")
        # remove user
        self._remove_user(username)
        self.result["user"] = username
        # check if user have been removed
        if self._user_exist(username):
            self._fail("user still exist after removal, {}".format(username))
        self._success()
    def _create_user(self, username, user_type):
        """
        Create an iRODS user with the given username and type
        """
        try:
            self.session.users.create(username, user_type, user_zone=self.module.params["zone"])
            self.result["changed"] = True
        except Exception as exc:
            # A broad catch on all exception type that could be raised by the
            # call to irods module, since the possible exception types are
            # not well documented.
            self.result["exc"] = type(exc).__name__
            self.result["exc_msg"] = str(exc)
            self._fail("Unable to create user {}".format(username), exc)
    def _update_user_type(self, username, user_type):
        """
        Update user's type
        """
        self._update_user_attribute(username, "type", user_type)
    def _update_user_info(self, username, info):
        """
        Update user's info
        """
        self._update_user_attribute(username, "info", info)
    def _update_user_attribute(self, username, attribute, value):
        """
        Update an attribute of a user; the attribute must exist.
        (Fix: parameter renamed from the misspelled 'attirbute'; this is a
        private helper called positionally, so callers are unaffected.)
        """
        try:
            self.session.users.modify(username, attribute, value,
                                      user_zone=self.module.params["zone"])
            self.result["changed"] = True
        except Exception as exc:
            # A broad catch on all exception type that could be raised by the
            # call to irods module, since the possible exception types are
            # not well documented.
            self.result["exc"] = type(exc).__name__
            self.result["exc_msg"] = str(exc)
            self._fail("Unable to update user's {} for {}".format(attribute, username), exc)
    def _update_user_password(self, username, password):
        """
        Update user's password
        """
        try:
            self.session.users.modify(username, "password", password,
                                      user_zone=self.module.params["zone"])
            self.result["changed"] = True
        except Exception as exc:
            # A broad catch on all exception type that could be raised by the
            # call to irods module, since the possible exception types are
            # not well documented.
            self.result["exc"] = type(exc).__name__
            self.result["exc_msg"] = str(exc)
            # Fix: corrected "passowrd" typo in the failure message.
            self._fail("Unable to update password for user {}".format(username), exc)
    def _remove_user(self, username):
        """
        Remove the iRODS user with the given username
        """
        try:
            self.session.users.remove(username, user_zone=self.module.params["zone"])
            self.result["changed"] = True
        except Exception as exc:
            # A broad catch on all exception type that could be raised by the
            # call to irods module, since the possible exception types are
            # not well documented.
            self.result["exc"] = type(exc).__name__
            self.result["exc_msg"] = str(exc)
            self._fail("Unable to remove user {}".format(username), exc)
    def _check_password(self, username, password):
        """
        Check if able to login as the user with the given password
        """
        user_session = None
        try:
            ssl_context = ssl.create_default_context(
                purpose=ssl.Purpose.SERVER_AUTH, cafile=None, capath=None,
                cadata=None)
            ssl_settings = {"ssl_context": ssl_context}
            user_session = iRODSSession(
                host=self.module.params["host"],
                port=self.module.params["port"],
                user=username,
                password=password,
                zone=self.module.params["zone"],
                **ssl_settings)
            # fetch self to test if credential is correct
            user_session.users.get(username)
            return True
        except Exception as exc:
            # A broad catch on all exception type that could be raised by the
            # call to irods module, since the possible exception types are
            # not well documented.
            self.result["exc"] = type(exc).__name__
            self.result["exc_msg"] = str(exc)
            return False
        finally:
            # Fix: always release the probe session's connections; the
            # session was previously leaked on both success and failure.
            if user_session is not None:
                user_session.cleanup()
    def _user_info(self, username):
        """
        Get user's info field, or None if the user does not exist
        """
        query = self.session.query(User.name, User.info) \
            .filter(User.name == username,
                    User.zone == self.module.params["zone"])
        for result in query:
            return result[User.info]
        return None
    def _user_type(self, username):
        """
        Get the type of the user, None if user not exists
        """
        try:
            user = self.session.users.get(username, user_zone=self.module.params["zone"])
            if not user:
                return None
            return user.type
        except Exception as exc:
            # A broad catch on all exception type that could be raised by the
            # call to irods module, since the possible exception types are
            # not well documented.
            self.result["exc"] = type(exc).__name__
            self.result["exc_msg"] = str(exc)
            self._fail("Unable to query user's type for {}".format(username), exc)
    def _user_exist(self, username):
        """
        Check if there exist an iRODS user with the given username
        """
        try:
            query = self.session.query(User.name) \
                .filter(User.name == username,
                        User.zone == self.module.params["zone"])
            for result in query:
                if username == result[User.name]:
                    return True
            return False
        except Exception as exc:
            # A broad catch on all exception type that could be raised by the
            # call to irods module, since the possible exception types are
            # not well documented.
            self.result["exc"] = type(exc).__name__
            self.result["exc_msg"] = str(exc)
            self._fail("Unable to query irods user {}".format(username), exc)
def main():
    """
    Entrypoint of the Ansible module: build the module object and run it.
    """
    IRODSUserModule().run()
if __name__ == '__main__':
    main()
| true |
f6423e96ce482349d87a6d0ff3526858fed62181 | Python | Stereo-Alex/Music_prediction_ting | /functions_for_music_predictor.py | UTF-8 | 2,413 | 2.921875 | 3 | [] | no_license | import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from IPython.display import Javascript
import pandas as pd
##################Part 1, conecting to the api and dowloading the data frames##############
#### Takes user input: (only works with the numerical part of the link or the uri)
def getting_user_playlist():
    """Prompt for a shared-playlist link/URI and return it verbatim."""
    return input("please paste your share playlist link")
#### takes user input and conects to the api so that i can download the
#### returns a dictionary named Playlist_dict
def one_one(user_imput_url):
    """Download every track of a Spotify playlist and return a dict with
    parallel lists of song names, artist names, and track URIs.

    NOTE(review): ``sp`` is expected to be an authenticated
    ``spotipy.Spotify`` client, but it is never assigned anywhere in this
    module -- presumably it is created interactively before calling.
    The ``global sp`` statement is also unnecessary since ``sp`` is only
    read, never assigned.  TODO confirm how ``sp`` is supposed to be set.
    """
    global sp
    playlist = sp.user_playlist_tracks("spotify", user_imput_url)
    long_json = playlist['items']
    # Spotify pages results; follow the 'next' cursor until exhausted.
    while playlist['next']:
        playlist = sp.next(playlist)
        long_json.extend(playlist['items'])
    song_names = [i["track"]["name"] for i in long_json]
    artist_names = [i["track"]["artists"][0]["name"] for i in long_json]
    uris = [i["track"]["uri"] for i in long_json]
    Playlist_dict = {
        "Songs":song_names,
        "Artists":artist_names,
        "uri":uris,
    }
    return Playlist_dict
#### Fetches the data features from the data frame that was downloaded in one_one
#### Will return a data frame with the audio features
def returning_dict(data_frame):
    """Fetch Spotify audio features for every track URI in ``data_frame``.

    Returns a list of single-row DataFrames, one per URI, ready to be
    concatenated by the caller.
    """
    return [pd.DataFrame(sp.audio_features(uri)) for uri in data_frame["uri"]]
#### defines main for the data importer, merges the previous two data frames
#### will return a data frame that then has to be saved, as well as all the info necessary
##### have to add keys for spotify api
def main():
    """Prompt for a playlist, download its tracks and audio features, and
    return the merged DataFrame (one row per track).

    The caller is told (via print) to save the result with ``to_csv``.
    """
    user_input = getting_user_playlist()
    data_frame = pd.DataFrame(one_one(user_input))
    # One audio-features row per track, concatenated into a single frame.
    concatt = pd.concat(returning_dict(data_frame))
    # Join the track metadata with its audio features on the track URI.
    final_playlist = pd.merge(data_frame, concatt[['uri', 'acousticness', 'analysis_url', 'danceability',
       'duration_ms', 'energy', 'id', 'instrumentalness', 'key', 'liveness',
       'loudness', 'mode', 'speechiness', 'tempo', 'time_signature',
       'track_href', 'type', 'valence']], on='uri')
    print("the name of the data frame is final_playlist, use......"
    "(final_playlist.to_csv(##whatever_name.csv)) to save it")
    return final_playlist
| true |
06ae681332b674d8987fc6611ef07ae38da79bfa | Python | atlanticwave-sdx/atlanticwave-proto | /localctlr/LCRuleManager.py | UTF-8 | 9,633 | 2.671875 | 3 | [] | no_license | # Copyright 2018 - Sean Donovan
# AtlanticWave/SDX Project
import cPickle as pickle
from lib.AtlanticWaveManager import AtlanticWaveManager
from shared.ManagementLCRecoverRule import *
# List of rule statuses: lifecycle states stored in the rule_table 'status'
# column and validated against VALID_RULE_STATUSES.
RULE_STATUS_ACTIVE = 1
RULE_STATUS_DELETING = 2
RULE_STATUS_INSTALLING = 3
RULE_STATUS_REMOVED = 4
VALID_RULE_STATUSES = [RULE_STATUS_ACTIVE,
                       RULE_STATUS_DELETING,
                       RULE_STATUS_INSTALLING,
                       RULE_STATUS_REMOVED]
# Base error type for LCRuleManager operations.
class LCRuleManagerError(Exception):
    pass
# Raised for invalid argument types (e.g. an unknown rule status).
class LCRuleManagerTypeError(TypeError):
    pass
# Raised when validation fails (duplicate rule insert, bad filter column,
# invalid status value).
class LCRuleManagerValidationError(LCRuleManagerError):
    pass
# Raised when attempting to delete a rule that does not exist.
class LCRuleManagerDeletionError(LCRuleManagerError):
    pass
class LCRuleManager(AtlanticWaveManager):
    ''' This keeps track of LCRules. It provides a database for easier
        filtering.
        Singleton. '''
    def __init__(self, loggeridprefix='localcontroller',
                 db_filename=':memory:'):
        # Logger id is derived from the caller-supplied prefix.
        loggerid = loggeridprefix + '.lcrulemanager'
        super(LCRuleManager, self).__init__(loggerid)
        # Setup DB.
        db_tuples = [('rule_table', 'lcrules')]
        self._initialize_db(db_filename, db_tuples)
        # Rule entry looks like:
        # {cookie_value : {'status': RULE_STATUS_ACTIVE,
        #                  'rule': rule_value}}
        self._valid_table_columns = ['cookie','switch_id','status','rule']
        # Setup initial rules related stuff.
        self._initial_rules_list = []
        self.logger.warning("%s initialized: %s" % (self.__class__.__name__,
                                                    hex(id(self))))
    def add_rule(self, cookie, switch_id, lcrule,
                 status=RULE_STATUS_INSTALLING):
        # Insert LC rule into db, using the cookie and switch_id as the index
        # Validate status
        if status not in VALID_RULE_STATUSES:
            raise LCRuleManagerTypeError("Status not valid: %s" % status)
        # Rules are pickled so arbitrary LCRule objects fit a text column.
        textrule = pickle.dumps(lcrule)
        # Confirm that we're not inserting a duplicate rule.
        dupes = self._find_rules({'cookie':cookie,
                                  'switch_id':switch_id})
        # NOTE(review): _find_rules always returns a list (possibly empty),
        # so this None check is always true; kept as-is.
        if dupes != None:
            for dupe in dupes:
                (c,sid,lcr,stat) = dupe
                if lcr == lcrule:
                    # Recovery rules may legitimately be re-sent; skip them.
                    if isinstance(lcr, ManagementLCRecoverRule):
                        self.logger.debug("ManagementLCRecoverRule, ignored.")
                    else:
                        raise LCRuleManagerValidationError(
                            "Duplicate add_rule for %s:%s:%s" %
                            (cookie, switch_id, str(lcrule)))
        # Translate rule into a string so it can be stored
        self.rule_table.insert({'cookie':cookie,
                                'switch_id':switch_id,
                                'status':status,
                                'rule':textrule})
        listrules = self._find_rules({})
        self.logger.debug("%s - %s --- MCEVIK add_rule: listrules: %s" % (self.__class__.__name__, hex(id(self)), str(listrules)))
    def rm_rule(self, cookie, switch_id):
        # Remove LC rule identified by cookie and switch_id
        record = self.rule_table.find_one(cookie=cookie, switch_id=switch_id)
        if record != None:
            self.rule_table.delete(cookie=cookie, switch_id=switch_id)
        else:
            raise LCRuleManagerDeletionError(
                "Cannot delete %s:%s: doesn't exist" %
                (cookie, switch_id))
    def set_status(self, cookie, switch_id, status):
        if status not in VALID_RULE_STATUSES:
            raise LCRuleManagerValidationError(
                "Invalid Rule Status provided: %s" % status)
        # Changes the status of a particular rule
        record = self.rule_table.find_one(cookie=cookie, switch_id=switch_id)
        if record != None:
            # Update keyed on the (cookie, switch_id) pair.
            self.rule_table.update({'cookie':cookie,
                                    'switch_id':switch_id,
                                    'status':status},
                                   ['cookie','switch_id'])
        listrules = self._find_rules({})
        self.logger.debug("%s - %s --- MCEVIK set_status: listrules: %s" % (self.__class__.__name__, hex(id(self)), str(listrules)))
    def _find_rules(self, filter={}):
        # If filter=={}, return all rules.
        # Returns a list of (cookie, switch_id, rule, status) tuples
        # NOTE(review): mutable default argument -- harmless here because the
        # filter dict is only read, never mutated.
        # Validate the filter
        if filter != None:
            if type(filter) != dict:
                raise LCRuleManagerTypeError("filter is not a dictionary: %s" %
                                             type(filter))
            for key in filter.keys():
                if key not in self._valid_table_columns:
                    raise LCRuleManagerValidationError(
                        "filter column '%s' is not a valid filtering field %s" %
                        (key, self._valid_table_columns))
        # Do the search on the table.
        results = self.rule_table.find(**filter)
        # Send Back results (unpickling the stored rule text).
        retval = [(x['cookie'],
                   x['switch_id'],
                   pickle.loads(str(x['rule'])),
                   x['status']) for x in results]
        return retval
    def list_all_rules(self, full_tuple=False):
        # Return every stored rule; with full_tuple=True each entry is a
        # (cookie, switch_id, rule, status) tuple, otherwise just the rule.
        rules = self.rule_table.find()
        self.logger.debug("Retrieving all rules.")
        if full_tuple:
            retval = [(x['cookie'],
                       x['switch_id'],
                       pickle.loads(str(x['rule'])),
                       x['status']) for x in rules]
            return retval
        retval = [pickle.loads(str(x['rule'])) for x in rules]
        return retval
    def get_rules(self, cookie, switch_id, full_tuple=False):
        ''' Returns a list of all rules matching cookie and switch_id.
            Generally, there will be only one rule, but there could be multiple.
            If full_tuple==True, then a list of tuples will be returned:
              (cookie, switch_id, rule, status)
        '''
        listrules = self._find_rules({})
        self.logger.debug("%s - %s --- MCEVIK get_rules: listrules: %s" % (self.__class__.__name__, hex(id(self)), str(listrules)))
        # Get the rule specified by cookie
        rules = self.rule_table.find(cookie=cookie, switch_id=switch_id)
        self.logger.debug("%s - %s --- MCEVIK get_rules: rules: %s" % (self.__class__.__name__, hex(id(self)), str(rules)))
        if full_tuple:
            retval = [(x['cookie'],
                       x['switch_id'],
                       pickle.loads(str(x['rule'])),
                       x['status']) for x in rules]
            self.logger.debug("%s - %s --- MCEVIK get_rules: retval full_tuple: %s" % (self.__class__.__name__, hex(id(self)), str(retval)))
            return retval
        retval = [pickle.loads(str(x['rule'])) for x in rules]
        self.logger.debug("%s - %s --- MCEVIK get_rules: retval : %s" % (self.__class__.__name__, hex(id(self)), str(retval)))
        return retval
    def add_initial_rule(self, rule, cookie, switch_id):
        # Used during initial rule stage of inialization.
        self.logger.debug(
            "Adding a new rule to the _initial_rules_list: %s:%s:%s" %
            (cookie, switch_id, rule.get_data()['rule']))
        self._initial_rules_list.append((cookie, switch_id, rule))
    def initial_rules_complete(self):
        ''' Returns two lists: rules for deletion, rules to be added. None of
            the rules in either of these lists are added or removed from this
            DB. This is just a service for the LC to make life a bit easier.
            NOTE: clear_initial_rules() *must* be called afterwards.
        '''
        delete_list = []
        add_list = []
        # Build up the delete_list:
        #   Go through all installed rules. If it's not in the
        #   _initial_rules_list, add to delete list.
        # Anything left over in the _initial_rules_list is now the add_list
        # Empty the _initial_rules_list for the next reconnection.
        # NOTE: _initial_rules_list is a list of SDXMessageInstallRules
        self.logger.debug("IRC RULE_TABLE %s" % self.rule_table)
        self.logger.debug("IRC _INITIAL_RULES_LIST %s" %
                          self._initial_rules_list)
        # (cookie, switch_id) pairs currently installed in the DB.
        list_of_c_and_s = [(x['cookie'], x['switch_id'])
                           for x in self.rule_table.find()]
        # (cookie, switch_id) pairs in the fresh initial-rules list.
        c_and_s_from_irl = [(c,s) for (c,s,r) in self._initial_rules_list]
        for t in list_of_c_and_s:
            if t not in c_and_s_from_irl:
                # Installed but no longer wanted: schedule for deletion.
                (c,s) = t
                rules = self.get_rules(c,s, True)
                for rule in rules:
                    (c,s,r,t) = rule
                    delete_list.append((r,c,s))
        for t in c_and_s_from_irl:
            if t not in list_of_c_and_s:
                # Wanted but not installed: schedule for addition.
                (c,s) = t
                for (c1,s1,r1) in self._initial_rules_list:
                    if (c==c1 and s==s1):
                        add_list.append((r1,c1,s1))
        return (delete_list, add_list)
    def clear_initial_rules(self):
        ''' Called by LC once current set of initial rules are not needed
            anymore.
            NOTE: this *could* be done by initial_rules_complete, but it would
            be a weird side effect that is dirty. As such, separate function. A
            very complicated separate function.
        '''
        self.logger.debug("Clearing _initial_rules_list")
        self._initial_rules_list = []
| true |
690b0cd91cf500da99389a0f58ca7db9aa693b67 | Python | mkornyev/scheduler | /myTime/management/commands/populate.py | UTF-8 | 2,552 | 2.5625 | 3 | [] | no_license | from django.core.management.base import BaseCommand
from datetime import datetime
from myTime.models import DailyHours, Location, Reservation, Report
# POPULATE SCRIPT
class Command(BaseCommand):
args = '<this func takes no args>'
help = 'A populate script for the current locations & hours.'
    def _createLocations(self):
        """Seed the database with the two CMU locations, their weekly hours,
        and sample reservations/reports for the weight room.

        NOTE(review): the opening/closing times are full datetime objects
        with dummy year/month/day (1, 1, 1) -- presumably only the
        time-of-day component is used by DailyHours; confirm in the model.
        """
        # HOURS
        EIGHT_AM = datetime(1, 1, 1, 8, 0, 0, 0) # 8am
        TEN_AM = datetime(1, 1, 1, 10, 0, 0, 0) # 10am
        FIVE_PM = datetime(1, 1, 1, 17, 0, 0, 0) # 5pm
        SEVEN_PM = datetime(1, 1, 1, 19, 0, 0, 0) # 7pm
        # WEIGAND
        weigand = Location.objects.create(
            name='CMU Weigand Gymnasium',
            token='weigand',
            max_capacity=10
        )
        # Weekday hours 8am-7pm, weekend hours 10am-5pm (day 0 = Sunday).
        weigand.hours.add(
            DailyHours.objects.create(day=0, opening=TEN_AM, closing=FIVE_PM), # SUN
            DailyHours.objects.create(day=1, opening=EIGHT_AM, closing=SEVEN_PM),
            DailyHours.objects.create(day=2, opening=EIGHT_AM, closing=SEVEN_PM),
            DailyHours.objects.create(day=3, opening=EIGHT_AM, closing=SEVEN_PM), # WED
            DailyHours.objects.create(day=4, opening=EIGHT_AM, closing=SEVEN_PM),
            DailyHours.objects.create(day=5, opening=EIGHT_AM, closing=SEVEN_PM),
            DailyHours.objects.create(day=6, opening=TEN_AM, closing=FIVE_PM), # SAT
        )
        weigand.save()
        # WEIGHT ROOM
        gym = Location.objects.create(
            name='CMU Weight Room',
            token='gym',
            max_capacity=15
        )
        gym.hours.add(
            DailyHours.objects.create(day=0, opening=TEN_AM, closing=FIVE_PM), # SUN
            DailyHours.objects.create(day=1, opening=EIGHT_AM, closing=SEVEN_PM),
            DailyHours.objects.create(day=2, opening=EIGHT_AM, closing=SEVEN_PM),
            DailyHours.objects.create(day=3, opening=EIGHT_AM, closing=SEVEN_PM), # WED
            DailyHours.objects.create(day=4, opening=EIGHT_AM, closing=SEVEN_PM),
            DailyHours.objects.create(day=5, opening=EIGHT_AM, closing=SEVEN_PM),
            DailyHours.objects.create(day=6, opening=TEN_AM, closing=FIVE_PM), # SAT
        )
        gym.save()
        # RESERVATIONS
        # Sample 8am-10am reservations on Monday and Wednesday.
        gym.reservations.add(
            Reservation.objects.create(day=1, start_time=EIGHT_AM, end_time=TEN_AM),
            Reservation.objects.create(day=3, start_time=EIGHT_AM, end_time=TEN_AM),
        )
        # REPORTS
        gym.reports.add(
            Report.objects.create(is_full=True, wait_time_minutes=15),
            # Report.objects.create(is_full=False),
        )
def handle(self, *args, **options):
self._createLocations() | true |
799ab62fa2e84b39723c59691682f1b6bb7209f2 | Python | avatar333/py2learning | /01-HelloWorld.py | UTF-8 | 2,157 | 4.34375 | 4 | [] | no_license |
#Import regexp module
import re
# Python 2 learning script: each section demonstrates one language feature.
# (Uses Python 2 print statements, e.g. `print "x"`, so it will not run
# under Python 3.)
print ("Hello World!\tkTest")
# Print the output of a calculation
print (1+1)
# Assign value of calculation to a variable
NUM1 = 1+1
# print a string and variable
print "NUM1 =", NUM1, NUM1
# split a string, specifying a delimiter, and then which element
print ("Hello World").split(" ")[0]
# Boolean examples
print 5 == 5
# NOTE(review): `is` compares identity, not equality -- comparing string
# literals with `is` relies on interning and is not a reliable equality test.
print "this" is "This"
print "this" is not "This"
# Lists
print ["This", "is", "a", "list"][0]
# Dictionaries
print {"Name": "Kevin", "Surname": "Pillay"}["Name"]
# Function example One
def function_one():
    print ("Function One")
function_one()
# Function example Two
def function_two(str1, str2):
    print (str1)
    print (str2)
function_two("String 1", "String 2")
# Function example Three - setting defaults and keyword arguments
def function_three(name = "Someone", age = "Unknown"):
    print "My name is", name, "and my age is ", age
function_three(age=3000, name="Narg")
# Function example Four - infinite arguments
def function_four(*array):
    for item in array:
        print"Item Value:",item
function_four("Item1", "Item2", "Item3")
# Function example Five - returning values
def function_five(num1, num2):
    return num1+num2
print"Adding:",function_five(1,2)
# Conditional statements
VAR1 = 1
if VAR1 == 2:
    print"VAR1 = 2"
elif VAR1 == 3:
    print"VAR1 = 3"
else:
    print"VAR1 not equal to 2 OR 3"
# Loop - "for" loop example
mylist = [1,2,3,4,5]
for element in mylist:
    print element
# Loop - "while" loop example
state = True
myval = 1
while state:
    if myval == 20:
        print "All done!"
        state = False
    else:
        print "myval =", myval
        myval += 1
# RegEx: re.sub(pattern, replacement, string) examples
mystr = "THIS IS a string, with upper, lower case chars. It also has 1 2 3 4 numbers"
# strip upper-case letters
newstr = re.sub('[A-Z]', '', mystr)
print newstr
# strip lower-case letters
newstr = re.sub('[a-z]', '', mystr)
print newstr
newstr = re.sub('[,]', '!!', mystr)
print newstr
newstr = re.sub('[,.]', '!!', mystr)
print newstr
newstr = re.sub('[0-9A-Za-z]', '!!', mystr)
print newstr
# NOTE: inside a character class, '+' and '"' are literal characters, so
# this matches digits, '+', '"' and spaces.
newstr = re.sub('[0-9+" "]', '~', mystr)
print newstr
# negated class: replace everything that is NOT a digit
newstr = re.sub('[^0-9]', '~', mystr)
print newstr
print newstr | true |
d78151dcb982721e40d1cb1386d42524963ebcd4 | Python | venkatadri123/Python_Programs | /Sample_programs/32max.py | UTF-8 | 153 | 3.640625 | 4 | [] | no_license | #To find a giggest number in a list.
# Linear scan for the largest element of the list.
l=[10,12,14,15,-20,22,11]
# NOTE(review): `max` shadows the builtin max() from here on.
max=l[0]
n=len(l)
for i in range(1,n):
    if l[i]>max:
        max=l[i]
print('max=',max) | true |
f70cc154ead2abc8ce5a6a177da736265f57769f | Python | mooncrater31/pdfExtraction | /csvToPopularity.py | UTF-8 | 3,445 | 2.703125 | 3 | [] | no_license | import pandas as pd
import numpy
from time import time
import csv
import gc
import os
def make_popularity_csv(csvName, state, yearrange):
    """Write ``<csvName minus extension>_popularity.csv`` with per-name-part
    frequencies.

    Every ``elector_name`` in ``csvName`` is split on spaces and each part is
    counted.  Output rows are sorted by descending popularity and carry a
    running suffix sum (this part's count plus the counts of all
    less-popular parts), followed by the ``yearrange`` and ``state`` labels.

    Fixes: removed the pointless manual ``gc.collect()`` / dict reset, and
    the redundant second pass that re-cast already-int counts with ``int()``.
    """
    df = pd.read_csv(csvName)
    counts = {}
    for name in df['elector_name'].values:
        for part in name.split(" "):
            counts[part] = counts.get(part, 0) + 1
    sortedNames = sorted(counts.items(), key=lambda x: x[1], reverse=True)
    # Running suffix sum: starts at the grand total, shrinks as rows emit.
    total = sum(c for _, c in sortedNames)
    rows = []
    for part, count in sortedNames:
        rows.append([part, count, total, yearrange, state])
        total -= count
    # NOTE(review): split(".") misbehaves for paths containing extra dots
    # (e.g. "./x.csv"); kept for compatibility with sibling functions that
    # derive the same path the same way.
    with open(csvName.split(".")[0] + '_popularity.csv', 'w', newline='',
              encoding='UTF-8') as f:
        writer = csv.writer(f)
        writer.writerow(['Name', 'Popularity', 'Suffix Sum', 'Year Range', 'State'])
        writer.writerows(rows)
import csv
def divideAccordingToBirthYear(CSV):
    """Bucket the voter rows of ``CSV`` into 5-year birth cohorts.

    Returns {bucket_index: [row, ...]}.  Column 7 is 'age' and column 12 is
    'year' (the roll year), matching the header written by dictToFiles;
    birth year = year - age.

    NOTE(review): the bucketing is asymmetric -- birth years divisible by 5
    land in the *lower* bucket, so bucket k holds years
    1900+5(k-1)+1 .. 1900+5k, which is offset from the bucket's filename
    produced by noToFile (labelled 1900+5k .. 1900+5k+4).  Confirm intent.
    """
    mydict = {}
    with open(CSV,'r',newline='',encoding='UTF-8') as F:
        reader = csv.reader(F)
        for i,row in enumerate(reader):
            if(i!=0):  # skip the header row
                age = int(row[7])
                currYear= int(row[12])
                birthYear = currYear-age
                fileno = 0
                if(float(birthYear)%5==0):
                    fileno = int((birthYear-1900)/5)
                else:
                    fileno = int((birthYear-1900)/5)+1
                # Debug trace of the bucket assignment.
                print('age '+str(age)+' fileno :'+str(fileno)+' currYear :'+str(currYear))
                if fileno in mydict:
                    mydict[fileno].append(row)
                else :
                    mydict[fileno] = [row]
    return mydict
def noToFile(fileno):
    """Map a 5-year bucket index to its CSV filename, e.g. 0 -> "1900_1904.csv"."""
    start = 1900 + fileno * 5
    return "{}_{}.csv".format(start, start + 4)
def dictToFiles(mydict,state):
    """Write each birth-year cohort to its own CSV (named by noToFile) with
    the full voter header, then generate that cohort's popularity CSV.

    The year-range label passed to make_popularity_csv is the filename stem
    (e.g. "1900_1904").
    """
    for key in mydict.keys():
        filename = noToFile(key)
        with open(filename,'w',newline='',encoding='UTF-8') as f:
            writer = csv.writer(f)
            writer.writerow(['global_number','number','voter_id', 'elector_name','father_or_husband_name','has_husband','house_no',
                 'age','sex','ac_name','parl_constituency','part_no','year','state','filename','main_town',
                 'police_station','mandal','revenue_division','district','pin_code','polling_station_name',
                 'polling_station_address','net_electors_male','net_electors_female','net_electors_third_gender',
                 'net_electors_total','change'])
            writer.writerows(mydict[key])
        make_popularity_csv(filename,state,filename.split(".")[0])
def csvYearWisePopularity(CSV, opCSVName, state):
    """Build a combined name-popularity table in `opCSVName`.

    Produces one section per 5-year birth cohort plus an 'all' section for
    the whole electorate, then deletes the per-cohort temporary files.

    CSV: source voter file; opCSVName: output path; state: state label
    stamped on every row.
    """
    mydict = divideAccordingToBirthYear(CSV)
    dictToFiles(mydict, state)
    dfLst = []
    for key in mydict.keys():
        dfLst.append(pd.read_csv(noToFile(key).split(".")[0] + '_popularity.csv'))
    # Bug fix: the state label was hard-coded to 'jk' here, ignoring the
    # caller-supplied `state` used everywhere else.
    make_popularity_csv(CSV, state, 'all')
    dfLst.append(pd.read_csv(CSV.split(".")[0] + '_popularity.csv'))
    cc = pd.concat(dfLst)
    cc.to_csv(opCSVName, index=False)
    for key in mydict.keys():  # Deletion of temporary files
        filename1 = noToFile(key)
        filename2 = noToFile(key).split(".")[0] + '_popularity.csv'
        if os.path.isfile(filename1):
            os.remove(filename1)
        if os.path.isfile(filename2):
            os.remove(filename2)
| true |
b1605cb7512204e42ab0ae22d4bafcc0a23e8b0e | Python | diegoPaladino/alarme_temporal | /tabela/format_string.py | UTF-8 | 705 | 3.171875 | 3 | [] | no_license | # format_string
# source: https://stackoverflow.com/questions/53908134/what-is-20-format-string-meaning-in-python
# Language popularity ranks per survey year; 0 means "not ranked that year".
popularity = [["Language", 2017, 2012, 2007, 2002, 1997, 1992, 1987],
              ["Java", 1, 2, 1, 1, 15, 0, 0],
              ["C", 2, 1, 2, 2, 1, 1, 1],
              ["C++", 3, 3, 3, 3, 2, 2, 5],
              ["C#", 4, 4, 7, 13, 0, 0, 0],
              ["Python", 5, 7, 6, 11, 27, 0, 0],
              ["Visual Basic .NET", 6, 17, 0, 0, 0, 0, 0],
              ["PHP", 7, 6, 4, 5, 0, 0, 0],
              ["JavaScript", 8, 9, 8, 7, 23, 0, 0],
              ["Perl", 9, 8, 5, 4, 4, 10, 0]]
# "{:<20}" left-aligns the name in a 20-char column; each "{:>4}" right-aligns
# one rank in a 4-char column, so the rows print as an aligned table.
format_string = "{:<20} {:>4} {:>4} {:>4} {:>4} {:>4} {:>4} {:>4}"
for l in popularity: print(format_string.format(*l))
5373dccf58a8d45c734145302a3c1e887ddabde8 | Python | vishwanath79/PythonMisc | /20Pythonlibs/collection.py | UTF-8 | 351 | 3.125 | 3 | [] | no_license | from collections import OrderedDict, defaultdict, namedtuple
from string import ascii_lowercase
# zip stops at the shorter iterable, so only 'a'..'d' are paired with 0..3.
print(OrderedDict(zip(ascii_lowercase, range(4))))
# specify a default value for all new keys: a missing key yields list() ([])
d = defaultdict(list)
print(d['a'])
# namedtuple builds a lightweight record type with attribute access.
A = namedtuple('A', 'count enabled color')
tup = A(count=1, enabled=True, color="red")
print(tup.count)
print(tup.color)
| true |
8713091ae7ceb5ba5d6fb628365509655664341a | Python | cilame/any-whim | /感兴趣的算法/QQ_TEA算法.py | UTF-8 | 2,899 | 2.796875 | 3 | [] | no_license | import struct
def Hex2Bytes(hexstr: str):
    """Decode a (possibly whitespace-padded) hex string into raw bytes."""
    return bytes.fromhex(hexstr.strip())
class QQ_TEA():
    """QQ-flavoured TEA cipher: 64-bit blocks, 128-bit key.

    Uses 16 TEA rounds (standard TEA uses 32) and a CBC-like chaining
    scheme with random-looking 0xDC padding, as in the QQ protocol.
    """
    def __init__(self, secret_key):
        # secret_key: 16 bytes, consumed as four big-endian 32-bit words.
        self.secret_key = secret_key
    def xor(self,a, b):
        # XOR two 8-byte blocks, treating each as two big-endian u32 halves.
        op = 0xffffffff
        a1,a2 = struct.unpack(b'>LL', a[0:8])
        b1,b2 = struct.unpack(b'>LL', b[0:8])
        return struct.pack(b'>LL', ( a1 ^ b1) & op, ( a2 ^ b2) & op)
    def code(self,v, k):
        # Encrypt one 8-byte block `v` with key `k` (16 TEA rounds).
        n=16
        op = 0xffffffff  # mask emulating 32-bit unsigned overflow
        delta = 0x9e3779b9  # TEA magic constant (derived from the golden ratio)
        k = struct.unpack(b'>LLLL', k[0:16])
        y, z = struct.unpack(b'>LL', v[0:8])
        s = 0
        for i in range(n):
            s += delta
            y += (op &(z<<4))+ k[0] ^ z+ s ^ (op&(z>>5)) + k[1]
            y &= op
            z += (op &(y<<4))+ k[2] ^ y+ s ^ (op&(y>>5)) + k[3]
            z &= op
        r = struct.pack(b'>LL',y,z)
        return r
    def decipher(self,v, k):
        # Decrypt one 8-byte block: run the 16 rounds of code() in reverse.
        n = 16
        op = 0xffffffff
        y, z = struct.unpack(b'>LL', v[0:8])
        a, b, c, d = struct.unpack(b'>LLLL', k[0:16])
        delta = 0x9E3779B9
        # s starts at 16*delta (delta << 4) and is unwound back to 0.
        s = (delta << 4)&op
        for i in range(n):
            z -= ((y<<4)+c) ^ (y+s) ^ ((y>>5) + d)
            z &= op
            y -= ((z<<4)+a) ^ (z+s) ^ ((z>>5) + b)
            y &= op
            s -= delta
            s &= op
        return struct.pack(b'>LL', y, z)
    def encrypt(self,v):
        """Pad plaintext `v` to a block multiple and encrypt with chaining."""
        END_CHAR = b'\0'
        FILL_N_OR = 0xF8
        vl = len(v)
        # Pad so that 1 header byte + filln fill bytes + data + 7 zero bytes
        # is a multiple of 8; filln is always in [2, 9].
        filln = (8-(vl+2))%8 + 2
        fills = b''
        for i in range(filln):
            fills = fills + bytes([220])
        # Header byte encodes (filln - 2) in its low 3 bits.
        v = ( bytes([(filln -2)|FILL_N_OR])
              + fills
              + v
              + END_CHAR * 7)
        tr = b'\0'*8  # previous ciphertext block (IV = zeros)
        to = b'\0'*8  # previous pre-encryption block (IV = zeros)
        r = b''
        o = b'\0' * 8
        for i in range(0, len(v), 8):
            # CBC-like chaining: XOR with previous ciphertext before
            # encrypting, then XOR the result with the previous plain block.
            o = self.xor(v[i:i+8], tr)
            tr = self.xor( self.code(o, self.secret_key), to)
            to = o
            r += tr
        return r
    def decrypt(self,v):
        """Reverse encrypt(); return the plaintext, or None if the 7-byte
        zero tail check fails (wrong key or corrupted data)."""
        l = len(v)
        prePlain = self.decipher(v, self.secret_key)
        # Low 3 bits of the first plain byte give the pad length - 2.
        pos = (prePlain[0] & 0x07) +2
        r = prePlain
        preCrypt = v[0:8]
        for i in range(8, l, 8):
            # Undo the double-XOR chaining used in encrypt().
            x = self.xor(self.decipher(self.xor(v[i:i+8], prePlain),self.secret_key ), preCrypt)
            prePlain = self.xor(x, preCrypt)
            preCrypt = v[i:i+8]
            r += x
        if r[-7:] != b'\0'*7:
            return None
        # Strip the header byte, the fill bytes and the 7-byte zero tail.
        return r[pos+1:-7]
if __name__ == '__main__':
    # Demo: encrypt then decrypt a UTF-8 string and round-trip it.
    secret_key = Hex2Bytes('11 11 11 11 11 11 11 11 11 11 11 11 11 11 11 11')
    QQ = QQ_TEA(secret_key)
    origin = '你好啊兄弟,'
    plaintext = bytes(origin,encoding = "utf-8")
    enc = QQ.encrypt(plaintext)
    dec = QQ.decrypt(enc)
    # dec should equal plaintext; dec.decode() recovers the original text.
    print(origin)
    print(plaintext)
    print(enc)
    print(dec)
    print(dec.decode())
5bdc662e8c529907cdb479eb36cdb9aec3414a0e | Python | Aasthaengg/IBMdataset | /Python_codes/p00005/s129430195.py | UTF-8 | 149 | 2.609375 | 3 | [] | no_license | import sys
from math import gcd  # fractions.gcd was removed in Python 3.9; math.gcd replaces it

# For each "a b" line on stdin, print the GCD and the LCM of the pair.
# (Rewritten from a side-effect list comprehension into a plain loop.)
for a, b in (map(int, line.split()) for line in sys.stdin):
    g = gcd(a, b)
    print("{} {}".format(g, a * b // g))
8fbc9d787c31a78fea0068ce89455ee37ad0eaad | Python | o11c/typeshed | /builtins/2.7/_random.pyi | UTF-8 | 365 | 2.59375 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | from typing import Optional, Union, Any
class Random(object):
    """Type stub for CPython 2.7's C-level ``_random.Random`` generator."""
    # NOTE(review): the second positional parameter is literally named
    # ``object`` in this stub — presumably mirroring the C signature; confirm.
    def __init__(self, seed: Optional[Union[int, Any]] = ..., object = ...) -> None: ...
    def getstate(self) -> tuple: ...
    def setstate(self, state: tuple) -> None: ...
    # random() yields a float in [0.0, 1.0).
    def random(self) -> float: ...
    # getrandbits(k) yields an int with k random bits.
    def getrandbits(self, k: int) -> int: ...
    # jumpahead() is Python-2-only; it was removed in Python 3.
    def jumpahead(self, i: int) -> None: ...
| true |
af323b3cd19159ab09ea36daf6c822bf9918e9de | Python | rczyrnik/ProjectEuler | /E037_TruncatablePrimes.py | UTF-8 | 1,288 | 4.1875 | 4 | [] | no_license | '''
The number 3797 has an interesting property. Being prime itself,
it is possible to continuously remove digits from left to right,
and remain prime at each stage: 3797, 797, 97, and 7.
Similarly we can work from right to left: 3797, 379, 37, and 3.
Find the sum of the only eleven primes that are
both truncatable from left to right and right to left.
NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.
'''
def left_to_right(num):
    """True if every left-truncation of `num` (dropping digits from the
    left, one at a time) appears in the global `primes` list of strings."""
    return all(num[i:] in primes for i in range(1, len(num)))
def right_to_left(num):
    """True if every right-truncation of `num` (dropping digits from the
    right, one at a time) appears in the global `primes` list of strings."""
    return all(num[:-i] in primes for i in range(1, len(num)))
if __name__ == "__main__":
with open("PrimesToOneMillion.txt") as f:
data = f.read()
primes = [x.strip('\n') for x in data.split(',')]
primes1379 = []
bad_nums = '024568'
good_nums = '379'
for x in primes:
if not any(num in x[1:] for num in bad_nums):
primes1379.append(x)
my_sum = 0
for p in primes1379:
if left_to_right(p) and right_to_left(p):
print(p)
if int(p) >10: my_sum+=int(p)
print(my_sum)
# print('3137' in primes)
# print(primes379)
| true |
b0403a8800cf4ae8e30a8b28657887fc956d6300 | Python | mahtabfarrokh/classic-search | /Astar.py | UTF-8 | 2,436 | 3.0625 | 3 | [] | no_license | class AStar:
    def __init__(self, initial_state, actions, result, goal_test, get_cost, heuristic):
        """Store the search-problem callbacks.

        initial_state: () -> start state
        actions:       state -> iterable of applicable actions
        result:        (state, action) -> successor state
        goal_test:     state -> bool
        get_cost:      (state, state) -> step cost
        heuristic:     state -> estimated remaining cost to the goal
        """
        self.f = []        # frontier: entries are [path, g, actions, h]
        self.e = []        # explored (closed) list of states
        self.res = []      # result buffer (kept for compatibility; unused here)
        self.visited = []  # every generated state, for statistics
        self.initial_state = initial_state
        self.actions = actions
        self.result = result
        self.goal_test = goal_test
        self.get_cost = get_cost
        self.heuristic = heuristic
        self.max_memory = 0  # peak of len(frontier) + len(closed list)
    def search(self):
        """Best-first expansion of the frontier `self.f`.

        Each frontier entry is [path, g, actions, h]: the state path so far,
        its accumulated cost g, the action sequence, and the heuristic h of
        the path's last state. Returns [actions, path, g, h] for the first
        goal state selected for expansion, or None if the frontier empties.
        """
        while self.f:
            self.max_memory = max(self.max_memory, len(self.f) + len(self.e))
            # Linear scan for the frontier entry with minimal f = g + h.
            x = self.f[0]
            minv = x[1]
            path = x[0]
            udru = x[2]
            heur = x[3]
            for v in self.f:
                if v[1] + v[3] < minv + heur:
                    minv = v[1]
                    path = v[0]
                    udru = v[2]
                    heur = v[3]
            node = path[-1]
            # Prune strictly worse frontier entries that end in the same state.
            for v in self.f:
                p = v[0]
                if p[-1] == node and v[1] + v[3] > minv + heur:
                    self.f.remove(v)
            # Goal test on selection (not generation), as A* requires.
            if self.goal_test(node):
                return [udru, path, minv, heur]
            self.f.remove([path, minv, udru, heur])
            if node not in self.e:
                self.e.append(node)
                for act in self.actions(node):
                    v = self.result(node, act)
                    if v not in self.e:
                        # NOTE(review): step cost is taken between path[0]
                        # (the *start* state) and v, not between node and v —
                        # confirm this matches get_cost's intended semantics.
                        c = self.get_cost(path[0], v) + minv
                        heur = self.heuristic(v)
                        # Copy the path and action list before extending them
                        # so sibling frontier entries are not aliased.
                        path2 = []
                        for p in path:
                            path2.append(p)
                        path2.append(v)
                        udru2 = []
                        for m in udru:
                            udru2.append(m)
                        udru2.append(act)
                        self.f.append([path2, c, udru2, heur])
                        if v not in self.visited:
                            self.visited.append(v)
    def search_astar(self):
        """Reset the search state, run A* from the initial state, and print
        the resulting path plus statistics (or a no-path message)."""
        start = self.initial_state()
        # Seed the frontier with the start state: empty path cost, no actions.
        self.f = [[[start], 0, [], 0]]
        self.e = []
        self.res = []
        p = self.search()
        if not p:
            print("there is no path")
        else:
            print("path found: ")
            print(p[0])
            print("num visited: ", len(self.visited))
            print("num closed list: ", len(self.e))
            print("max memory use: ", self.max_memory)
            print("path cost : ", p[2]+ p[3])
5416ded41a7152f0836a18b4206cb37ee862d271 | Python | fjparedesb/cursos | /python/beyond_basics/password_check.py | UTF-8 | 251 | 3.75 | 4 | [] | no_license |
correct_password = "123"
name = input("Ingrese su nombre: ")
password = input("Ingrese su contraseña: ")
while correct_password != password:
password = input("Contraseña erronea, intente de nuevo: ")
print("Hola %s ya estas logueado" % name) | true |
d0ebba677f186fbb76039cbe0aae9320b79328a1 | Python | BlackdogCEO/my-leetcode-venture | /7.reverse-integer.py | UTF-8 | 683 | 3 | 3 | [] | no_license | class Solution:
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
b = 0
a = 0
c = 1
if x < 0:
x = -x
c = -1
while x != 0:
if (2147483647 - a) / 10 < b:
a = b = 0
break
b = b * 10 + a
a = x % 10
x = x // 10
if (2147483647 - a) / 10 < b:
a = b = 0
b = b * 10 + a
return c * b
"""
Runtime: 56 ms, faster than 83.34% of Python3 online submissions for Reverse Integer.
the point is to examine overflow of multiple and sum
i use mod and round to get each number
"""
| true |
deb2b0e14831055cc3f44af9c9584aad126a171d | Python | samlex20/Google-Maps-Scraper | /mapsscraper.py | UTF-8 | 5,068 | 2.90625 | 3 | [] | no_license | import requests, json, time, csv, sys
# Covers entire northern virginia
# 38.837211,-77.412990 3 mi
# 38.916717,-77.503911 4mi
# 38.915541,-77.404331 3.5 mi
# 39.025810,-77.393600 4 mi
# 38.921594,-77.248507 4 mi
# 38.843642,-77.284189 3.7 mi
# 38.845399,-77.107912 4 mi
# 38.756228,-77.477422 4.3 mi
# 38.654806,-77.261290 8 mi
# 38.758395,-77.139508 3.7mi
# 38.826382,-77.660691 5 mi
# 39.105553,-77.564537 6 mi
# 38.828603, -77.455112 16 mi
# Accumulated results: one dict per unique place found.
businessList = []
# Google Maps Platform API key (placeholder — supply your own).
apiKey = "YOURMAPSAPI"
# Search centers ("lat,lng") paired index-by-index with radiusList below.
coordinates = ["38.837211,-77.412990", "38.916717,-77.503911", "38.915541,-77.404331" , "39.025810,-77.393600"
    , "38.921594,-77.248507", "38.843642,-77.284189", "38.845399,-77.107912", "38.756228,-77.477422"
    , "38.654806,-77.261290", "38.758395,-77.139508", "38.826382,-77.660691", "39.105553,-77.564537"
    , "38.828603,-77.455112"]
# Search radius in meters for each center (same order as coordinates).
radiusList = ["4800", "6400", "5600", "6400", "6400", "5950", "6400", "6900", "12800", "5950", "8000", "9600", "25000"]
# Running counters: places seen in the current pass / duplicates skipped.
numOfBusinesses = 0
duplicates = 0
# Places API endpoints and the keywords to query each of them with.
searchType = ["nearbysearch", "textsearch"]
keyword = ["remodel", "kitchen+bath", "kitchen+cabinets", "kitchen+and+bath"]
def main():
    """Run every keyword through both Places search endpoints, then write
    all collected businesses to CSV."""
    global numOfBusinesses
    global duplicates
    global keyword
    global searchType
    for k in range(len(keyword)):
        for z in range(2):
            print("Running turn: " + str(z))
            print("Search Type: " + searchType[z % 2])
            print("Keyword: " + keyword[k])
            start = time.time()
            businessFinder(searchType[z % 2], keyword[k])
            end = time.time()
            print("Total of " + str(numOfBusinesses) +" businesses found")
            print("Dublicates found: " + str(duplicates))
            print("New records: " + str(numOfBusinesses - duplicates))
            print("Took " + str(end - start) + "seconds")
            # Reset the duplicate counter per pass and pause between passes
            # to stay under the API rate limit.
            duplicates = 0
            time.sleep(10)
    print("Searching process completed.")
    print("Duplicates found: " + str(duplicates))
    print("Writing to file...")
    writeToFile(businessList)
    print("Writing Completed.")
    print("Done")
def businessFinder(sType, sWord):
    """Query one Places endpoint (`sType`) for `sWord` around every search
    center, following pagination, and feed each result to infoReader."""
    global numOfBusinesses
    numOfBusinesses = 0
    for k in range(len(radiusList)):
        response = requests.get("https://maps.googleapis.com/maps/api/place/"+ sType + "/json?"
                        + "location=" + coordinates[k]
                        + "&radius=" + radiusList[k]
                        + "&keyword=" + sWord # KEYWORD
                        + "&query=" + sWord # KEYWORD
                        + "&key=" + apiKey)
        while True:
            result = response.json()
            bList = result["results"]
            if len(bList) == 0:
                # Bug fix: `result` is a dict; concatenating it to a str
                # raised TypeError. Stringify it before printing.
                print("PROBLEM DETECTED: " + str(result))
                break
            # iterate through businesses on this page
            for i in range(len(bList)):
                # Convert dict to JSON then JSON to Python dict
                info = json.dumps(bList[i])
                info = json.loads(info)
                numOfBusinesses += 1
                infoReader(info, numOfBusinesses)
            if "next_page_token" in result:
                # The token needs a short delay before it becomes valid.
                response.close()
                time.sleep(5)
                response = requests.get("https://maps.googleapis.com/maps/api/place/"+ sType + "/json?"
                        + "pagetoken=" + result["next_page_token"] # next page
                        + "&key=" + apiKey)
            else:
                time.sleep(5)
                print("exited")
                break
def infoReader(info, k):
    """Fetch contact details for one place and append it to businessList.

    info: one place dict from a search response.
    k:    running business count (unused; kept for the call signature).
    """
    print('.', end='', flush=True)  # progress tick
    if dublicateChecker(info):
        time.sleep(1)  # stay under the Place Details rate limit
        getter = requests.get("https://maps.googleapis.com/maps/api/place/details/json?"
                    + "place_id=" + info["place_id"]
                    + "&fields=name,website,formatted_phone_number,url"
                    + "&key=" + apiKey)
        # Details live under "result"; the former json.dumps/json.loads
        # round-trip was a no-op and has been removed.
        result = getter.json()["result"]
        business = {
            "place_id": info["place_id"],
            "name": info["name"],
        }
        # Bug fix: the original tested for "address" but read
        # "formatted_address", which could raise KeyError or silently
        # drop the address when the keys disagreed.
        if "formatted_address" in info:
            business["address"] = info["formatted_address"]
        if "formatted_phone_number" in result:
            business["phone"] = result["formatted_phone_number"]
        if "website" in result:
            business["website"] = result["website"]
        if "url" in result:
            business["google"] = result["url"]
        businessList.append(business)
def dublicateChecker(info):
    """Return True when `info` is a new place; when its place_id is already
    in businessList, bump the global duplicate counter and return False."""
    global duplicates
    for known in businessList:
        if known["place_id"] == info["place_id"]:
            duplicates += 1
            return False
    return True
def writeToFile(bList):
    """Dump the collected business dicts to test.csv; the columns come from
    the first record's keys."""
    fieldnames = bList[0].keys()
    with open('test.csv', 'w', newline='') as handle:
        writer = csv.DictWriter(handle, fieldnames)
        writer.writeheader()
        writer.writerows(bList)
if __name__ == "__main__":
main() | true |
20f6da7cdf344ce1f5bb9a969fc5b7a6774343c4 | Python | andrewrosenkilde/LPTHW | /Exercises/ex4.py | UTF-8 | 1,388 | 4.28125 | 4 | [] | no_license | #defines the variable "cars".
# Python 2 script (uses print statements). Carpool arithmetic exercise.
cars = 100
# seats per car
space_in_a_car = 4
drivers = 30
passengers = 90
# cars left idle because there is no driver for them
cars_not_driven = cars - drivers
cars_driven = drivers
# total seats across the cars that actually have a driver
carpool_capacity = cars_driven * space_in_a_car
# passengers per driven car (integer division under Python 2)
average_passengers_per_car = passengers / cars_driven
# report each computed figure
print "There are", cars, "cars available."
print "There are only", drivers, "drivers available."
print "There will be", cars_not_driven, "empty cars today."
print "We can transport", carpool_capacity, "people today."
print "We have", passengers, "to carpool today."
print "We need to put about", average_passengers_per_car, "in each car."
# Study Drill 0's error means that the author
# did not define the variable car_pool_capacity
# and then called for the variable on line 8 when it
# did not exist.
# Study Drill 1:
# It is not necessary, you get 120 instead of 120.0
# if you use a whole number instead of a float.
# CSQ 3: %-formatting vs comma-separated print arguments:
print "Hey %s there." % "you"
# vs:
print "Hey", "you", "there."
8fb0a5fd4cd75e8b67f1b0ba21fb2f8a6719b58d | Python | anna-jego/simplon_nantes | /hanoi/boulangerie-1-a-completer-checkpoint.py | UTF-8 | 3,500 | 3.953125 | 4 | [] | no_license | # Micro-monde économique de la boulangerie
# Gestion de la production
class Produit:  # Abstract base class for every good in the economy
    """A stock of some good: a quantity plus a human-readable label."""
    def __init__(self):
        self.quantite = 0  # units currently in stock
        self._type = 'Produit non défini'  # display label (protected attribute)

    def __repr__(self):
        return '{} : {}'.format(self._type, self.quantite)
class Pain(Produit):
    """Bread: the good every character consumes."""
    def __init__(self, quantite=0):
        # Consistency fix: initialise via the base class, exactly as the
        # sibling classes Ble and Farine do.
        Produit.__init__(self)
        self.quantite = quantite
        self._type = 'Pain'  # display label (protected attribute)
class Ble(Produit):
    """Wheat (blé): the raw good grown by the farmer."""
    def __init__(self, quantite=0):
        Produit.__init__(self)
        self.quantite = quantite
        self._type = 'Blé' # display label (protected attribute)
class Farine(Produit):
    """Flour (farine): ground by the miller from wheat."""
    def __init__(self, quantite=0):
        Produit.__init__(self)
        self.quantite = quantite
        self._type = 'Farine' # display label (protected attribute)
class Personnage:  # Abstract base class
    """Base class for every character in the bakery economy.

    Every character owns bread and starts the game with 2 loaves.
    """
    def __init__(self):
        # All characters can own bread; each starts the game with 2 loaves.
        self.pain = Pain(2)

    def manger(self):
        """Eat one loaf of bread.

        Returns True when a loaf was available, False otherwise.
        """
        if self.pain.quantite > 0:
            self.pain.quantite -= 1
            return True  # the character managed to eat
        else:
            return False  # the character could not eat

    def produire(self):
        """Produce this character's good (abstract; subclasses override).

        The exercise template left `pass` here, which silently returned
        None; raise instead so a forgotten override is detected at once.
        """
        raise NotImplementedError
class Paysan(Personnage):
    """The farmer: grows wheat (blé)."""
    def __init__(self):
        Personnage.__init__(self)
        self.ble = Ble()

    def __repr__(self):
        representation = 'Paysan' + '\n\t' + \
                         repr(self.pain) + '\n\t' +\
                         repr(self.ble)
        return representation

    def produire(self):
        """Grow one unit of wheat.

        Growing wheat is always considered possible, so this always
        succeeds and returns True.
        """
        self.ble.quantite += 1
        return True
class Meunier(Personnage):
    """The miller: grinds wheat into flour."""
    def __init__(self):
        Personnage.__init__(self)
        self.farine = Farine()
        self.ble = Ble()

    def __repr__(self):
        representation = 'Meunier' + '\n\t' + \
                         repr(self.pain) + '\n\t' +\
                         repr(self.ble) + '\n\t' +\
                         repr(self.farine)
        return representation

    def produire(self):
        """Grind one unit of wheat into one unit of flour.

        Returns True when a unit of wheat was available, False otherwise.
        """
        if self.ble.quantite > 0:
            self.ble.quantite -= 1
            self.farine.quantite += 1
            return True
        return False
class Boulanger(Personnage):
    """The baker: bakes bread from flour."""
    def __init__(self):
        Personnage.__init__(self)
        self.farine = Farine()

    def __repr__(self):
        representation = 'Boulanger' + '\n\t' + \
                         repr(self.pain) + '\n\t' +\
                         repr(self.farine)
        return representation

    def produire(self):
        """Bake one loaf of bread from one unit of flour.

        Returns True when a unit of flour was available, False otherwise.
        """
        if self.farine.quantite > 0:
            self.farine.quantite -= 1
            self.pain.quantite += 1
            return True
        return False
# ### Programme Principal ### #
paysan = Paysan()
paysan.produire()
print(paysan)

# meunier = Meunier()
# print(meunier)
# meunier.ble.quantite = 1
# print(meunier)
# if meunier.produire():
#     print('Le meunier produit de la farine')
# else:
#     print('Production impossible, le meunier n\' a pas de blé')
# print(meunier)

# Test the baker's production in both cases: with no flour produire()
# must fail; with one unit of flour it must succeed.
boulanger = Boulanger()
print(boulanger)
if boulanger.produire():
    print('Le boulanger produit du pain')
else:
    print('Production impossible, le boulanger n\'a pas de farine')
boulanger.farine.quantite = 1
if boulanger.produire():
    print('Le boulanger produit du pain')
else:
    print('Production impossible, le boulanger n\'a pas de farine')
print(boulanger)
| true |