blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
81a20ede57a2393e8832da6e660876845740fb71 | Python | ian-chelaru/Puzzle | /controller.py | UTF-8 | 1,739 | 3.03125 | 3 | [] | no_license | from problem import Problem
class Controller:
    """Runs search strategies (BFS / greedy best-first) over a Problem.

    A "state" here is whatever ``Problem.expand`` produces; per how the
    searches use it, ``state.get_values()`` appears to accumulate
    configurations and its last entry is the current configuration —
    TODO confirm against the State/Problem definitions.
    """

    def __init__(self, problem):
        self.__problem = problem

    def get_problem(self):
        """Return the wrapped Problem instance."""
        return self.__problem

    def bfs(self, root):
        """Breadth-first search from ``root`` for the final configuration.

        Tracks visited configurations so that cyclic / re-convergent state
        graphs terminate (the original unguarded version could re-enqueue
        the same configuration forever). Because BFS finds the goal at the
        shallowest depth either way, filtering duplicates does not change
        which solution is returned.

        :param root: start state.
        :return: the goal state, or None when the space is exhausted.
        """
        queue = [root]
        visited = []  # list, not set: configurations may be unhashable (lists)
        while len(queue) > 0:
            current_state = queue.pop(0)
            current_config = current_state.get_values()[-1]
            if current_config == self.__problem.get_final_config():
                return current_state
            if current_config in visited:
                continue
            visited.append(current_config)
            queue += [
                state
                for state in Problem.expand(current_state)
                if state.get_values()[-1] not in visited
            ]
        return None

    def gbfs(self, root):
        """Greedy best-first search from ``root``.

        Children of the current state are sorted by the problem heuristic
        and prepended to the frontier, so the search always follows the
        locally most promising branch first.

        :param root: start state.
        :return: the goal state, or None when the space is exhausted.
        """
        to_visit = [root]   # states still to be visited
        visited = []        # configurations already visited
        while len(to_visit) > 0:
            current_state = to_visit.pop(0)
            current_config = current_state.get_values()[-1]
            visited.append(current_config)
            if current_config == self.__problem.get_final_config():
                return current_state
            aux = [
                state
                for state in Problem.expand(current_state)
                if state.get_values()[-1] not in visited
            ]
            aux.sort(key=self.__problem.heuristics)
            to_visit = aux + to_visit
        return None
| true |
b3fc1fd3ba4f1a2f11d715ef5efed35a00b7d620 | Python | austinogiza/meeting-planner | /meetings/models.py | UTF-8 | 734 | 2.734375 | 3 | [] | no_license | from django.db import models
from datetime import date, time
# Create your models here.
class Meetings(models.Model):
    """A scheduled meeting booked into a :class:`Room`."""
    # NOTE(review): the field name `date` shadows `datetime.date` imported at
    # module level within this class body; harmless as written, but worth
    # knowing before adding date logic here.
    title = models.CharField(max_length=500)
    date = models.DateField()
    start_time = models.TimeField()
    # Meeting length; the unit (minutes? hours?) is not stated in this file —
    # TODO confirm against the views/templates that consume it.
    duration = models.IntegerField()
    # String reference avoids depending on Room being defined first;
    # deleting a Room cascades and deletes its meetings.
    room = models.ForeignKey('Room', on_delete=models.CASCADE)

    def __str__(self):
        """Human-readable summary shown in the admin and shell."""
        return f"{self.title} at {self.start_time} on {self.date}"

    class Meta:
        # Default would be "Meetingss" since the class name is already plural.
        verbose_name_plural = 'Meetings'
class Room(models.Model):
    """A bookable meeting room, identified by floor and room number."""
    name = models.CharField(max_length=500)
    floor_number = models.IntegerField()
    room_number = models.IntegerField()

    def __str__(self):
        """Human-readable summary shown in the admin and shell."""
        return f"{self.name}: room {self.room_number} on {self.floor_number}"
08bd9b2b70504ec645ed157293d0661bb1cb3246 | Python | parampavar/incubator-dolphinscheduler | /tools/release/github/changelog.py | UTF-8 | 5,400 | 2.8125 | 3 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Github utils for release changelog."""
from typing import Dict, List
class Changelog:
    """Generate a release changelog from a list of pull requests.

    Each pull request appears exactly once in the final result. When a PR
    carries more than one recognized label it is classified under the
    highest-priority one; the priority order is
    ``feature > bug > improvement > document > chore`` (so a PR labelled
    both `feature` and `document` lands in the Feature section).

    :param prs: pull requests list (GitHub API-shaped dicts).
    """

    key_number = "number"
    key_labels = "labels"
    key_name = "name"

    label_feature = "feature"
    label_bug = "bug"
    label_improvement = "improvement"
    label_document = "document"
    label_chore = "chore"

    changelog_prefix = "\n\n<details><summary>Click to expand</summary>\n\n"
    changelog_suffix = "\n\n</details>\n"

    def __init__(self, prs: List[Dict]):
        self.prs = prs
        self.features: List[Dict] = []
        self.bugfixs: List[Dict] = []
        self.improvements: List[Dict] = []
        self.documents: List[Dict] = []
        self.chores: List[Dict] = []

    def generate(self) -> str:
        """Generate the changelog text: one section per non-empty category."""
        self.classify()
        # (section title, classified PRs) pairs, in fixed output order.
        sections = (
            ("Feature", self.features),
            ("Improvement", self.improvements),
            ("Bugfix", self.bugfixs),
            ("Document", self.documents),
            ("Chore", self.chores),
        )
        return "\n".join(
            f"## {title}{self.changelog_prefix}"
            f"{self._convert(prs)}{self.changelog_suffix}"
            for title, prs in sections
            if prs
        )

    @staticmethod
    def _convert(prs: List[Dict]) -> str:
        """Convert pull requests into changelog item lines."""
        return "\n".join(
            f"- {pr['title']} (#{pr['number']}) @{pr['user']['login']}" for pr in prs
        )

    def classify(self) -> None:
        """Classify each pull request into exactly one changelog section.

        :raises KeyError: when a PR has no ``labels`` key, or carries none
            of the recognized labels.
        """
        for pr in self.prs:
            if self.key_labels not in pr:
                # f-string, not logging-style args: KeyError does not
                # interpolate "%s" placeholders, it would just store a tuple.
                raise KeyError(f"PR {pr[self.key_number]} does not have labels")
            if self._has_label(pr, self.label_feature):
                self.features.append(pr)
            elif self._has_label(pr, self.label_bug):
                self.bugfixs.append(pr)
            elif self._has_label(pr, self.label_improvement):
                self.improvements.append(pr)
            elif self._has_label(pr, self.label_document):
                self.documents.append(pr)
            elif self._has_label(pr, self.label_chore):
                self.chores.append(pr)
            else:
                raise KeyError(
                    "There must be at least one of labels "
                    "`feature|bug|improvement|document|chore` but there is "
                    f"none, pr: {pr['html_url']}"
                )

    def _has_label(self, pr: Dict, name: str) -> bool:
        """True when *pr* carries a label with the given name."""
        return any(label[self.key_name] == name for label in pr[self.key_labels])

    def _is_feature(self, pr: Dict) -> bool:
        """Belong to feature pull requests."""
        return self._has_label(pr, self.label_feature)

    def _is_bugfix(self, pr: Dict) -> bool:
        """Belong to bugfix pull requests."""
        return self._has_label(pr, self.label_bug)

    def _is_improvement(self, pr: Dict) -> bool:
        """Belong to improvement pull requests."""
        return self._has_label(pr, self.label_improvement)

    def _is_document(self, pr: Dict) -> bool:
        """Belong to document pull requests."""
        return self._has_label(pr, self.label_document)

    def _is_chore(self, pr: Dict) -> bool:
        """Belong to chore pull requests."""
        return self._has_label(pr, self.label_chore)
| true |
12c8b45e68b0affee313e1bab6be4c962e881746 | Python | akshaychawla/Accelerated-Training-by-disentangling-neural-representations | /loss_layers.py | UTF-8 | 2,420 | 3.203125 | 3 | [
"MIT"
] | permissive | import keras.backend as K
from keras.layers import Lambda, concatenate
def triplet_loss(y_true, y_pred):
    """Triplet margin loss on a single (anchor, positive, negative) stack.

    y_true : FAKE (dummy labels, ignored)
    y_pred : (3, embedding_units) tensor holding the anchor, positive and
             negative embeddings row-wise.
    """
    margin = 0.1
    anc = y_pred[0, :]
    pos = y_pred[1, :]
    neg = y_pred[2, :]
    # Euclidean distances anchor->positive and anchor->negative.
    dist_pos = K.sqrt(K.sum(K.square(anc - pos)))
    dist_neg = K.sqrt(K.sum(K.square(anc - neg)))
    # Hinge: penalize only when the positive is not closer by `margin`.
    return K.maximum(0.0, dist_pos - dist_neg + margin)
def triplet_loss_batched_wrapper(num_triplets):
    """Build a batched triplet loss for a fixed triplet count (closure style).

    The returned loss expects the network output y_pred to be of shape
    (num_triplets * 3, embedding_units), stacked as all anchors, then all
    positives, then all negatives.
    """
    def triplet_loss_batched(y_true, y_pred):
        """y_true : FAKE (dummy labels, ignored)."""
        margin = 0.1
        anchors = y_pred[:num_triplets, :]
        positives = y_pred[num_triplets: 2 * num_triplets, :]
        negatives = y_pred[2 * num_triplets: 3 * num_triplets, :]
        # Per-triplet Euclidean distances along the embedding axis.
        dist_pos = K.sqrt(K.sum(K.square(anchors - positives), axis=-1))
        dist_neg = K.sqrt(K.sum(K.square(anchors - negatives), axis=-1))
        hinge = K.maximum(0.0, dist_pos - dist_neg + margin)
        # Average the hinge losses over the batch of triplets.
        return K.mean(hinge, axis=0)

    return triplet_loss_batched
def wrapper_categorical_crossentropy(num_triplets):
    """Categorical cross-entropy over stacked triplet batches.

    y_true : one-hot (anc*n, pos*n, neg*n) X 10
    y_pred : softmax (anc*n, pos*n, neg*n) X 10

    Both tensors are sliced into their anchor / positive / negative parts and
    re-concatenated as (anchors, negatives, positives) before the
    cross-entropy is computed, so the triplet parts line up pairwise.
    """
    # Row ranges of each part within the stacked (n*3, 10) tensor.
    parts = (
        (0, num_triplets),                       # anchors
        (num_triplets * 2, num_triplets * 3),    # negatives
        (num_triplets, num_triplets * 2),        # positives
    )

    def _stack(tensor):
        # Slice out anchors, negatives, positives (in that order) and rejoin.
        slices = [
            Lambda(lambda x, a=start, b=stop: x[a:b, :])(tensor)
            for start, stop in parts
        ]
        return concatenate(slices)

    def custom_cat_ce(y_true, y_pred):
        return K.categorical_crossentropy(_stack(y_true), _stack(y_pred))

    return custom_cat_ce
| true |
6db350a58a8ad622463aff8706c2f730b8d9b83b | Python | rrichy/boilerplate-time-calculator | /time_calculator.py | UTF-8 | 1,172 | 3.390625 | 3 | [] | no_license | def add_time(start, duration, day = None):
hh, mm, nn = start.replace(':', ' ').split()
days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
time_start = 0
if hh == '12' and nn == 'AM': time_start = int(mm)
elif nn == 'AM': time_start = int(hh) * 60 + int(mm)
elif hh == '12' and nn == 'PM': time_start = 720 + int(mm)
else: time_start = 720 + int(hh) * 60 + int(mm)
hh, mm = duration.split(':')
time_add = int(hh) * 60 + int(mm)
whole_min = time_start + time_add
new_day = whole_min // 1440
new_min = whole_min % 1440
new_hour = new_min // 60
hour_string = str(new_hour if new_hour <= 12 and new_hour > 0 else 12 if new_hour == 0 else new_hour - 12)
minute_string = ':{0:02d}'.format(new_min % 60)
nn_string = ' AM' if new_min // 60 < 12 else ' PM'
day_string = ''
if day is not None:
day_string = ', ' + days[(days.index(day.lower()) + new_day) % 7].capitalize()
if new_day == 1: day_string += ' (next day)'
if new_day > 1: day_string += ' (' + str(new_day) + ' days later)'
return hour_string + minute_string + nn_string + day_string | true |
df6fde5963642da6e86b746d3345b523cf7be202 | Python | ahmadrana24/Game-code-Python | /GAME FINAL.py | UTF-8 | 120,846 | 3.109375 | 3 | [] | no_license | import time
import sys
import turtle as t
def drawline(x1,y1,x2,y2):
    """Lift the pen, move to (x1, y1), then draw a straight line to (x2, y2).

    Uses the module-level turtle `t`; speed 4 keeps the map animation watchable.
    """
    t.speed(4)
    t.penup()
    t.goto(x1,y1)
    t.pendown()
    t.goto(x2,y2)
def map():
x1=-600
y1=300
x2=-450
y2=300
drawline(x1,y1,x2,y2)
x1=-450
y1=300
x2=-450
y2=150
drawline(x1,y1,x2,y2)
x1=-450
y1=150
x2=-600
y2=150
drawline(x1,y1,x2,y2)
x1=-600
y1=150
x2=-600
y2=300
drawline(x1,y1,x2,y2)
x1=-600
y1=300
x2=-600
y2=200
drawline(x1,y1,x2,y2)
x1=-600
y1=200
x2=-450
y2=200
drawline(x1,y1,x2,y2)
x1=-450
y1=200
x2=-450
y2=250
drawline(x1,y1,x2,y2)
x1=-450
y1=250
x2=-600
y2=250
drawline(x1,y1,x2,y2)
x1=-600
y1=250
x2=-600
y2=150
drawline(x1,y1,x2,y2)
x1=-600
y1=150
x2=-590
y2=150
drawline(x1,y1,x2,y2)
x1=-590
y1=150
x2=-590
y2=170
drawline(x1,y1,x2,y2)
x1=-590
y1=170
x2=-580
y2=170
drawline(x1,y1,x2,y2)
x1=-580
y1=170
x2=-580
y2=150
drawline(x1,y1,x2,y2)
x1=-580
y1=150
x2=-600
y2=150
drawline(x1,y1,x2,y2)
x1=-600
y1=150
x2=-600
y2=200
drawline(x1,y1,x2,y2)
x1=-600
y1=200
x2=-590
y2=200
drawline(x1,y1,x2,y2)
x1=-590
y1=200
x2=-590
y2=220
drawline(x1,y1,x2,y2)
x1=-590
y1=220
x2=-580
y2=220
drawline(x1,y1,x2,y2)
x1=-580
y1=220
x2=-580
y2=200
drawline(x1,y1,x2,y2)
x1=-580
y1=200
x2=-600
y2=200
drawline(x1,y1,x2,y2)
x1=-600
y1=200
x2=-600
y2=250
drawline(x1,y1,x2,y2)
x1=-600
y1=250
x2=-590
y2=250
drawline(x1,y1,x2,y2)
x1=-590
y1=250
x2=-590
y2=270
drawline(x1,y1,x2,y2)
x1=-590
y1=270
x2=-580
y2=270
drawline(x1,y1,x2,y2)
x1=-580
y1=270
x2=-580
y2=250
drawline(x1,y1,x2,y2)
#window 1 (221C Bakers Street)
x1=-560
y1=280
x2=-540
y2=280
drawline(x1,y1,x2,y2)
x1=-540
y1=280
x2=-540
y2=260
drawline(x1,y1,x2,y2)
x1=-540
y1=260
x2=-560
y2=260
drawline(x1,y1,x2,y2)
x1=-560
y1=260
x2=-560
y2=280
drawline(x1,y1,x2,y2)
x1=-540
y1=280
x2=-435
y2=350
drawline(x1,y1,x2,y2)
x1=-435
y1=350
x2=-300
y2=350
drawline(x1,y1,x2,y2)
x1=-300
y1=350
x2=-300
y2=280
drawline(x1,y1,x2,y2)
x1=-300
y1=280
x2=-435
y2=280
drawline(x1,y1,x2,y2)
x1=-435
y1=280
x2=-435
y2=340
drawline(x1,y1,x2,y2)
x1=-435
y1=340
x2=-540
y2=280
drawline(x1,y1,x2,y2)
#window 2 (221B Bakers Street)
x1=-490
y1=210
x2=-470
y2=210
drawline(x1,y1,x2,y2)
x1=-470
y1=210
x2=-470
y2=230
drawline(x1,y1,x2,y2)
x1=-470
y1=230
x2=-490
y2=230
drawline(x1,y1,x2,y2)
x1=-490
y1=230
x2=-490
y2=210
drawline(x1,y1,x2,y2)
x1=-490
y1=210
x2=-470
y2=210
drawline(x1,y1,x2,y2)
x1=-470
y1=210
x2=-430
y2=200
drawline(x1,y1,x2,y2)
x1=-430
y1=200
x2=-300
y2=200
drawline(x1,y1,x2,y2)
x1=-300
y1=200
x2=-300
y2=120
drawline(x1,y1,x2,y2)
x1=-300
y1=120
x2=-430
y2=120
drawline(x1,y1,x2,y2)
x1=-430
y1=120
x2=-430
y2=190
drawline(x1,y1,x2,y2)
x1=-430
y1=190
x2=-470
y2=210
drawline(x1,y1,x2,y2)
#hospital
x1=-600
y1=10
x2=-480
y2=10
drawline(x1,y1,x2,y2)
x1=-480
y1=10
x2=-480
y2=90
drawline(x1,y1,x2,y2)
x1=-480
y1=90
x2=-600
y2=90
drawline(x1,y1,x2,y2)
x1=-600
y1=90
x2=-600
y2=10
drawline(x1,y1,x2,y2)
x1=-600
y1=10
x2=-580
y2=10
drawline(x1,y1,x2,y2)
x1=-580
y1=10
x2=-580
y2=30
drawline(x1,y1,x2,y2)
x1=-580
y1=30
x2=-570
y2=30
drawline(x1,y1,x2,y2)
x1=-570
y1=30
x2=-570
y2=10
drawline(x1,y1,x2,y2)
#hospital window (Sherlock's lab)
x1=-550
y1=80
x2=-530
y2=80
drawline(x1,y1,x2,y2)
x1=-530
y1=80
x2=-530
y2=60
drawline(x1,y1,x2,y2)
x1=-530
y1=60
x2=-550
y2=60
drawline(x1,y1,x2,y2)
x1=-550
y1=60
x2=-550
y2=80
drawline(x1,y1,x2,y2)
x1=-530
y1=80
x2=-530
y2=60
drawline(x1,y1,x2,y2)
x1=-530
y1=60
x2=-460
y2=80
drawline(x1,y1,x2,y2)
x1=-460
y1=80
x2=-360
y2=80
drawline(x1,y1,x2,y2)
x1=-360
y1=80
x2=-360
y2=0
drawline(x1,y1,x2,y2)
x1=-360
y1=0
x2=-460
y2=0
drawline(x1,y1,x2,y2)
x1=-460
y1=0
x2=-460
y2=70
drawline(x1,y1,x2,y2)
x1=-460
y1=70
x2=-530
y2=60
drawline(x1,y1,x2,y2)
#boarding school
x1=-600
y1=-70
x2=-480
y2=-70
drawline(x1,y1,x2,y2)
x1=-480
y1=-70
x2=-480
y2=-150
drawline(x1,y1,x2,y2)
x1=-480
y1=-150
x2=-600
y2=-150
drawline(x1,y1,x2,y2)
x1=-600
y1=-150
x2=-600
y2=-70
drawline(x1,y1,x2,y2)
x1=-580
y1=-150
x2=-580
y2=-130
drawline(x1,y1,x2,y2)
x1=-580
y1=-130
x2=-570
y2=-130
drawline(x1,y1,x2,y2)
x1=-570
y1=-130
x2=-570
y2=-150
drawline(x1,y1,x2,y2)
#sea shore
x1=-600
y1=-300
x2=-590
y2=-290
drawline(x1,y1,x2,y2)
x1=-590
y1=-290
x2=-580
y2=-270
drawline(x1,y1,x2,y2)
x1=-580
y1=-270
x2=-570
y2=-260
drawline(x1,y1,x2,y2)
x1=-570
y1=-260
x2=-560
y2=-250
drawline(x1,y1,x2,y2)
x1=-560
y1=-250
x2=-540
y2=-245
drawline(x1,y1,x2,y2)
x1=-540
y1=-245
x2=-520
y2=-220
drawline(x1,y1,x2,y2)
x1=-520
y1=-220
x2=-510
y2=-210
drawline(x1,y1,x2,y2)
x1=-510
y1=-210
x2=-490
y2=-200
drawline(x1,y1,x2,y2)
x1=-490
y1=-200
x2=-480
y2=-210
drawline(x1,y1,x2,y2)
x1=-480
y1=-210
x2=-470
y2=-220
drawline(x1,y1,x2,y2)
x1=-470
y1=-220
x2=-460
y2=-225
drawline(x1,y1,x2,y2)
x1=-460
y1=-225
x2=-455
y2=-230
drawline(x1,y1,x2,y2)
x1=-455
y1=-230
x2=-450
y2=-240
drawline(x1,y1,x2,y2)
x1=-450
y1=-240
x2=-445
y2=-245
drawline(x1,y1,x2,y2)
x1=-445
y1=-245
x2=-445
y2=-250
drawline(x1,y1,x2,y2)
x1=-445
y1=-250
x2=-445
y2=-255
drawline(x1,y1,x2,y2)
x1=-445
y1=-255
x2=-440
y2=-255
drawline(x1,y1,x2,y2)
x1=-440
y1=-255
x2=-435
y2=-260
drawline(x1,y1,x2,y2)
x1=-435
y1=-260
x2=-430
y2=-265
drawline(x1,y1,x2,y2)
x1=-430
y1=-265
x2=-420
y2=-265
drawline(x1,y1,x2,y2)
x1=-420
y1=-265
x2=-420
y2=-275
drawline(x1,y1,x2,y2)
x1=-420
y1=-275
x2=-415
y2=-280
drawline(x1,y1,x2,y2)
x1=-415
y1=-280
x2=-410
y2=-285
drawline(x1,y1,x2,y2)
x1=-410
y1=-285
x2=-405
y2=-290
drawline(x1,y1,x2,y2)
x1=-405
y1=-290
x2=-400
y2=-295
drawline(x1,y1,x2,y2)
x1=-400
y1=-295
x2=-395
y2=-300
drawline(x1,y1,x2,y2)
x1=-395
y1=-300
x2=-400
y2=-320
drawline(x1,y1,x2,y2)
x1=-400
y1=-320
x2=-400
y2=-330
drawline(x1,y1,x2,y2)
x1=-400
y1=-330
x2=-550
y2=-330
drawline(x1,y1,x2,y2)
x1=-550
y1=-330
x2=-600
y2=-300
drawline(x1,y1,x2,y2)
#car
x1=-540
y1=-250
x2=-500
y2=-250
drawline(x1,y1,x2,y2)
x1=-500
y1=-250
x2=-500
y2=-270
drawline(x1,y1,x2,y2)
x1=-500
y1=-270
x2=-540
y2=-270
drawline(x1,y1,x2,y2)
x1=-540
y1=-270
x2=-540
y2=-250
drawline(x1,y1,x2,y2)
x1=-500
y1=-255
x2=-490
y2=-260
drawline(x1,y1,x2,y2)
x1=-490
y1=-260
x2=-480
y2=-270
drawline(x1,y1,x2,y2)
x1=-480
y1=-270
x2=-500
y2=-270
drawline(x1,y1,x2,y2)
x1=-500
y1=-280
x2=-500
y2=-280
drawline(x1,y1,x2,y2)
t.circle(5)
x1=-530
y1=-280
x2=-530
y2=-280
drawline(x1,y1,x2,y2)
t.circle(5)
#police station
x1=-250
y1=350
x2=-150
y2=350
drawline(x1,y1,x2,y2)
x1=-150
y1=350
x2=-150
y2=250
drawline(x1,y1,x2,y2)
x1=-150
y1=250
x2=-250
y2=250
drawline(x1,y1,x2,y2)
x1=-250
y1=350
x2=-250
y2=250
drawline(x1,y1,x2,y2)
x1=-250
y1=250
x2=-245
y2=250
drawline(x1,y1,x2,y2)
x1=-245
y1=250
x2=-245
y2=270
drawline(x1,y1,x2,y2)
x1=-245
y1=270
x2=-235
y2=270
drawline(x1,y1,x2,y2)
x1=-235
y1=270
x2=-235
y2=250
drawline(x1,y1,x2,y2)
x1=-230
y1=300
x2=-170
y2=300
drawline(x1,y1,x2,y2)
x1=-170
y1=300
x2=-170
y2=340
drawline(x1,y1,x2,y2)
x1=-170
y1=340
x2=-230
y2=340
drawline(x1,y1,x2,y2)
x1=-230
y1=340
x2=-230
y2=300
drawline(x1,y1,x2,y2)
x1=-200
y1=305
x2=-200
y2=305
drawline(x1,y1,x2,y2)
t.circle(14)
#police inspection area
x1=-150
y1=330
x2=-70
y2=330
drawline(x1,y1,x2,y2)
x1=-70
y1=330
x2=-70
y2=250
drawline(x1,y1,x2,y2)
x1=-70
y1=250
x2=-150
y2=250
drawline(x1,y1,x2,y2)
x1=-150
y1=250
x2=-140
y2=250
drawline(x1,y1,x2,y2)
x1=-140
y1=250
x2=-140
y2=270
drawline(x1,y1,x2,y2)
x1=-140
y1=270
x2=-130
y2=270
drawline(x1,y1,x2,y2)
x1=-130
y1=270
x2=-130
y2=250
drawline(x1,y1,x2,y2)
#police cab
x1=-70
y1=240
x2=-20
y2=240
drawline(x1,y1,x2,y2)
x1=-20
y1=240
x2=-20
y2=210
drawline(x1,y1,x2,y2)
x1=-20
y1=210
x2=-70
y2=210
drawline(x1,y1,x2,y2)
x1=-70
y1=210
x2=-70
y2=240
drawline(x1,y1,x2,y2)
x1=-20
y1=230
x2=-10
y2=230
drawline(x1,y1,x2,y2)
x1=-10
y1=230
x2=0
y2=210
drawline(x1,y1,x2,y2)
x1=0
y1=210
x2=-70
y2=210
drawline(x1,y1,x2,y2)
x1=-60
y1=205
x2=-60
y2=205
drawline(x1,y1,x2,y2)
t.circle(3)
x1=-20
y1=205
x2=-20
y2=205
drawline(x1,y1,x2,y2)
t.circle(3)
#court
x1=50
y1=300
x2=100
y2=350
drawline(x1,y1,x2,y2)
x1=100
y1=350
x2=150
y2=300
drawline(x1,y1,x2,y2)
x1=150
y1=300
x2=50
y2=300
drawline(x1,y1,x2,y2)
x1=70
y1=300
x2=70
y2=250
drawline(x1,y1,x2,y2)
x1=70
y1=250
x2=130
y2=250
drawline(x1,y1,x2,y2)
x1=130
y1=250
x2=130
y2=300
drawline(x1,y1,x2,y2)
x1=110
y1=250
x2=110
y2=270
drawline(x1,y1,x2,y2)
x1=110
y1=270
x2=90
y2=270
drawline(x1,y1,x2,y2)
x1=90
y1=270
x2=90
y2=250
drawline(x1,y1,x2,y2)
#restaurant
x1=400
y1=350
x2=550
y2=350
drawline(x1,y1,x2,y2)
x1=550
y1=350
x2=550
y2=320
drawline(x1,y1,x2,y2)
x1=550
y1=320
x2=400
y2=320
drawline(x1,y1,x2,y2)
x1=400
y1=320
x2=400
y2=350
drawline(x1,y1,x2,y2)
x1=420
y1=320
x2=420
y2=250
drawline(x1,y1,x2,y2)
x1=420
y1=250
x2=530
y2=250
drawline(x1,y1,x2,y2)
x1=530
y1=250
x2=530
y2=320
drawline(x1,y1,x2,y2)
x1=450
y1=250
x2=450
y2=270
drawline(x1,y1,x2,y2)
x1=450
y1=270
x2=490
y2=270
drawline(x1,y1,x2,y2)
x1=490
y1=270
x2=490
y2=250
drawline(x1,y1,x2,y2)
#lauriston's garden
x1=400
y1=80
x2=450
y2=150
drawline(x1,y1,x2,y2)
x1=450
y1=150
x2=500
y2=80
drawline(x1,y1,x2,y2)
x1=500
y1=80
x2=400
y2=80
drawline(x1,y1,x2,y2)
x1=420
y1=80
x2=420
y2=30
drawline(x1,y1,x2,y2)
x1=420
y1=30
x2=480
y2=30
drawline(x1,y1,x2,y2)
x1=480
y1=30
x2=480
y2=80
drawline(x1,y1,x2,y2)
x1=440
y1=30
x2=440
y2=50
drawline(x1,y1,x2,y2)
x1=440
y1=50
x2=460
y2=50
drawline(x1,y1,x2,y2)
x1=460
y1=50
x2=460
y2=30
drawline(x1,y1,x2,y2)
x1=440
y1=70
x2=460
y2=70
drawline(x1,y1,x2,y2)
x1=460
y1=70
x2=460
y2=60
drawline(x1,y1,x2,y2)
x1=460
y1=60
x2=440
y2=60
drawline(x1,y1,x2,y2)
x1=440
y1=60
x2=440
y2=70
drawline(x1,y1,x2,y2)
x1=440
y1=70
x2=390
y2=60
drawline(x1,y1,x2,y2)
x1=390
y1=60
x2=300
y2=60
drawline(x1,y1,x2,y2)
x1=300
y1=60
x2=300
y2=10
drawline(x1,y1,x2,y2)
x1=300
y1=10
x2=390
y2=10
drawline(x1,y1,x2,y2)
x1=390
y1=10
x2=390
y2=50
drawline(x1,y1,x2,y2)
x1=390
y1=50
x2=440
y2=70
drawline(x1,y1,x2,y2)
#journalist house
x1=460
y1=-40
x2=460
y2=-110
drawline(x1,y1,x2,y2)
x1=460
y1=-110
x2=550
y2=-110
drawline(x1,y1,x2,y2)
x1=550
y1=-110
x2=550
y2=-40
drawline(x1,y1,x2,y2)
x1=550
y1=-40
x2=440
y2=-40
drawline(x1,y1,x2,y2)
x1=440
y1=-40
x2=440
y2=-20
drawline(x1,y1,x2,y2)
x1=440
y1=-20
x2=570
y2=-20
drawline(x1,y1,x2,y2)
x1=570
y1=-20
x2=570
y2=-40
drawline(x1,y1,x2,y2)
x1=570
y1=-40
x2=550
y2=-40
drawline(x1,y1,x2,y2)
#Mrs Price house
x1=400
y1=-300
x2=550
y2=-300
drawline(x1,y1,x2,y2)
x1=550
y1=-300
x2=550
y2=-200
drawline(x1,y1,x2,y2)
x1=550
y1=-200
x2=400
y2=-200
drawline(x1,y1,x2,y2)
x1=400
y1=-200
x2=400
y2=-300
drawline(x1,y1,x2,y2)
x1=400
y1=-300
x2=410
y2=-300
drawline(x1,y1,x2,y2)
x1=410
y1=-300
x2=410
y2=-280
drawline(x1,y1,x2,y2)
x1=410
y1=-280
x2=420
y2=-280
drawline(x1,y1,x2,y2)
x1=420
y1=-280
x2=420
y2=-300
drawline(x1,y1,x2,y2)
x1=400
y1=-200
x2=390
y2=-200
drawline(x1,y1,x2,y2)
x1=390
y1=-200
x2=470
y2=-180
drawline(x1,y1,x2,y2)
x1=470
y1=-180
x2=560
y2=-200
drawline(x1,y1,x2,y2)
x1=560
y1=-200
x2=550
y2=-200
drawline(x1,y1,x2,y2)
x1=440
y1=-260
x2=510
y2=-260
drawline(x1,y1,x2,y2)
x1=510
y1=-260
x2=510
y2=-220
drawline(x1,y1,x2,y2)
x1=510
y1=-220
x2=440
y2=-220
drawline(x1,y1,x2,y2)
x1=440
y1=-220
x2=440
y2=-260
drawline(x1,y1,x2,y2)
#ware house
x1=150
y1=-300
x2=300
y2=-300
drawline(x1,y1,x2,y2)
x1=300
y1=-300
x2=300
y2=-200
drawline(x1,y1,x2,y2)
x1=300
y1=-200
x2=150
y2=-200
drawline(x1,y1,x2,y2)
x1=150
y1=-200
x2=150
y2=-300
drawline(x1,y1,x2,y2)
x1=160
y1=-300
x2=160
y2=-280
drawline(x1,y1,x2,y2)
x1=160
y1=-280
x2=170
y2=-280
drawline(x1,y1,x2,y2)
x1=170
y1=-280
x2=170
y2=-300
drawline(x1,y1,x2,y2)
#janus cars
x1=-150
y1=-300
x2=-30
y2=-300
drawline(x1,y1,x2,y2)
x1=-30
y1=-300
x2=-30
y2=-200
drawline(x1,y1,x2,y2)
x1=-30
y1=-200
x2=-150
y2=-200
drawline(x1,y1,x2,y2)
x1=-150
y1=-200
x2=-150
y2=-300
drawline(x1,y1,x2,y2)
x1=-120
y1=-300
x2=-120
y2=-280
drawline(x1,y1,x2,y2)
x1=-120
y1=-280
x2=-110
y2=-280
drawline(x1,y1,x2,y2)
x1=-110
y1=-280
x2=-110
y2=-300
drawline(x1,y1,x2,y2)
#swimming pool
x1=100
y1=-100
x2=200
y2=-100
drawline(x1,y1,x2,y2)
x1=200
y1=-100
x2=200
y2=-50
drawline(x1,y1,x2,y2)
x1=200
y1=-50
x2=100
y2=-50
drawline(x1,y1,x2,y2)
x1=100
y1=-50
x2=100
y2=-100
drawline(x1,y1,x2,y2)
#high building for final fight
x1=100
y1=20
x2=120
y2=120
drawline(x1,y1,x2,y2)
x1=120
y1=120
x2=140
y2=130
drawline(x1,y1,x2,y2)
x1=140
y1=130
x2=160
y2=120
drawline(x1,y1,x2,y2)
x1=160
y1=120
x2=180
y2=20
drawline(x1,y1,x2,y2)
x1=180
y1=20
x2=160
y2=10
drawline(x1,y1,x2,y2)
x1=160
y1=10
x2=100
y2=20
drawline(x1,y1,x2,y2)
x1=140
y1=130
x2=160
y2=10
drawline(x1,y1,x2,y2)
x1=120
y1=16
x2=120
y2=36
drawline(x1,y1,x2,y2)
x1=120
y1=36
x2=130
y2=35
drawline(x1,y1,x2,y2)
x1=130
y1=35
x2=130
y2=16
drawline(x1,y1,x2,y2)
#college building
x1=-180
y1=0
x2=-80
y2=0
drawline(x1,y1,x2,y2)
x1=-80
y1=0
x2=-80
y2=80
drawline(x1,y1,x2,y2)
x1=-80
y1=80
x2=-180
y2=80
drawline(x1,y1,x2,y2)
x1=-180
y1=80
x2=-180
y2=0
drawline(x1,y1,x2,y2)
x1=-180
y1=0
x2=-160
y2=0
drawline(x1,y1,x2,y2)
x1=-160
y1=0
x2=-160
y2=20
drawline(x1,y1,x2,y2)
x1=-160
y1=20
x2=-150
y2=20
drawline(x1,y1,x2,y2)
x1=-150
y1=20
x2=-150
y2=0
drawline(x1,y1,x2,y2)
x1=-140
y1=40
x2=-120
y2=40
drawline(x1,y1,x2,y2)
x1=-120
y1=40
x2=-120
y2=60
drawline(x1,y1,x2,y2)
x1=-120
y1=60
x2=-140
y2=60
drawline(x1,y1,x2,y2)
x1=-140
y1=60
x2=-140
y2=40
drawline(x1,y1,x2,y2)
x1=-120
y1=40
x2=-70
y2=50
drawline(x1,y1,x2,y2)
x1=-70
y1=50
x2=10
y2=50
drawline(x1,y1,x2,y2)
x1=10
y1=50
x2=10
y2=100
drawline(x1,y1,x2,y2)
x1=10
y1=100
x2=-70
y2=100
drawline(x1,y1,x2,y2)
x1=-70
y1=100
x2=-70
y2=60
drawline(x1,y1,x2,y2)
x1=-70
y1=60
x2=-120
y2=40
drawline(x1,y1,x2,y2)
#london tower
x1=-350
y1=-150
x2=-250
y2=-150
drawline(x1,y1,x2,y2)
x1=-250
y1=-150
x2=-250
y2=-100
drawline(x1,y1,x2,y2)
x1=-250
y1=-100
x2=-350
y2=-100
drawline(x1,y1,x2,y2)
x1=-350
y1=-100
x2=-350
y2=-150
drawline(x1,y1,x2,y2)
x1=-330
y1=-150
x2=-330
y2=-130
drawline(x1,y1,x2,y2)
x1=-330
y1=-130
x2=-320
y2=-130
drawline(x1,y1,x2,y2)
x1=-320
y1=-130
x2=-320
y2=-150
drawline(x1,y1,x2,y2)
#roads
x1=-700
y1=140
x2=-590
y2=140
drawline(x1,y1,x2,y2)
x1=-590
y1=140
x2=-590
y2=145
drawline(x1,y1,x2,y2)
x1=-580
y1=145
x2=-580
y2=140
drawline(x1,y1,x2,y2)
x1=-580
y1=140
x2=-430
y2=140
drawline(x1,y1,x2,y2)
x1=-400
y1=200
x2=-400
y2=280
drawline(x1,y1,x2,y2)
x1=-370
y1=280
x2=-370
y2=205
drawline(x1,y1,x2,y2)
x1=-370
y1=205
x2=-245
y2=205
drawline(x1,y1,x2,y2)
x1=-245
y1=205
x2=-245
y2=245
drawline(x1,y1,x2,y2)
x1=-235
y1=245
x2=-235
y2=205
drawline(x1,y1,x2,y2)
x1=-235
y1=205
x2=90
y2=205
drawline(x1,y1,x2,y2)
x1=90
y1=205
x2=90
y2=245
drawline(x1,y1,x2,y2)
x1=110
y1=245
x2=110
y2=205
drawline(x1,y1,x2,y2)
x1=110
y1=205
x2=450
y2=205
drawline(x1,y1,x2,y2)
x1=450
y1=205
x2=450
y2=245
drawline(x1,y1,x2,y2)
x1=490
y1=245
x2=490
y2=205
drawline(x1,y1,x2,y2)
x1=490
y1=205
x2=700
y2=205
drawline(x1,y1,x2,y2)
x1=700
y1=160
x2=350
y2=160
drawline(x1,y1,x2,y2)
x1=350
y1=160
x2=350
y2=60
drawline(x1,y1,x2,y2)
x1=390
y1=20
x2=440
y2=20
drawline(x1,y1,x2,y2)
x1=440
y1=20
x2=440
y2=25
drawline(x1,y1,x2,y2)
x1=460
y1=25
x2=460
y2=20
drawline(x1,y1,x2,y2)
x1=460
y1=20
x2=520
y2=20
drawline(x1,y1,x2,y2)
x1=520
y1=20
x2=520
y2=25
drawline(x1,y1,x2,y2)
x1=560
y1=25
x2=560
y2=20
drawline(x1,y1,x2,y2)
x1=560
y1=20
x2=700
y2=20
drawline(x1,y1,x2,y2)
x1=-700
y1=100
x2=-400
y2=100
drawline(x1,y1,x2,y2)
x1=-400
y1=100
x2=-400
y2=80
drawline(x1,y1,x2,y2)
x1=-400
y1=0
x2=-400
y2=-20
drawline(x1,y1,x2,y2)
x1=-400
y1=-20
x2=-570
y2=-20
drawline(x1,y1,x2,y2)
x1=-570
y1=-20
x2=-570
y2=-5
drawline(x1,y1,x2,y2)
x1=-580
y1=-5
x2=-580
y2=-20
drawline(x1,y1,x2,y2)
x1=-580
y1=-20
x2=-700
y2=-20
drawline(x1,y1,x2,y2)
x1=-700
y1=-50
x2=-400
y2=-50
drawline(x1,y1,x2,y2)
x1=-400
y1=-50
x2=-400
y2=-170
drawline(x1,y1,x2,y2)
x1=-400
y1=-170
x2=-570
y2=-170
drawline(x1,y1,x2,y2)
x1=-570
y1=-170
x2=-570
y2=-160
drawline(x1,y1,x2,y2)
x1=-580
y1=-160
x2=-580
y2=-170
drawline(x1,y1,x2,y2)
x1=-580
y1=-170
x2=-700
y2=-170
drawline(x1,y1,x2,y2)
x1=-700
y1=-200
x2=-400
y2=-200
drawline(x1,y1,x2,y2)
x1=-400
y1=-200
x2=-350
y2=-340
drawline(x1,y1,x2,y2)
x1=-350
y1=-340
x2=650
y2=-340
drawline(x1,y1,x2,y2)
x1=650
y1=-340
x2=650
y2=-10
drawline(x1,y1,x2,y2)
x1=650
y1=-10
x2=700
y2=-10
drawline(x1,y1,x2,y2)
x1=-300
y1=170
x2=20
y2=170
drawline(x1,y1,x2,y2)
x1=20
y1=170
x2=20
y2=-20
drawline(x1,y1,x2,y2)
x1=20
y1=-20
x2=-150
y2=-20
drawline(x1,y1,x2,y2)
x1=-150
y1=-20
x2=-150
y2=-15
drawline(x1,y1,x2,y2)
x1=-160
y1=-15
x2=-160
y2=-20
drawline(x1,y1,x2,y2)
x1=-160
y1=-20
x2=-370
y2=-20
drawline(x1,y1,x2,y2)
x1=-370
y1=-20
x2=-370
y2=0
drawline(x1,y1,x2,y2)
x1=-370
y1=80
x2=-370
y2=120
drawline(x1,y1,x2,y2)
x1=60
y1=160
x2=310
y2=160
drawline(x1,y1,x2,y2)
x1=310
y1=160
x2=310
y2=60
drawline(x1,y1,x2,y2)
x1=310
y1=10
x2=310
y2=-20
drawline(x1,y1,x2,y2)
x1=310
y1=-20
x2=130
y2=-20
drawline(x1,y1,x2,y2)
x1=130
y1=-20
x2=130
y2=0
drawline(x1,y1,x2,y2)
x1=120
y1=0
x2=120
y2=-20
drawline(x1,y1,x2,y2)
x1=120
y1=-20
x2=60
y2=-20
drawline(x1,y1,x2,y2)
x1=60
y1=-20
x2=60
y2=160
drawline(x1,y1,x2,y2)
x1=-370
y1=-50
x2=20
y2=-50
drawline(x1,y1,x2,y2)
x1=20
y1=-50
x2=20
y2=-160
drawline(x1,y1,x2,y2)
x1=20
y1=-160
x2=-320
y2=-160
drawline(x1,y1,x2,y2)
x1=-320
y1=-160
x2=-320
y2=-155
drawline(x1,y1,x2,y2)
x1=-330
y1=-155
x2=-330
y2=-160
drawline(x1,y1,x2,y2)
x1=-330
y1=-160
x2=-365
y2=-160
drawline(x1,y1,x2,y2)
x1=-365
y1=-160
x2=-365
y2=-50
drawline(x1,y1,x2,y2)
x1=60
y1=-45
x2=310
y2=-45
drawline(x1,y1,x2,y2)
x1=310
y1=-45
x2=310
y2=-160
drawline(x1,y1,x2,y2)
x1=310
y1=-160
x2=60
y2=-160
drawline(x1,y1,x2,y2)
x1=60
y1=-160
x2=60
y2=-45
drawline(x1,y1,x2,y2)
x1=350
y1=-10
x2=610
y2=-10
drawline(x1,y1,x2,y2)
x1=610
y1=-10
x2=610
y2=-130
drawline(x1,y1,x2,y2)
x1=610
y1=-130
x2=520
y2=-130
drawline(x1,y1,x2,y2)
x1=520
y1=-130
x2=520
y2=-120
drawline(x1,y1,x2,y2)
x1=510
y1=-120
x2=510
y2=-130
drawline(x1,y1,x2,y2)
x1=510
y1=-130
x2=350
y2=-130
drawline(x1,y1,x2,y2)
x1=350
y1=-130
x2=350
y2=-10
drawline(x1,y1,x2,y2)
x1=350
y1=-160
x2=610
y2=-160
drawline(x1,y1,x2,y2)
x1=610
y1=-160
x2=610
y2=-310
drawline(x1,y1,x2,y2)
x1=610
y1=-310
x2=425
y2=-310
drawline(x1,y1,x2,y2)
x1=425
y1=-310
x2=425
y2=-305
drawline(x1,y1,x2,y2)
x1=415
y1=-305
x2=415
y2=-310
drawline(x1,y1,x2,y2)
x1=415
y1=-310
x2=350
y2=-310
drawline(x1,y1,x2,y2)
x1=350
y1=-310
x2=350
y2=-160
drawline(x1,y1,x2,y2)
x1=310
y1=-190
x2=70
y2=-190
drawline(x1,y1,x2,y2)
x1=70
y1=-190
x2=70
y2=-310
drawline(x1,y1,x2,y2)
x1=70
y1=-310
x2=160
y2=-310
drawline(x1,y1,x2,y2)
x1=160
y1=-310
x2=160
y2=-305
drawline(x1,y1,x2,y2)
x1=170
y1=-305
x2=170
y2=-310
drawline(x1,y1,x2,y2)
x1=170
y1=-310
x2=310
y2=-310
drawline(x1,y1,x2,y2)
x1=310
y1=-310
x2=310
y2=-190
drawline(x1,y1,x2,y2)
x1=20
y1=-190
x2=-360
y2=-190
drawline(x1,y1,x2,y2)
x1=-360
y1=-190
x2=-320
y2=-310
drawline(x1,y1,x2,y2)
x1=-320
y1=-310
x2=-120
y2=-310
drawline(x1,y1,x2,y2)
x1=-120
y1=-310
x2=-120
y2=-305
drawline(x1,y1,x2,y2)
x1=-110
y1=-305
x2=-110
y2=-310
drawline(x1,y1,x2,y2)
x1=-110
y1=-310
x2=20
y2=-310
drawline(x1,y1,x2,y2)
x1=20
y1=-310
x2=20
y2=-190
drawline(x1,y1,x2,y2)
#hostage car
x1=-300
y1=80
x2=-250
y2=80
drawline(x1,y1,x2,y2)
x1=-250
y1=80
x2=-240
y2=70
drawline(x1,y1,x2,y2)
x1=-240
y1=70
x2=-220
y2=70
drawline(x1,y1,x2,y2)
x1=-220
y1=70
x2=-220
y2=60
drawline(x1,y1,x2,y2)
x1=-220
y1=60
x2=-330
y2=60
drawline(x1,y1,x2,y2)
x1=-330
y1=60
x2=-330
y2=70
drawline(x1,y1,x2,y2)
x1=-330
y1=70
x2=-310
y2=70
drawline(x1,y1,x2,y2)
x1=-310
y1=70
x2=-300
y2=80
drawline(x1,y1,x2,y2)
x1=-300
y1=50
x2=-300
y2=50
drawline(x1,y1,x2,y2)
t.circle(5)
x1=-250
y1=50
x2=-250
y2=50
drawline(x1,y1,x2,y2)
t.circle(5)
x1=-600
y1=300
x2=-600
y2=300
drawline(x1,y1,x2,y2)
t.color("red")
t.write("221 Bakers Street")
x1=-587
y1=150
x2=-587
y2=150
drawline(x1,y1,x2,y2)
t.color("red")
t.write("A")
x1=-587
y1=200
x2=-587
y2=200
drawline(x1,y1,x2,y2)
t.color("red")
t.write("B")
x1=-587
y1=250
x2=-587
y2=250
drawline(x1,y1,x2,y2)
t.color("red")
t.write("C")
x1=-400
y1=300
x2=-400
y2=300
drawline(x1,y1,x2,y2)
t.color("red")
t.write("shoes")
x1=-400
y1=150
x2=-400
y2=150
drawline(x1,y1,x2,y2)
t.color("red")
t.write("sofa")
x1=-360
y1=150
x2=-360
y2=150
drawline(x1,y1,x2,y2)
t.color("red")
t.write("sofa")
x1=-380
y1=170
x2=-380
y2=170
drawline(x1,y1,x2,y2)
t.color("red")
t.write("chair")
x1=-230
y1=280
x2=-230
y2=280
drawline(x1,y1,x2,y2)
t.color("blue")
t.write("Police Station")
x1=-150
y1=310
x2=-150
y2=310
drawline(x1,y1,x2,y2)
t.color("blue")
t.write("Police Inspection")
x1=80
y1=310
x2=80
y2=310
drawline(x1,y1,x2,y2)
t.color("grey")
t.write("Court")
x1=450
y1=330
x2=450
y2=330
drawline(x1,y1,x2,y2)
t.color("orange")
t.write("Restaurant")
x1=355
y1=140
x2=355
y2=140
drawline(x1,y1,x2,y2)
t.color("purple")
t.write("Lauriston's Garden")
x1=550
y1=140
x2=550
y2=140
drawline(x1,y1,x2,y2)
t.color("green")
t.write("Plants")
x1=600
y1=110
x2=600
y2=110
drawline(x1,y1,x2,y2)
t.color("green")
t.write("Garbage Bin")
x1=500
y1=90
x2=500
y2=90
drawline(x1,y1,x2,y2)
t.color("green")
t.write("Gutter")
x1=310
y1=20
x2=310
y2=20
drawline(x1,y1,x2,y2)
t.color("maroon")
t.write("Jennifer's body")
x1=460
y1=-40
x2=460
y2=-40
drawline(x1,y1,x2,y2)
t.color("pink")
t.write("Journalist House")
x1=430
y1=-180
x2=430
y2=-180
drawline(x1,y1,x2,y2)
t.color("brown")
t.write("Mrs. Prince House")
x1=200
y1=-220
x2=200
y2=-220
drawline(x1,y1,x2,y2)
t.color("yellow")
t.write("Ware House")
x1=120
y1=-80
x2=120
y2=-80
drawline(x1,y1,x2,y2)
t.color("blue")
t.write("Swimming pool")
x1=120
y1=140
x2=120
y2=140
drawline(x1,y1,x2,y2)
t.color("red")
t.write("High Building for Final Fight")
x1=-170
y1=80
x2=-170
y2=80
drawline(x1,y1,x2,y2)
t.color("orange")
t.write("College Building")
x1=-60
y1=70
x2=-60
y2=70
drawline(x1,y1,x2,y2)
t.color("green")
t.write("Identical pills")
x1=-300
y1=80
x2=-300
y2=80
drawline(x1,y1,x2,y2)
t.color("brown")
t.write("Hostage car")
x1=-340
y1=-120
x2=-340
y2=-120
drawline(x1,y1,x2,y2)
t.color("brown")
t.write("London Tower")
x1=-130
y1=-240
x2=-130
y2=-240
drawline(x1,y1,x2,y2)
t.color("grey")
t.write("Janus Cars")
x1=-570
y1=-300
x2=-570
y2=-300
drawline(x1,y1,x2,y2)
t.color("blue")
t.write("Seashore")
x1=-590
y1=-100
x2=-590
y2=-100
drawline(x1,y1,x2,y2)
t.color("purple")
t.write("Boarding School")
x1=-590
y1=40
x2=-590
y2=40
drawline(x1,y1,x2,y2)
t.color("blue")
t.write("Hospital")
x1=-450
y1=50
x2=-450
y2=50
drawline(x1,y1,x2,y2)
t.color("green")
t.write("Sherlock's lab")
t.done()
def space():  # AHMED ASHRAF SP18-BSE-009
    """Print a blank gap (two newlines) between story passages."""
    print()
    print()
def invalid():  # AHMED ASHRAF SP18-BSE-009
    """Tell the player their last input was not understood."""
    message = "TRY ANOTHER COMMAND "
    print(message)
#AHMED ASHRAF SP18-BSE-009
def slowprint(message, speed):
    """Print *message* one character at a time, flushing and sleeping
    ``speed`` seconds after each character; the last character gets a
    trailing newline.  An empty message prints nothing at all."""
    final_index = len(message) - 1
    for index, char in enumerate(message):
        if index == final_index:
            print(char)
        else:
            print(char, end="")
        sys.stdout.flush()
        time.sleep(speed)
def final1():
    """Rooftop finale: play the closing story files, then keep asking
    whether Sherlock should jump."""
    def _show(path):
        # Display one story file slowly, then pause and print two blank lines.
        handle = open(path, "r")
        slowprint(handle.read(), 0.04)
        handle.close()
        time.sleep(2)
        space()
        space()
    while True:
        for part in ("final", "final1", "final2", "final3", "final4", "final5"):
            _show("ali\\" + part + ".txt")
        while True:
            choice = input("Do you want to jump?").lower()
            if 'yes' in choice:
                _show("ali\\final6.txt")
            elif 'no' in choice:
                _show("ali\\final7.txt")
            elif "map" in choice:
                map()
                space()
                continue
            else:
                invalid()
                time.sleep(2)
                space()
                space()
                continue
def final():
    """Ask whether Sherlock fights Jim on the roof; either answer leads
    into the jump decision in final1()."""
    def _show(path):
        # Display one story file slowly, then pause and print two blank lines.
        handle = open(path, "r")
        slowprint(handle.read(), 0.04)
        handle.close()
        time.sleep(2)
        space()
        space()
    while True:
        answer = input("Do you want to fight Jim...??").lower()
        if 'fight' in answer or 'yes' in answer:
            _show("ali\\fighta.txt")
            final1()
        elif 'dont' in answer or 'no' in answer or "don't" in answer:
            _show("ali\\fighta1.txt")
            final1()
        elif "map" in answer:
            map()
            space()
            continue
        else:
            invalid()
            time.sleep(2)
            space()
            space()
            continue
def kitty():
    """Kitty's flat / Jim confrontation scene: talk to Jim, then to
    Sherlock, until the story reaches the rooftop finale via final().

    Fix: the invalid-input branch of the "Talk with sherlock?" prompt
    called the undefined name ``timesleep(2)`` (NameError at runtime);
    it now calls ``time.sleep(2)`` like every other branch.
    """
    def _show(path):
        # Display one story file slowly, then pause and print two blank lines.
        handle = open(path, "r")
        slowprint(handle.read(), 0.04)
        handle.close()
        time.sleep(2)
        space()
        space()
    while True:
        _show("ali\\kitty.txt")
        _show("ali\\kitty2.txt")
        while True:
            z = input("Do you want to talk to Jim??").lower()
            if 'yes' in z:
                _show("ali\\kitty3.txt")
                while True:
                    _show("ali\\kitty4.txt")
                    _show("ali\\kitty5.txt")
                    while True:
                        _show("ali\\dr.txt")
                        while True:
                            w = input("Do you want to Talk with sherlock?").lower()
                            if 'yes' in w:
                                _show("ali\\dr1.txt")
                                while True:
                                    _show("ali\\dr3.txt")
                                    _show("ali\\dr4.txt")
                                    while True:
                                        _show("ali\\dr5.txt")
                                        t = input("What to do now:?").lower()
                                        if 'chat' in t or 'talk' in t:
                                            _show("ali\\jump.txt")
                                            final()
                                        elif 'walk' in t or 'around' in t:
                                            _show("ali\\jump1.txt")
                                            final()
                                        elif "map" in t:
                                            map()
                                            space()
                                            continue
                                        else:
                                            invalid()
                                            time.sleep(2)
                                            space()
                                            space()
                            elif 'no' in w:
                                _show("ali\\dr2.txt")
                            elif "map" in w:
                                map()
                                space()
                                continue
                            else:
                                invalid()
                                time.sleep(2)  # was: timesleep(2) -> NameError
                                space()
                                space()
                                continue
            elif 'no' in z:
                _show("ali\\kitty8.txt")
                kitty()
            elif "map" in z:
                map()
                space()
                continue
            else:
                invalid()
                time.sleep(2)
                space()
                space()
                continue
def run():
    """Escape sequence: Sherlock decides whether to run, go home, take
    the money, and finally reaches Kitty's place via kitty().

    Fix: four story files (hell.txt, hell1.txt, hell2.txt, home2.txt)
    were displayed with ``slowprint(handle.close(), ...)`` — passing
    None instead of the file text, a TypeError at runtime — they are
    now read with ``handle.read()`` like the rest of the game.
    """
    def _show(path):
        # Display one story file slowly, then pause and print two blank lines.
        handle = open(path, "r")
        slowprint(handle.read(), 0.04)
        handle.close()
        time.sleep(2)
        space()
        space()
    while True:
        _show("ali\\run.txt")
        _show("ali\\run1.txt")
        while True:
            l = input("Do you want sherlock to run?").lower()
            if 'yes' in l or 'run' in l:
                _show("ali\\run2.txt")
                while True:
                    f = input("What to do Now").lower()
                    if 'go' in f or 'walk' in f or 'home' in f:
                        _show("ali\\home.txt")
                        while True:
                            _show("ali\\sad.txt")
                            c = input("Do you want to take the money").lower()
                            if 'yes' in c:
                                _show("ali\\home1.txt")
                                while True:
                                    _show("ali\\hell.txt")   # was slowprint(rr.close(), ...)
                                    _show("ali\\hell1.txt")  # was slowprint(pp.close(), ...)
                                    while True:
                                        y = input("Break-in or ring the bell?")
                                        if 'break' in y:
                                            _show("ali\\hell2.txt")  # was slowprint(la.close(), ...)
                                            kitty()
                                        elif 'ring' in y or 'bell' in y:
                                            _show("ali\\hell3.txt")
                                            kitty()
                                        elif "map" in y:
                                            map()
                                            space()
                                            continue
                                        else:
                                            invalid()
                                            time.sleep(2)
                                            space()
                                            space()
                                            continue
                            elif 'no' in c:
                                _show("ali\\home2.txt")  # was slowprint(dd.close(), ...)
                            elif "map" in c:
                                map()
                                space()
                                continue
                            else:
                                invalid()
                                time.sleep(2)
                                space()
                                space()
                                continue
            elif 'no' in l or 'dont' in l or "don't" in l:
                _show("ali\\run3.txt")
            elif "map" in l:
                map()
                space()
                continue
            else:
                invalid()
                time.sleep(2)
                space()
                space()
                continue
def arrest():
    """Lestrade comes to arrest Sherlock; resisting leads through the
    police fight and into the escape in run()."""
    def _show(path, pause=True):
        # Display one story file slowly; optionally pause, then two blank lines.
        handle = open(path, "r")
        slowprint(handle.read(), 0.04)
        handle.close()
        if pause:
            time.sleep(2)
        space()
        space()
    while True:
        _show("ali\\arrest.txt")
        while True:
            w = input("What to do now?:").lower()
            if 'go' in w:
                _show("ali\\arrest1.txt")
            elif "map" in w:
                map()
                space()
                continue
            elif 'dont' in w or "don't" in w:
                _show("ali\\arrest2.txt", pause=False)
                _show("ali\\arrest4.txt")
                while True:
                    _show("ali\\arrest5.txt")
                    _show("ali\\arrest6.txt")
                    k = input("Fight the police man..??").lower()
                    if 'yes' in k or 'fight' in k:
                        _show("ali\\fight.txt")
                        _show("ali\\fight2.txt")
                        run()
                    elif 'no' in k or 'not' in k or 'dont' in k or "dont't" in k:
                        _show("ali\\fight1.txt")
                        run()
                    elif "map" in k:
                        map()
                        space()
                        continue
                    else:
                        invalid()
                        time.sleep(2)
                        space()
                        space()
                        continue
            else:
                invalid()
                time.sleep(2)
                space()
                space()
def kidnap2():
    """Sally arrives at the door; whether or not the player opens it,
    the scene ends with Sherlock's arrest via arrest()."""
    def _show(path):
        # Display one story file slowly, then pause and print two blank lines.
        handle = open(path, "r")
        slowprint(handle.read(), 0.04)
        handle.close()
        time.sleep(2)
        space()
        space()
    while True:
        for part in ("sally", "sally2", "sally3"):
            _show("ali\\" + part + ".txt")
        while True:
            o = input("Do you want to open the door?:")
            if 'yes' in o:
                _show("ali\\sally4.txt")
                arrest()
            elif 'no' in o:
                _show("ali\\sally5.txt")
                _show("ali\\sally6.txt")
                arrest()
            elif "map" in o:
                map()
                space()
                continue
            else:
                invalid()
                time.sleep(2)
                space()
                space()
                continue
def addlestone():
    """Trip to Addlestone: travel scenes, an optional restaurant stop,
    then Sally's knock at the door in kidnap2()."""
    def _show(path, speed):
        # Display one story file at the given speed, then pause and spacing.
        handle = open(path, "r")
        slowprint(handle.read(), speed)
        handle.close()
        time.sleep(2)
        space()
        space()
    while True:
        _show("ali\\addle.txt", 0.03)
        while True:
            _show("ali\\addle2.txt", 0.03)
            _show("ali\\addle3.txt", 0.03)
            _show("ali\\taxi1.txt", 0.03)
            while True:
                v = input("Aren't you feeling Hungry...!!!??").lower()
                if 'yes' in v or 'hungary' in v or 'starving' in v:
                    _show("ali\\rest.txt", 0.03)
                    while True:
                        _show("ali\\rest2.txt", 0.04)
                        kidnap2()
                elif 'no' in v or 'not' in v:
                    _show("ali\\rest1.txt", 0.03)
                    _show("ali\\rest3.txt", 0.04)
                    kidnap2()
                elif "map" in v:
                    map()
                    space()
                    continue
                else:
                    space()
                    invalid()
                    time.sleep(2)
                    space()
                    space()
                    continue
def kidnapping():
    """Kidnapped-children case: open the door, search the boarding-school
    room, collect the wooden pieces, then travel on via addlestone()."""
    def _show(path):
        # Display one story file slowly, then pause and print two blank lines.
        handle = open(path, "r")
        slowprint(handle.read(), 0.03)
        handle.close()
        time.sleep(2)
        space()
        space()
    while True:
        _show("ali\\kidnap.txt")
        while True:
            q = input("Do you Want Dr. John to open it..??")
            if 'yes' in q or 'want' in q:
                _show("ali\\open.txt")
                while True:
                    for part in ("kid1", "asd", "kid2", "kid3"):
                        _show("ali\\" + part + ".txt")
                    while True:
                        d = input("Do you Want to Sherlock to search the place?").lower()
                        if 'yes' in d or 'sure' in d or 'why not' in d or 'search' in d or 'examine' in d:
                            for part in ("search1", "search2", "search3", "search4",
                                         "search5", "search6", "search7", "search8"):
                                _show("ali\\" + part + ".txt")
                            while True:
                                h = input("Do you Want Sherlock To pick wooden pieces??").lower()
                                if 'yes' in h or "pick" in h:
                                    _show("ali\\search9.txt")
                                    addlestone()
                                elif 'no' in h:
                                    _show("ali\\search10.txt")
                                    addlestone()
                                elif "map" in h:
                                    map()
                                    space()
                                    continue
                                else:
                                    space()
                                    invalid()
                                    space()
                                    time.sleep(1)
                        elif 'no' in d or 'dont' in d or "don't" in d:
                            _show("ali\\walks1.txt")
                        elif "map" in d:
                            map()
                            space()
                            continue
                        else:
                            invalid()
                            time.sleep(2)
                            space()
                            space()
                            continue
def jim_mor():
    """Jim Moriarty's release and Mycroft's request to John, leading
    into the kidnapping case via kidnapping()."""
    def _show(path):
        # Display one story file slowly, then pause and print two blank lines.
        handle = open(path, "r")
        slowprint(handle.read(), 0.03)
        handle.close()
        time.sleep(2)
        space()
        space()
    while True:
        _show("ali\\bribe.txt")
        while True:
            _show("ali\\2mon.txt")
            while True:
                _show("ali\\mycroft.txt")
                while True:
                    a = input("Do you want Dr. John to go??").lower()
                    if 'yes' in a or 'sure' in a or 'want' in a:
                        for part in ("myc2", "myc3", "myc4", "myc5"):
                            _show("ali\\" + part + ".txt")
                        kidnapping()
                    elif 'no' in a or 'stay' in a or 'dont' in a or "don't" in a:
                        _show("ali\\stay.txt")
                        kidnapping()
                    elif "map" in a:
                        map()
                        space()
                        continue
                    else:
                        invalid()
                        space()
                        space()
                        time.sleep(2)
def court_proceeding():
    """Jim's trial and jail visit; ends with the tea-and-chat choice
    that hands off to jim_mor()."""
    def _show(path):
        # Display one story file slowly, then pause and print two blank lines.
        handle = open(path, "r")
        slowprint(handle.read(), 0.03)
        handle.close()
        time.sleep(2)
        space()
        space()
    _show("ali\\court1.txt")
    while True:
        a = input("Would You like to Define Jim Moriarity?").lower()
        if 'yes' in a or 'sure' in a or 'why not' in a:
            _show("ali\\defjim.txt")
            while True:
                _show("ali\\judge.txt")
                while True:
                    _show("ali\\injail.txt")
                    while True:
                        _show("ali\\iou.txt")
                        while True:
                            c = input("Chat with Jim or first pour tea then chat?").lower()
                            if 'yes' in c or 'chat' in c:
                                _show("ali\\chat.txt")
                                jim_mor()
                            elif 'pour' in c or 'tea' in c or 'offer' in c:
                                _show("ali\\chat1.txt")
                                jim_mor()
                            elif "map" in c:
                                map()
                                space()
                                continue
                            else:
                                invalid()
                                time.sleep(2)
                                space()
                                space()
                                continue
def london_tower():
    """Crime scene at the Tower of London; searching the place leads to
    Jim's trial in court_proceeding()."""
    def _show(path):
        # Display one story file slowly, then pause and print two blank lines.
        handle = open(path, "r")
        slowprint(handle.read(), 0.03)
        handle.close()
        time.sleep(2)
        space()
        space()
    _show("ali\\court.txt")
    while True:
        a = input("Do you want to search the place?").lower()
        if 'examine' in a or 'look around' in a or 'search' in a or 'yes' in a:
            _show("ali\\londontower.txt")
            while True:
                _show("ali\\londontower1.txt")
                court_proceeding()
        elif "map" in a:
            map()
            space()
            continue
        else:
            space()
            invalid()
            space()
            time.sleep(1)
def reichenbach_fall():
    """Start of the Reichenbach arc: Jim's text message, then the Tower
    of London scene via london_tower()."""
    def _show(path):
        # Display one story file slowly, then pause and print two blank lines.
        handle = open(path, "r")
        slowprint(handle.read(), 0.03)
        handle.close()
        time.sleep(2)
        space()
        space()
    while True:
        _show("ali\\abc.txt")
        while True:
            a = input("Do you want to reply back? : ").lower()
            if "ignore" in a or "dont reply" in a or "don't" in a or 'no' in a:
                _show("ali\\abc1.txt")
                london_tower()
            elif "message" in a or "reply" in a or "text" in a or 'yes' in a:
                _show("ali\\abc2.txt")
                london_tower()
            elif "map" in a:
                map()
                space()
                continue
            else:
                invalid()
                time.sleep(2)
                space()
                space()
                continue
def last():
    """The final pink-phone call of the Great Game: go to the pool
    complex to meet the bomber, then jump to reichenbach_fall().

    Fix: two ``time.sleep()`` calls were missing their required seconds
    argument (TypeError at runtime); both now sleep for 1 second, the
    delay used by every other branch of this function.
    """
    print("The pink phone rings again ")
    space()
    while True:
        a = input("what do you want to do ?").lower()
        if "pick" in a or "recieve" in a or "attend" in a:
            space()
            b = open("4rth\\last.txt", "r")
            slowprint(b.read(), 0.04)
            b.close()
            while True:
                c = input("Go to the complex")
                if "go" in c or "sure" in c or "yes" in c:
                    space()
                    d = open("4rth\\complex.txt", "r")
                    slowprint(d.read(), 0.04)
                    d.close()
                    space()
                    e = input("Press any key to continue....")
                    space()
                    f = open("4rth\\complex2.txt", "r")
                    slowprint(f.read(), 0.04)
                    f.close()
                    l = open("4rth\\complex3.txt", "r")
                    slowprint(l.read(), 0.04)
                    l.close()
                    space()
                    time.sleep(2)
                    print("After few days")
                    reichenbach_fall()
                elif "no" in c or "later" in c or "leave" in c:
                    space()
                    print("You have to go to the pool to reveal who the bomber is ")
                    space()
                    time.sleep(1)
                    continue
                elif "where" in c:
                    space()
                    print("You are at your house and planing to go to the pool.")
                    space()
                    time.sleep(1)
                    continue
                elif "map" in c:
                    map()
                    space()
                    continue
                else:
                    space()
                    invalid()
                    space()
                    time.sleep(1)
                    continue
        elif "no" in a or "ignore" in a:
            space()
            print("you have to pick the call its from the bomber")
            space()
            time.sleep(1)  # was time.sleep() -> TypeError
            continue
        elif "map" in a:
            map()
            space()
            continue
        elif "where" in a:
            space()
            print("you are at the police station.")
            space()
            time.sleep(1)  # was time.sleep() -> TypeError
        else:
            space()
            invalid()
            space()
            time.sleep(1)
            continue
def connie2():
    """Wrap-up of the Connie Prince case: share the deduction with John,
    report to Lestrade at the station, then the last call in last().

    Fix: the invalid-input branch of the "what do you want to do ?"
    prompt referenced ``invalid`` without calling it, so no error
    message was ever printed; it now calls ``invalid()``.
    """
    a = open("4rth\\done.txt", "r")
    slowprint(a.read(), 0.04)
    a.close()
    space()
    time.sleep(1)
    while True:
        b = input("would you like to tell him your deductions").lower()
        if "yes" in b or "sure" in b or "tell" in b or "why not" in b:
            c = open("4rth\\tell.txt", "r")
            slowprint(c.read(), 0.04)
            c.close()
            space()
            e = input("press any key to continue....")
            space()
            d = open("4rth\\tell2.txt", "r")
            slowprint(d.read(), 0.04)
            d.close()
            space()
            while True:
                print("You have to go to the police station to tell Lestrade")
                space()
                g = input("what do you want to do ? ").lower()
                if "police" in g or "lestrade" in g or "go" in g or "yes" in g or "sure" in g:
                    f = open("4rth\\police3.txt", "r")
                    slowprint(f.read(), 0.04)
                    f.close()
                    space()
                    i = input("press any key to continue.....")
                    space()
                    h = open("4rth\\police4.txt", "r")
                    slowprint(h.read(), 0.04)
                    h.close()
                    last()
                elif "no" in g or "later" in g:
                    space()
                    time.sleep(1)
                    continue
                elif "map" in g:
                    map()
                    space()
                    continue
                else:
                    invalid()  # was a bare ``invalid`` (function never called)
                    space()
                    time.sleep(1)
                    continue
        elif "no" in b or "later" in b:
            space()
            print("JOHN : Sherlock you can tell me your deduction I want to hear how close I was.")
            space()
            time.sleep(1)
            continue
        elif "where" in b:
            space()
            print("At Connie Prince house")
            space()
            time.sleep(1)
            continue
        elif "map" in b:
            map()
            space()
            continue
        else:
            space()
            invalid()
            space()
            time.sleep(1)
def answer():
    """Dialogue with Lestrade about the dead woman's missing things,
    ending with the bomber's phone call that leads into connie2()."""
    def _page(path):
        # Print one story file slowly (caller handles pauses/spacing).
        fh = open(path, "r")
        slowprint(fh.read(), 0.04)
        fh.close()
    while True:
        a = input('''What do you reply Lestrade
    1) Missing what missing ?
    2) You should get more information
    3) Join John and find those things''').lower()
        if '1' in a or "missing what missing ?" in a:
            space()
            _page("4rth\\missing.txt")
            space()
            time.sleep(1)
            while True:
                _page("4rth\\deliver.txt")
                space()
                time.sleep(1)
                g = input("What do you want to do ? ")
                if "go" in g or "sure" in g or "ok" in g or "home" in g:
                    space()
                    time.sleep(1)
                    _page("4rth\\221b.txt")
                    space()
                    i = input("press any key to continue....")
                    space()
                    _page("4rth\\221b2.txt")
                    time.sleep(1)
                    while True:
                        l = input("What to do now ? ").lower()
                        time.sleep(1)
                        if "recieve" in l or "attend" in l or "pick it" in l or "listen" in l:
                            space()
                            _page("4rth\\call4.txt")
                            connie2()
                        elif "ignore" in l or "no" in l or "cannot" in l or "leave" in l or "later" in l:
                            space()
                            print("you cannot ignore this call its from bomber")
                            space()
                            time.sleep(1)
                            continue
                        elif "map" in l:
                            map()
                            space()
                            continue
                        elif "where" in l:
                            space()
                            print("you are at your house 221b inspecting things with lestrade")
                            space()
                            time.sleep(1)
                            continue
                        else:
                            space()
                            invalid()
                            space()
                            time.sleep(1)
                            continue
                elif "no" in g or "later" in g or "sorry" in g:
                    space()
                    print("You have to go for further investigation.")
                    space()
                    time.sleep(1)
                    continue
                elif "where" in g:
                    space()
                    _page("4rth\\w3.txt")
                    space()
                    time.sleep(1)
                    continue
                elif "map" in g:
                    map()
                    space()
                    continue
                else:
                    space()
                    invalid()
                    space()
                    time.sleep(1)
                    continue
        elif '2' in a or "you should get more information" in a:
            space()
            _page("4rth\\find.txt")
            space()
            time.sleep(1)
            print("So you may ask another question.")
            space()
            continue
        elif '3' in a or "join John and find those things" in a:
            space()
            _page("4rth\\join.txt")
            space()
            time.sleep(1)
            continue
        elif "map" in a:
            map()
            space()
            continue
        elif "where" in a:
            space()
            _page("4rth\\w2.txt")
            space()
            time.sleep(1)
            continue
        else:
            space()
            invalid()
            space()
            time.sleep(1)
            continue
def connie():
    """Connie Prince crime scene with Lestrade; continued examination
    leads into the Lestrade dialogue in answer().

    Fix: the invalid-input branch called ``time.sleep()`` with no
    argument (TypeError at runtime); it now sleeps for 1 second like
    the other branches.
    """
    a = open("4rth\\police1.txt", "r")
    slowprint(a.read(), 0.04)
    a.close()
    space()
    b = input("press any key to continue....")
    space()
    c = open("4rth\\police2.txt", "r")
    slowprint(c.read(), 0.04)
    c.close()
    space()
    time.sleep(1)
    while True:
        d = open("4rth\\lestrade.txt", "r")
        slowprint(d.read(), 0.04)
        d.close()
        space()
        time.sleep(1)
        while True:
            e = input("Continue the examining ?").lower()
            if "yes" in e or "sure" in e or "continue" in e or "examine" in e:
                space()
                f = open("4rth\\further.txt", "r")
                slowprint(f.read(), 0.04)
                f.close()
                space()
                i = input("press any key to continue....")
                space()
                j = open("4rth\\further2.txt", "r")
                slowprint(j.read(), 0.04)
                j.close()
                space()
                time.sleep(1)
                answer()
            elif "no" in e or "not" in e or "ignore" in e:
                space()
                g = open("4rth\\ignore2.txt", "r")
                slowprint(g.read(), 0.04)
                g.close()
                space()
                time.sleep(1)
                continue
            elif "where" in e:
                space()
                h = open("4rth\\w2.txt", "r")
                slowprint(h.read(), 0.04)
                h.close()
                space()
                time.sleep(1)
                continue
            elif "map" in e:
                map()
                space()
                continue
            else:
                space()
                invalid()
                space()
                time.sleep(1)  # was time.sleep() -> TypeError
                continue
def greatgame3():
    """Fourth pip of the Great Game: the Connie Prince case intro and
    the two phone calls that lead to the police station via connie().

    Fix: the handle opened for ``4rth\\2.txt`` was never closed
    (resource leak); it is now closed after the file is displayed.
    """
    space()
    a = input("press any key to continue....")
    time.sleep(2)
    b = open("4rth\\1.txt", "r")
    slowprint(b.read(), 0.04)
    b.close()
    c = input("press any key to continue....")
    d = open("4rth\\2.txt", "r")
    slowprint(d.read(), 0.04)
    d.close()  # was missing: file handle leaked
    space()
    time.sleep(1)
    e = open("4rth\\connie.txt", "r")
    slowprint(e.read(), 0.04)
    e.close()
    space()
    while True:
        f = input("what do you want to do ?").lower()
        if "recieve" in f or "attend" in f or "pick it" in f or "listen" in f:
            space()
            g = open("4rth\\call.txt", "r")
            slowprint(g.read(), 0.04)
            g.close()
            time.sleep(1)
            space()
            h = open("4rth\\call2.txt", "r")
            slowprint(h.read(), 0.04)
            h.close()
            space()
            time.sleep(1)
            while True:
                print("Phone ringing and this time it's from Lestrade")
                space()
                k = input("what do you want to do ?").lower()
                if "recieve" in k or "attend" in k or "pick it" in k or "listen" in k:
                    space()
                    l = open("4rth\\call3.txt", "r")
                    slowprint(l.read(), 0.04)
                    l.close()
                    time.sleep(1)
                    while True:
                        space()
                        o = input('''What do you want to do:
    1) Go to police station
    2) eat lunch first ''').lower()
                        if "eat" in o or "lunch" in o:
                            space()
                            p = open("4rth\\eat.txt", "r")
                            slowprint(p.read(), 0.04)
                            p.close()
                            space()
                            time.sleep(1)
                            connie()
                        elif "go" in o or "lestrade" in o or "police" in o:
                            space()
                            q = open("4rth\\police.txt", "r")
                            slowprint(q.read(), 0.04)
                            q.close()
                            space()
                            time.sleep(1)
                            connie()
                        elif "map" in o:
                            map()
                            space()
                            continue
                        elif "where" in o:
                            space()
                            r = open("4rth\\w1.txt", "r")
                            slowprint(r.read(), 0.04)
                            r.close()
                            space()
                            time.sleep(1)
                            continue
                        else:
                            space()
                            invalid()
                            space()
                            time.sleep(1)
                            continue
                elif "ignore" in k or "no" in k or "cannot" in k or "leave" in k or "later" in k:
                    space()
                    m = open("4rth\\ignore1.txt", "r")
                    slowprint(m.read(), 0.04)
                    m.close()
                    space()
                    time.sleep(1)
                    continue
                elif "where" in k:
                    space()
                    n = open("4rth\\w1.txt", "r")
                    slowprint(n.read(), 0.04)
                    n.close()
                    space()
                    time.sleep(1)
                    continue
                elif "map" in k:
                    map()
                    space()
                else:
                    space()
                    invalid()
                    space()
                    time.sleep(1)
                    continue
        elif "ignore" in f or "no" in f or "cannot" in f or "leave" in f or "later" in f:
            space()
            i = open("4rth\\ignore.txt", "r")
            slowprint(i.read(), 0.04)
            i.close()
            space()
            time.sleep(1)
            continue
        elif "where" in f:
            space()
            j = open("4rth\\w1.txt", "r")
            slowprint(j.read(), 0.04)
            j.close()
            space()
            time.sleep(1)
            continue
        elif "map" in f:
            map()
            space()
        else:
            space()
            invalid()
            space()
            time.sleep(1)
            continue
def police():
    """Police car pound: share the further assumptions with Lestrade and
    Watson, then move on to greatgame3()."""
    def _page(path):
        # Print one story file slowly (caller adds pauses/spacing).
        fh = open(path, "r")
        slowprint(fh.read(), 0.004)
        fh.close()
    _page("3rd\\police.txt")
    space()
    b = input("press any key to continue....")
    space()
    _page("3rd\\police2.txt")
    space()
    time.sleep(1)
    while True:
        d = input("Tell them the further assumption ?").lower()
        if "yes" in d or "sure" in d or "why not" in d or "tell" in d:
            space()
            _page("3rd\\further.txt")
            space()
            time.sleep(1)
            f = input("press any key to continue....")
            space()
            _page("3rd\\further2.txt")
            greatgame3()
        elif "no" in d or "cannot" in d or "ignore" in d:
            space()
            _page("3rd\\ignore3.txt")
            space()
            time.sleep(1)
            continue
        elif "where" in d:
            space()
            print("you are at the police car pund with watson and Lestrade to inspect the car.")
            space()
            time.sleep(1)
            continue
        elif "quit" in d or "exit" in d:
            sys.exit()
        elif "map" in d:
            map()
            space()
            continue
        else:
            space()
            invalid()
            space()
            time.sleep(1)
            continue
def lab2():
    """Lab work on the blood sample with John, ending in the call that
    sends Sherlock to the car pound in police().

    Fix: the handle opened for ``3rd\\lab.txt`` was never closed
    (resource leak); it is now closed after the file is displayed.
    """
    while True:
        print("JOHN : So what you deduced ? ")
        a = input("tell John or not ? ")
        if "tell" in a or "ok" in a or "sure" in a or "talk" in a:
            space()
            b = open("3rd\\john.txt", "r")
            slowprint(b.read(), 0.004)
            b.close()
            space()
            time.sleep(1)
            while True:
                d = open("3rd\\lab.txt", "r")
                slowprint(d.read(), 0.004)
                d.close()  # was missing: file handle leaked
                space()
                time.sleep(1)
                while True:
                    e = input("what to do ? ").lower()
                    if "examine" in e or "investigate" in e or "check" in e or "look" in e:
                        space()
                        f = open("3rd\\blood.txt", "r")
                        slowprint(f.read(), 0.004)
                        f.close()
                        space()
                        time.sleep(1)
                        while True:
                            i = input("attend the call ? ").lower()
                            if "yes" in i or "sure" in i or "ok" in i or "attend" in i or "recieve" in i:
                                space()
                                j = open("3rd\\blood2.txt", "r")
                                slowprint(j.read(), 0.004)
                                j.close()
                                space()
                                time.sleep(1)
                                police()
                            elif "no" in i or "donot" in i or "cannot" in i or "later" in i:
                                space()
                                k = open("3rd\\ignore2.txt", "r")
                                slowprint(k.read(), 0.004)
                                k.close()
                                space()
                                time.sleep(1)
                                continue
                            elif "where" in i:
                                space()
                                l = open("3rd\\w3.txt", "r")
                                slowprint(l.read(), 0.004)
                                l.close()
                                space()
                                time.sleep(1)
                                continue
                            elif "quit" in i or "exit" in i:
                                sys.exit()
                            elif "map" in i:
                                map()
                                space()
                                continue
                            else:
                                space()
                                invalid()
                                space()
                                time.sleep(1)
                                continue
                    elif "donot" in e or "no" in e or "cannot" in e or "ignore" in e:
                        space()
                        g = open("3rd\\ignore1.txt", "r")
                        slowprint(g.read(), 0.004)
                        g.close()
                        space()
                        time.sleep(1)
                        continue
                    elif "where" in e:
                        space()
                        h = open("3rd\\w2.txt", "r")
                        slowprint(h.read(), 0.004)
                        h.close()
                        space()
                        time.sleep(1)
                        continue
                    elif "quit" in e or "exit" in e:
                        sys.exit()
                    elif "map" in e:
                        map()
                        space()
                        continue
                    else:
                        space()
                        invalid()
                        space()
                        time.sleep(1)
                        continue
        elif "no" in a or "donot" in a or "cannot" in a or "later" in a:
            space()
            c = open("3rd\\john1.txt", "r")
            slowprint(c.read(), 0.004)
            c.close()
            space()
            time.sleep(1)
            continue
        elif "quit" in a or "exit" in a:
            sys.exit()
        elif "map" in a:
            map()
            space()
            continue
        else:
            space()
            invalid()
            space()
            time.sleep(1)
def scene():
    """Third-case crime scene: question Mrs. Monkford, visit JANUS cars,
    and move on to the lab in lab2().

    Fixes: the "where" branch of the JANUS prompt called the undefined
    name ``ttime.sleep(1)`` (NameError), and the "where" branch of the
    taxi prompt closed the undefined name ``s`` instead of the handle
    it had just opened (NameError plus a leaked handle).
    """
    def _page(path):
        # Print one story file slowly (caller adds pauses/spacing).
        fh = open(path, "r")
        slowprint(fh.read(), 0.004)
        fh.close()
    _page("3rd\\scene.txt")
    while True:
        b = input("what to do now ?").lower()
        if "examine" in b or "investigate" in b or "check" in b or "ok" in b:
            _page("3rd\\check.txt")
            time.sleep(1)
            space()
            while True:
                q = input("talk to Mrs. Monkford or not ?").lower()
                if "talk" in q or "yes" in q or "sure" in q:
                    _page("3rd\\mrs.txt")
                    space()
                    _page("3rd\\mrs2.txt")
                    space()
                    time.sleep(1)
                    while True:
                        print("you have to go the JANUS cars for further investigation")
                        f = input("what to do now ?").lower()
                        if "janus" in f or "go" in f or "ok" in f or "cab" in f:
                            print("you arrive at JANUS cars and goes to the manager's office.")
                            while True:
                                g = input("what to do now ?").lower()
                                if "investigate" in g or "ask" in g or "examine" in g:
                                    _page("3rd\\janus.txt")
                                    space()
                                    time.sleep(1)
                                    Z = input("press any key to continue ....")
                                    space()
                                    time.sleep(1)
                                    _page("3rd\\janus2.txt")
                                    space()
                                    time.sleep(1)
                                    lab2()
                                elif "no" in g or "donot" in g or "cannot" in g or "later" in g:
                                    space()
                                    print("you have to investigate him.")
                                    space()
                                    time.sleep(1)
                                    continue
                                elif "where" in g:
                                    space()
                                    _page("3rd\\w.txt")
                                    space()
                                    time.sleep(1)  # was ttime.sleep(1) -> NameError
                                    continue
                                elif "quit" in g or "exit" in g:
                                    sys.exit()
                                elif "map" in g:
                                    map()
                                    space()
                                    continue
                                else:
                                    space()
                                    print("You have to in vestigate him.")
                                    space()
                                    invalid()
                                    space()
                                    time.sleep(0.004)
                                    continue
                        elif "donot" in f or "no" in f or "cannot" in f or "later" in f:
                            _page("3rd\\no2.txt")
                            space()
                            time.sleep(1)
                            continue
                        elif "where" in f:
                            # was: yy opened, then ``s.close()`` — wrong (undefined) handle closed
                            _page("3rd\\sea.txt")
                            space()
                            time.sleep(1)
                            continue
                        elif "quit" in f or "exit" in f:
                            sys.exit()
                        elif "map" in f:
                            map()
                            space()
                            continue
                        else:
                            invalid()
                            space()
                            time.sleep(1)
                            continue
                elif "no" in q or "cannot" in q or "leave" in q or "not" in q:
                    _page("3rd\\no.txt")
                    space()
                    time.sleep(1)
                    continue
                elif "where" in q:
                    _page("3rd\\sea.txt")
                    space()
                    time.sleep(1)
                    continue
                elif "quit" in q or "exit" in q:
                    sys.exit()
                elif "map" in q:
                    map()
                    space()
                    continue
                else:
                    space()
                    invalid()
                    space()
                    time.sleep(1)
                    continue
        elif "lestrade" in b:
            space()
            _page("3rd\\lestrade3.txt")
            space()
            time.sleep(1)
            continue
        elif "watson" in b:
            _page("3rd\\watson.txt")
            space()
            time.sleep(1)
            continue
        elif "where" in b:
            _page("3rd\\sea.txt")
            space()
            time.sleep(1)
            continue
        elif "quit" in b or "exit" in b:
            sys.exit()
        elif "map" in b:
            map()
            space()
            continue
        else:
            invalid()
            space()
            time.sleep(1)
            continue
def call2():
    """Phone call about Mr. Monkford's missing car; asking Lestrade
    moves the story on to the crime scene in scene()."""
    def _page(path):
        # Print one story file slowly (caller adds pauses/spacing).
        fh = open(path, "r")
        slowprint(fh.read(), 0.004)
        fh.close()
    _page("3rd\\call4.txt")
    space()
    time.sleep(1)
    _page("3rd\\call5.txt")
    space()
    while True:
        c = input("Ask lestrade about the missing car ? ").lower()
        if "yes" in c or "ask him" in c or "talk to him" in c:
            _page("3rd\\lestrade.txt")
            space()
            time.sleep(1)
            scene()
        elif "no" in c or "donot" in c or "ignore" in c or "cannot" in c:
            _page("3rd\\lestrade2.txt")
            space()
            time.sleep(1)
        elif "quit" in c or "exit" in c:
            sys.exit()
        elif "where" in c:
            print("you are at the police station with Watson and Lestrade")
            space()
            time.sleep(1)
            continue
        elif "map" in c:
            map()
            space()
            continue
        else:
            invalid()
            space()
            time.sleep(1)
            continue
def message2():
    """The pink phone rings about the car case; every answer leads into
    the Lestrade call in call2()."""
    def _page(path):
        # Print one story file slowly (caller adds pauses/spacing).
        fh = open(path, "r")
        slowprint(fh.read(), 0.004)
        fh.close()
    _page("3rd\\car.txt")
    while True:
        b = input("you want to pick the phone ? ").lower()
        if "pick the call" in b or "attend" in b or "recieve" in b or "pick it" in b or "pick call" in b:
            _page("3rd\\call1.txt")
            space()
            time.sleep(1)
            call2()
        elif "no" in b or "ask lestrade" in b or "ask watson" in b or "donot" in b or "cannot" in b:
            _page("3rd\\call2.txt")
            space()
            time.sleep(1)
            call2()
        elif "watson" in b or "ask watson" in b:
            _page("3rd\\call3.txt")
            space()
            time.sleep(1)
            call2()
        elif "quit" in b or "exit" in b:
            sys.exit()
        elif "map" in b:
            map()
            space()
            continue
        else:
            invalid()
            space()
            time.sleep(1)
#Ahmed ashraf
def greatgame2():  # 3rd case
    """Opening of the third pip (the Janus Cars case): tea with
    Lestrade, then the pink-phone message in message2().

    Fix: the decline branch opened ``"\\ignore.txt"`` — an absolute
    path on the drive root — while every other file of this case lives
    in the ``3rd`` folder; it now reads ``3rd\\ignore.txt``.
    """
    a = open("3rd\\1.txt", "r")
    slowprint(a.read(), 0.004)
    a.close()
    e = open("3rd\\2.txt", "r")
    slowprint(e.read(), 0.004)
    e.close()
    space()
    time.sleep(1)
    while True:
        b = input("LESTRADE : lets have a cup of tea ").lower()
        if "drink" in b or "ok" in b or "sure" in b or "why not" in b or "yes" in b:
            c = open("3rd\\tea.txt", "r")
            slowprint(c.read(), 0.004)
            c.close()
            space()
            time.sleep(1)
            message2()
        elif "no" in b or "not now" in b or "never" in b or "cannot" in b or "later" in b:
            d = open("3rd\\ignore.txt", "r")  # was "\\ignore.txt" (drive root)
            slowprint(d.read(), 0.004)
            d.close()
            space()
            time.sleep(1)
            message2()
        elif "quit" in b or "exit" in b:
            sys.exit()
        elif "map" in b:
            map()
            space()
            continue
        else:
            invalid()
            space()
            time.sleep(1)
def call():  # case-2 shoe-examination scene
    """Case-2 scene: examine Carl Power's shoes, search the locker/pool, and
    decide the cause of death. Solving it chains into greatgame2().

    Fix: the cause-of-death menu used ``elif "quit" or "exit" in i:`` --
    the bare non-empty string "quit" is always truthy, so any input that
    did not match an earlier branch exited the game and the final ``else``
    (invalid-input handler) was unreachable.
    """
    while True:
        q=input("you want to examine shoes for clues or leave them ? ")
        q=q.lower()
        if "leave" in q or "ignore" in q or "donot examine" in q:
            ww=open("2nd\\ignore2.txt","r")
            slowprint(ww.read(),0.04)
            ww.close()
            space()
            time.sleep(1)
            continue
        elif "examine" in q or "investigate" in q or "inspect" in q or "check" in q:
            qq=open("2nd\\examine2.txt","r")
            slowprint(qq.read(),0.04)
            qq.close()
            space()
            time.sleep(1)
            while True:
                p=input('''where you want to Search for information about carl power's 
                            1) Locker
                            2) Swiming pool''')
                p=p.lower()
                if "swiming pool" in p or "2" in p:
                    o=open("2nd\\swiming.txt","r")
                    slowprint(o.read(),0.04)
                    o.close()
                    space()
                    time.sleep(1)
                    continue
                elif "locker" in p or '1' in p:
                    pp=open("2nd\\locker.txt","r")
                    slowprint(pp.read(),0.04)
                    pp.close()
                    while True:
                        i=input('''what do you think the reason could be of his death :
                                    1) Murder 
                                    2) sucide
                                    3) natural ''')
                        i=i.lower()
                        if "murder" in i or "killed" in i or "1" in i:
                            k=open("2nd\\killed.txt","r")
                            slowprint(k.read(),0.04)
                            k.close()
                            space()
                            time.sleep(1)
                            l=open("2nd\\1solve.txt","r")
                            slowprint(l.read(),0.04)
                            l.close()
                            greatgame2()
                        elif "sucide" in i or '2' in i:
                            ss=open("2nd\\sucide.txt","r")
                            slowprint(ss.read(),0.04)
                            ss.close()
                            space()
                            time.sleep(1)
                            continue
                        elif "natural" in i or "3" in i:
                            n=open("2nd\\natural.txt","r")
                            slowprint(n.read(),0.04)
                            n.close()
                            space()
                            time.sleep(1)
                            continue
                        # FIX: was 'elif "quit" or "exit" in i:' (always True).
                        elif "quit" in i or "exit" in i:
                            sys.exit()
                        else:
                            invalid()
                            space()
                            time.sleep(1)
                elif "quit" in p or "exit" in p:
                    sys.exit()
                elif "map" in p:
                    map()
                    space()
                    continue
                else:
                    invalid()
                    space()
                    time.sleep(1)
        elif "where" in q:
            print("still in the rooom")
            space()
            time.sleep(1)
            continue
        elif "map" in q:
            map()
            space()
            continue
        else:
            invalid()
            space()
            time.sleep(1)
# Module-level inventory list -- presumably the items Sherlock is carrying.
# NOTE(review): the name 'a' is also reused as a local in several functions;
# confirm nothing reads this global before renaming it.
a=['Pink Iphone','gun','microscope','gloves']
def envelope():
    """Case-2 envelope scene: travel to 221C, examine the room, and handle
    the pink phone call that leads into call().
    """
    space()
    time.sleep(1)
    q=open("2nd\\envelop1.txt","r")
    slowprint(q.read(),0.04)
    q.close()
    while True:
        w = input("what to do now ? ")
        w = w.lower()
        if "go there" in w or "lets go" in w or "move" in w:
            space()
            time.sleep(1)
            e=open("2nd\\221c.txt","r")  # scene text for 221C
            slowprint(e.read(),0.04)
            e.close()
            space()
            time.sleep(1)
            while True:
                r=input("What to do now ? ")
                r=r.lower()
                space()
                time.sleep(1)
                if "examine" in r or "inspect" in r:
                    t=open("2nd\\examine1.txt","r")
                    slowprint(t.read(),0.04)
                    t.close()
                    space()
                    time.sleep(1)
                    while True:
                        qq = input("Want to pick the call or not ? ")
                        qq = qq.lower()
                        # NOTE(review): 'and' binds tighter than 'or', so this reads
                        # "pick it" OR ("pick" AND NOT "donot pick") OR "recieve" OR
                        # "attend" -- which appears intended, but worth confirming.
                        if "pick it" in qq or "pick" in qq and not "donot pick" in qq or "recieve" in qq or "attend" in qq:
                            ww=open("2nd\\call.txt","r")
                            slowprint(ww.read(),0.04)
                            ww.close()
                            space()
                            time.sleep(1)
                            call()
                        elif "ignore" in qq or "donot pick" in qq:
                            ee=open("2nd\\ignore1.txt","r")
                            slowprint(ee.read(),0.04)
                            ee.close()
                            space()
                            time.sleep(1)
                        elif "lestrade" in qq or "watson" in qq:
                            print("Its better you pick the call ")
                            space()
                            time.sleep(1)
                        elif "quit" in qq or "exit" in qq:
                            sys.exit()
                        elif "map" in qq:
                            map()
                            space()
                            continue
                        else:
                            invalid()
                            space()
                            time.sleep(1)
                elif "leave" in r or "donot check" in r or "ignore" in r or "cannot" in r:
                    y=open("2nd\\donot1.txt","r")
                    slowprint(y.read(),0.04)
                    y.close()
                    space()
                    time.sleep(1)
                elif "where" in r:
                    print("You are in 221C in the basment of Mrs. Hudesn house.")
                    space()
                    time.sleep(1)
                elif "quit" in r or "exit" in r:
                    sys.exit()
                elif "map" in r:
                    map()
                    space()
                    continue
                else:
                    invalid()
                    space()
                    time.sleep(1)
        elif "donot go" in w or " we cannot go" in w or "donot" in w:
            space()
            time.sleep(1)
            r=open("2nd\\donot.txt","r")
            slowprint(r.read(),0.04)
            r.close()
            space()
            time.sleep(1)
        elif "where" in w:
            print("you are standing in the police station")
            space()
            time.sleep(1)
        elif "quit" in w or "exit" in w:
            sys.exit()
        elif "map" in w:
            map()
            space()
            continue
        else:
            invalid()
            space()
            time.sleep(1)
def thegreatgame():  # start of the 2nd case
    """Open case 2: show the intro/bomb screens, then loop on the phone-call
    prompt until the envelope path hands control to envelope().
    """
    while True:
        q=open("2nd\\great.txt","r")
        slowprint(q.read(),0.04)
        q.close()
        time.sleep(1)
        w=open("2nd\\bomb.txt","r")
        slowprint(w.read(),0.04)
        w.close()
        while True:
            # NOTE(review): the local name 'call' shadows the module-level
            # function call() inside this loop body (harmless here because
            # call() is only invoked from envelope(), but worth renaming).
            call = input("what do you want to do ? ")
            call = call.lower()
            space()
            time.sleep(1)
            if "attend" in call or "recieve" in call or "pick call" in call:
                a=open("2nd\\attend.txt","r")
                slowprint(a.read(),0.04)
                a.close()
                space()
                time.sleep(1)
                while True:
                    d=open("2nd\\cab.txt","r")
                    slowprint(d.read(),0.04)
                    d.close()
                    space()
                    time.sleep(1)
                    while True:
                        f=input("what to do now ? ")
                        f=f.lower()
                        if "examine" in f or "inspect" in f or "open" in f:
                            g=open("2nd\\envelope.txt","r")
                            slowprint(g.read(),0.04)
                            g.close()
                            envelope()
                        elif "ignore" in f or "walk" in f or "talk" in f:
                            print("Lestrade : Sherlock inspect the envelope ")
                            space()
                            time.sleep(1)
                        elif "where" in f:
                            print("you are in the police stattion with John")
                            space()
                            time.sleep(1)
                        elif "quit" in f or "exit" in f:
                            sys.exit()
                        elif "map" in f:
                            map()
                            space()
                            continue
                        else:
                            invalid()
                            space()
                            time.sleep(1)
            elif "ignore" in call or "donot attend" in call or "donot recieve" in call or "donot pick" in call:
                s=open("2nd\\ignore.txt","r")
                slowprint(s.read(),0.04)
                s.close()
                space()
                time.sleep(1)
            elif "quit" in call or "exit" in call:
                sys.exit()
            elif "map" in call:
                map()
                space()
                continue
            else:
                invalid()
                space()
                time.sleep(1)
def chellenge():  # the final challenge with the cabbie (sic: "chellenge")
    """Final scene of case 1: the cabbie's pills-or-gun game. Choosing the
    gun solves the mystery and chains into thegreatgame().

    Fix: the quit branch used ``elif "quit" or "exit" in yy:`` -- the bare
    non-empty string "quit" is always truthy, so any unmatched input exited
    the game and the final ``else`` (invalid-input handler) was unreachable.
    """
    dd=open("files\\chellenge1.txt","r")
    slowprint(dd.read(),0.04)
    dd.close()
    time.sleep(1)
    space()
    while True:
        tt=input("YOU WANT TO PLAY THE GAME ? ")
        tt = tt.lower()
        if "yes" in tt or "sure" in tt or "yea" in tt:
            time.sleep(1)
            space()
            while True:
                yy=input("cabbie offers you the same what will you choose pills or the gun ? ")
                yy=yy.lower()
                if "pill" in yy :
                    print("You die because you choose the wrong pill try again ")
                    time.sleep(1)
                    space()
                    continue
                elif "gun" in yy:
                    ss=open("files\\gun.txt","r")
                    slowprint(ss.read(),0.04)
                    ss.close()
                    time.sleep(1)
                    space()
                    print("MYSTERY SOLVED")  # 1st mystery solved
                    time.sleep(2)
                    space()
                    thegreatgame()  # next case / next level
                # FIX: was 'elif "quit" or "exit" in yy:' (always True).
                elif "quit" in yy or "exit" in yy:
                    sys.exit()
                elif "map" in yy:
                    map()
                    space()
                    continue
                else:
                    invalid()
                    continue
        elif "no" in tt or "cannot" in tt:
            print("YOU HAVE TO PLAY THE GAME ")
            space()
            invalid()
            time.sleep(1)
            space()
        elif "quit" in tt or "exit" in tt:
            sys.exit()
        elif "map" in tt:
            map()
            space()
            continue
        else:
            invalid()
            time.sleep(1)
            space()
            continue
def taxi1():
    """Taxi interrogation scene: loop asking the cabbie questions; the two
    'why/how' questions advance the story into chellenge().
    """
    while True:
        rr=input("what do you want to ask the cabbie now ? ")  # free-text question
        rr=rr.lower()
        if "who are you" in rr:
            time.sleep(1)
            space()
            print("Cabbie : I'm the person who killed that lady. ")
            space()
            time.sleep(1)
        elif "who am i" in rr:
            print("you are detective Sherlock holmes ")
            time.sleep(1)
            space()
            continue
        elif "why are you doing this" in rr:
            time.sleep(1)
            space()
            print("Cabbie : I'm doing this for my childern everytime I take a life $100 are gifted to them.")
            print("Cabbie : I don't killed her i just gave her an offer and she killed herself. I just play a game ")
            time.sleep(1)
            space()
            chellenge()
        elif "how you killed her" in rr:
            space()
            time.sleep(1)
            print("Cabbie : I don't killed her i just gave her an offer and she killed herself ")
            space()
            time.sleep(1)
            chellenge()
        elif "quit" in rr or "exit" in rr:
            sys.exit()
        elif "map" in rr:
            map()
            space()
            continue
        else:
            invalid()
            space()
            time.sleep(1)
def flat1():
    """Flat scene after finding the bag: the cab arrives and the player must
    go with the cabbie (taxi1()) to continue the mystery.
    """
    while True:
        qq=open("files\\flat1.txt","r")
        slowprint(qq.read(),0.04)
        qq.close()
        space()
        time.sleep(1)
        while True:
            ww=input("what to do now ? ")
            ww=ww.lower()
            if "outside" in ww or "go in taxi" in ww or "go" in ww or "go in cab" in ww or "leave flat" in ww:
                ee=open("files\\taxi1.txt","r")
                slowprint(ee.read(),0.04)
                ee.close()
                space()
                time.sleep(1)
                taxi1()
            # Refusing is not allowed -- the story requires the taxi ride.
            elif "dont" in ww or "stay" in ww or "don't" in ww or "no" in ww or "cant" in ww or "can't" in ww or "cannot" in ww:
                print("You have to go with cabbie to solve the mystery ")
                continue
            elif "ask mrs hudson" in ww or "who" in ww or "did not" in ww:
                print("Mrs. Hudson : Sorry dear you have to ask him about this ")
                continue
            elif "quit" in ww or "exit" in ww:
                sys.exit()
            elif "map" in ww:
                map()
                space()
                continue
            else:
                invalid()
def trash():
    """Trash-bin scene: show the bin/Watson screens, then branch to flat1()
    whether or not the player agrees to eat.
    """
    while True:
        time.sleep(1.5)
        space()
        ww=open("files\\bin.txt","r")
        slowprint(ww.read(),0.04)
        ww.close()
        space()
        time.sleep(2)
        qq=open("files\\watson1.txt","r")
        slowprint(qq.read(),0.04)
        qq.close()
        # NOTE(review): 'ww' is reused here -- first a file handle, now the
        # user's answer. Harmless, but a distinct name would be clearer.
        ww=input("what do you want to do now ? ")
        ww=ww.lower()
        if "dont" in ww or "don't" in ww or "no" in ww or "cant" in ww or "can't" in ww or "not" in ww:
            space()
            time.sleep(1)
            ss=open("files\\eatn1.txt","r")
            slowprint(ss.read(),0.04)
            ss.close()
            space()
            time.sleep(1)
            flat1()
        elif "yes" in ww or "sure" in ww or "why not" in ww or "ok" in ww or "okay" in ww:
            space()
            time.sleep(1)
            ee=open("files\\eat1.txt","r")
            slowprint(ee.read(),0.04)
            ee.close()
            space()
            time.sleep(1)
            flat1()
        elif 'quit' in ww:
            sys.exit()
        elif "map" in ww:
            map()
            space()
            continue
        else:
            invalid()
            continue
def pink():
    """Case-1 crime-scene loop ("A Study in Pink"): look around, examine the
    body, then search the street (left branch leads to trash() and onward).

    Fix: the 'look' branch had an unreachable ``time.sleep(1)`` immediately
    after ``continue``; the dead statement is removed (no behavior change).
    """
    while True:
        time.sleep(1)
        space()
        a=open("files\\pink1.txt","r")
        slowprint(a.read(),0.04)
        a.close()
        space()
        while True:
            do=input("what do you want to do ? ")
            do=do.lower()
            if 'look' in do:
                time.sleep(1)
                b=open("files\\looka1.txt","r")
                slowprint(b.read(),0.04)
                b.close()
                space()
                continue
            elif "map" in do:
                map()
                space()
                continue
            elif 'examine' in do or "inspect" in do or "detect" in do:
                time.sleep(1)
                c=open("files\\examine1.txt","r")
                slowprint(c.read(),0.04)
                c.close()
                space()
                while True:
                    space()
                    time.sleep(1.5)
                    r=open('files\\search1.txt','r')
                    slowprint(r.read(),0.04)
                    r.close()
                    space()
                    time.sleep(1.5)
                    w=input("where you want to search ? (search in street go left or right)")
                    w=w.lower()
                    if 'right' in w:
                        time.sleep(1.5)
                        space()
                        q=open("files\\right1.txt","r")
                        slowprint(q.read(),0.04)
                        q.close()
                        space()
                        print("Going back to the building and standing infront of it again ")
                        time.sleep(1.5)
                    elif "map" in w:
                        map()
                        space()
                        continue
                    elif 'left' in w:
                        space()
                        time.sleep(1.5)
                        p=open("files\\left1.txt","r")
                        slowprint(p.read(),0.04)
                        p.close()
                        time.sleep(1.5)
                        space()
                        while True:
                            z=input("Where you want to search now ?")
                            z=z.lower()
                            if "gutter" in z:
                                space()
                                time.sleep(1.5)
                                qq=open("files\\guter.txt","r")
                                slowprint(qq.read(),0.04)
                                qq.close()
                                space()
                                time.sleep(1.5)
                                continue
                            elif "trash" in z or "bin" in z or "garbage" in z:
                                trash()
                            elif "plant" in z or "green" in z or "small plants" in z:
                                time.sleep(1.5)
                                space()
                                ee=open("files\\plant.txt","r")
                                slowprint(ee.read(),0.04)
                                ee.close()
                                continue
                            elif "all" in z or "every" in z or "them" in z:
                                time.sleep(1.5)
                                space()
                                qq=open("files\\guter.txt","r")
                                slowprint(qq.read(),0.04)
                                qq.close()
                                space()
                                time.sleep(1.5)
                                ee=open("files\\plant.txt","r")
                                slowprint(ee.read(),0.04)
                                ee.close()
                                space()
                                trash()
                            elif 'quit' in z:
                                sys.exit()
                            elif "map" in z:
                                map()
                                space()
                                continue
                            else:
                                invalid()
                                continue
                    else:
                        space()
                        invalid()
                        space()
            elif 'walk' in do:
                time.sleep(1)
                d=open("files\\walk1.txt","r")
                slowprint(d.read(),0.04)
                d.close()
                space()
                time.sleep(1)
                continue
            elif 'where' in do:
                g=open("files\\where1.txt","r")
                slowprint(g.read(),0.04)
                g.close()
            elif 'lestrade' in do:
                i=open("files\\lestrade1.txt","r")
                slowprint(i.read(),0.04)
                i.close()
            elif "map" in do:
                map()
                space()
                continue
            elif 'quit' in do:
                sys.exit()
            else:
                invalid()
                space()
                continue
            # Runs after branches that fall through (e.g. 'where', 'lestrade').
            time.sleep(1)
def credits():
    """Roll the credits screen from files\\credits.txt."""
    with open("files\\credits.txt", "r") as src:
        slowprint(src.read(), 0.04)
    space()
def help():  # NOTE: shadows the builtin help(); name kept for existing callers
    """Show the instructions screen from files\\help.txt."""
    with open("files\\help.txt", "r") as src:
        slowprint(src.read(), 0.04)
    space()
def play():
    """Start a new game: show the opening screen, then ask how to travel to
    the crime scene; either choice ends up in pink().
    """
    while True:
        f=open("files\\1.txt","r")
        slowprint(f.read(),0.04)
        f.close()
        space()
        while True:
            ride = input('''want to go with 
                        1) Lestrade in police car
                        2) cab/Taxi ''')
            ride = ride.lower()
            if 'cab' in ride or 'taxi' in ride or '2' in ride:
                c=open("files\\cab.txt","r")
                slowprint(c.read(),0.04)
                c.close()
                pink()
            elif 'lestrade' in ride or '1' in ride:
                d=open("files\\lestrade.txt","r")
                slowprint(d.read(),0.04)
                d.close()
                space()
                pink()
            elif 'quit' in ride or 'exit' in ride :
                sys.exit()
            elif "map" in ride:
                map()
                space()
                continue
            else:
                space()
                invalid()
                space()
                time.sleep(1)
                continue
def menue():
    """Main menu loop (sic: "menue"). Dispatches to play/help/credits/
    characters/map; option 6 breaks out and ends the program.
    """
    while True:
        m='''PRESS 1 TO PLAY 
        PRESS 2 FOR HELP/INSTRUCTIONS
        PRESS 3 FOR CREDITS 
        PRESS 4 FOR CHARACTERS 
        PRESS 5 FOR MAP
        PRESS 6 FOR EXIT : '''
        menu_1 = input(m)
        menu_1= menu_1.lower()
        if '1' in menu_1 or 'play' in menu_1:
            space()
            play()
        elif '2' in menu_1 or 'help' in menu_1 or 'instructions' in menu_1:
            space()
            help()
        elif '3' in menu_1 or 'credits' in menu_1:
            space()
            credits()
        elif '4' in menu_1 or 'characters' in menu_1:
            space()
            time.sleep(1)
            aa=open('files\\characters.txt','r')
            slowprint(aa.read(),0.04)
            aa.close()
            space()
        elif '6' in menu_1 or 'quit' in menu_1 or 'exit' in menu_1:
            space()
            break
        elif "map" in menu_1 or '5' in menu_1:
            map()
            space()
            continue
        else:
            invalid()
            space()
            continue
def start():
    """Game entry point: show the two intro screens, then open the menu."""
    with open("files\\intro1.txt", "r") as first:
        slowprint(first.read(), 0.04)
    time.sleep(0.5)
    with open("files\\intro2.txt", "r") as second:
        slowprint(second.read(), 0.04)
    space()
    menue()
start()  # launch the game when the script is run
| true |
1a748702779e78b729420b2e577403dec15795fc | Python | drageryd/IronPythonEngine | /Examples/test4.py | UTF-8 | 185 | 2.59375 | 3 | [] | no_license | import time
print("HELLO FROM PYTHON")
print(cube.get_name())
print(cube.get_commands())
cube.test(600)
time.sleep(2)
cube.test2(200)
time.sleep(5)
print("hej")
time.sleep(5)
| true |
0a82a45011518a6d95c9186c5c33922e1957655d | Python | cjustus7984/Insight-interview | /VisualizeClusters.py | UTF-8 | 6,627 | 2.671875 | 3 | [] | no_license |
__author__ = 'Chris'
import os
import sys
import csv
import string
import datetime
import statistics
import numpy as np
import glob
import time
import itertools
import matplotlib.pyplot as mplt
#from pylab import *
from collections import defaultdict
def SpreadCmp(CL1, CL2):
    """Three-way comparator ordering clusters by spread, ascending."""
    lhs, rhs = CL1.GetSpread(), CL2.GetSpread()
    if lhs < rhs:
        return -1
    if lhs > rhs:
        return 1
    return 0
def CalcDist(vec1, vec2):
    """Relative Euclidean distance between two factor vectors.

    Each component difference is normalised by the corresponding component
    of ``vec2`` (the reference/centroid vector). Returns the sentinel
    ``99999.`` when the vectors have different lengths, matching the
    original behavior.

    Fix: ``sqrt`` was unresolved because ``from pylab import *`` is
    commented out at the top of the file; import it from ``math`` locally.
    """
    from math import sqrt
    if not len(vec1) == len(vec2):
        return 99999.
    sumsq = 0.0
    for i in range(0, len(vec1)):
        # NOTE(review): divides by vec2[i] -- a zero centroid component
        # raises ZeroDivisionError; confirm centroids are never zero.
        sumsq += ((vec1[i] - vec2[i]) / vec2[i]) ** 2
    return sqrt(sumsq)
class Stock:
    """A stock symbol plus its list of numeric factors.

    Fix: ``GetFactor`` used ``pos > len(self.Factors)`` as its upper-bound
    check, so ``pos == len(self.Factors)`` slipped past the guard and raised
    IndexError; the bound is now ``>=`` (consistent with ``EditFactor``).
    Prints use the parenthesised form, valid in both Python 2 and 3.
    """

    def __init__(self, symb):
        self.Symbol = symb

    def AddFactors(self, factors):
        """Replace the whole factor list."""
        self.Factors = factors

    def AddFactor(self, factor):
        # NOTE(review): assumes AddFactors() was called first -- self.Factors
        # is not created in __init__. Confirm the call order elsewhere.
        self.Factors.append(factor)

    def EditFactor(self, factor, pos):
        """Overwrite the factor at *pos*; returns 0 on out-of-range pos."""
        if pos < 0 or pos >= len(self.Factors):
            print("Could not edit factor at position %d. Out of range!" % pos)
            return 0
        else:
            self.Factors[pos] = factor

    def GetSymbol(self):
        return self.Symbol

    def GetFactors(self):
        return self.Factors

    def GetFactor(self, pos):
        """Return the factor at *pos*, or 0 (after a message) if out of range."""
        if pos < 0 or pos >= len(self.Factors):  # FIX: was 'pos > len(...)'
            print("Could not return factor at position %d. Out of range!" % pos)
            return 0
        else:
            return self.Factors[pos]
class Cluster:
    """A numbered group of stocks together with its centroid vector.

    Fixes:
    - ``cent=[]`` was a shared mutable default argument; replaced with
      ``cent=None``.
    - The zero-argument ``GetCenter()`` overload was dead code -- Python
      keeps only the last definition of a name in a class body, so the
      ``GetCenter(self, pos)`` version always won; the dead one is removed.
    - ``GetCenter`` bounded ``pos`` by a hard-coded 3 instead of the actual
      centroid length (Trial allows up to 5 factors).
    - ``sqrt`` was unresolved (``from pylab import *`` is commented out);
      it is imported from ``math`` locally.
    """

    def __init__(self, number, cent=None):
        self.Number = number
        self.Size = 0
        self.Stocks = []
        self.Center = [] if cent is None else cent

    def AddStock(self, stock):
        self.Stocks.append(stock)
        self.Size += 1

    def GetCenter(self, pos):
        """Return centroid component *pos* divided by the cluster size."""
        if pos < 0 or pos >= len(self.Center):
            print("Could not return center at position %d. Out of range!" % pos)
            return 0
        elif not self.Size:
            print("There are no stocks in this cluster")
            return 0
        else:
            # NOTE(review): dividing the stored centroid by Size matches the
            # original code, but LoadClusters stores an already-final centroid;
            # confirm this normalisation is intended.
            return self.Center[pos] / self.Size

    def CalculateSpread(self):
        """Compute the sample std-dev of stock distances from the centroid."""
        from math import sqrt
        sumdist = 0.0
        sumdist2 = 0.0
        for stk in self.Stocks:
            dist = CalcDist(stk.Factors, self.Center)
            sumdist += dist
            sumdist2 += dist * dist
        if not self.Size > 1:
            self.Spread = 0.0  # undefined for fewer than two members
        else:
            self.Spread = sqrt((sumdist2 - sumdist * sumdist / self.Size) / (self.Size - 1))

    def GetSpread(self):
        return self.Spread
class Trial:
    """One clustering iteration: a list of Cluster objects plus the number
    of factors per stock (capped at 5).
    """
    def __init__(self, factors):
        self.Clusters = []
        self.Nclusters = 0
        self.Nfactors = factors
        if ( self.Nfactors > 5 ): self.Nfactors = 5
    def AddCluster(self, cluster):
        self.Clusters.append(cluster)
        self.Nclusters += 1
    def PlotIteration(self,):
        # One subplot per non-primary factor, each plotted against factor 0.
        # NOTE(review): with Nfactors == 2, subplots(1, 1) returns a single
        # Axes (not an array), so plot01[i-1] would fail -- confirm callers
        # always have >= 3 factors.
        fig, plot01 = mplt.subplots( self.Nfactors-1,1,sharex=True)
        mins = [ 9999. for i in range(self.Nfactors) ]
        maxs = [ -9999. for i in range(self.Nfactors) ]
        Cindx =0
        for C in self.Clusters:
            Fs = [ [] for i in range(self.Nfactors) ]
            # Python 2 iterator API; under Python 3 these would be next(...).
            clustcolor = colors.next()
            markerstyle = markers.next()
            C0 = [[C.Center[i]] for i in range(self.Nfactors)]
            Sindx=0
            for S in C.Stocks:
                for i in range(self.Nfactors):
                    Fs[i].append(S.GetFactor(i))
                    # Track global axis limits across all clusters.
                    if ( Fs[i][Sindx] > maxs[i]): maxs[i] = Fs[i][Sindx]
                    if ( Fs[i][Sindx] < mins[i]): mins[i] = Fs[i][Sindx]
                Sindx += 1
            Label = "C%d" % C.Number
            for i in range(1,self.Nfactors):
                CentLabl = "%d" % C.Number
                plot01[i-1].scatter(Fs[0], Fs[i], color=clustcolor, marker=markerstyle, alpha=0.5, label=Label)
                # Centroid drawn as a black 'X' on each panel.
                plot01[i-1].scatter(C0[0], C0[i], color='k', marker='X', alpha=0.5)
                plot01[i-1].set_ylim([mins[i],maxs[i]])
                plot01[i-1].set_xlabel('F0')
                plot01[i-1].set_ylabel( 'F%d' % i )
                plot01[i-1].set_title('F%d/F0' % i)
            Cindx += 1
            if Cindx >= 20: break;  # plot at most 20 clusters
        mplt.legend(bbox_to_anchor=(0.95, 3.5), loc=2, borderaxespad=0.)
        mplt.show()
##########################################################################################
# Module-level state shared by LoadClusters()/__main__.
# NOTE(review): ClusterList is never appended to anywhere in this file, yet
# LoadClusters indexes with len(ClusterList)-1 (== -1) -- see note there.
ItersList = []
ClusterList = []
NClusters = 0
np.random.seed(11101)
# Cycled per-cluster plot styles used by Trial.PlotIteration().
markers = itertools.cycle((',','+','.','o','*'))
colors = itertools.cycle(('xkcd:blue','xkcd:red','xkcd:violet','xkcd:magenta','xkcd:teal', 'xkcd:lime green', 'xkcd:orange'))
def TotSpdCmp(CL1, CL2):
    """Three-way comparator ordering clusters by total spread, ascending."""
    lhs, rhs = CL1.GetTotSpread(), CL2.GetTotSpread()
    if lhs < rhs:
        return -1
    if lhs > rhs:
        return 1
    return 0
def LoadClusters( filename ):
    """Parse a tab-separated cluster dump into the module-level ItersList.

    Record types: 'I' starts a new Trial (factor count), 'C' adds a Cluster
    (number, size, centroid), 'S' adds a Stock (symbol, factors) to the most
    recent cluster. Python 2 print statements throughout.
    """
    print "Loading file %s" % filename
    # NOTE(review): 'file' shadows the Python 2 builtin of the same name.
    with open( filename, 'r' ) as file:
        print "File opened"
        reader = csv.reader( file, delimiter="\t" )
        Stocks = []
        number = -1
        size = 0
        totspd = 9999.;
        NumFactors = 0
        for line in reader:
            if not len(line): continue
            if line[0] == 'I':
                NumFactors = int(line[1])
                Itr = Trial(NumFactors);
                ItersList.append(Itr)
            elif line[0] == 'C':
                number = int(line[1])
                size = int(line[2])
                centroid = []
                for i in range(NumFactors):
                    centroid.append(float(line[3+i]))
                CL = Cluster(number, centroid)
                ItersList[len(ItersList)-1].AddCluster(CL)
            elif line[0] == 'S':
                symbol = line[1]
                factors = []
                for i in range(NumFactors):
                    factors.append(float(line[2+i]))
                stk = Stock(symbol);
                stk.AddFactors( factors )
                # NOTE(review): ClusterList is never populated, so
                # len(ClusterList)-1 is always -1 -- i.e. Clusters[-1], the
                # most recently added cluster. Correct by accident; consider
                # writing Clusters[-1] explicitly.
                ItersList[len(ItersList)-1].Clusters[len(ClusterList)-1].AddStock(stk)
    return True
if __name__ == '__main__':
    # Usage: VisualizeClusters.py <cluster-dump-file>
    if ( len(sys.argv) < 2 ): sys.exit(1)
    Filename = sys.argv[1]
    print "Loading clusters from file %s" % Filename
    LoadClusters( Filename )
    # Plot only the last (final) clustering iteration.
    ItersList[len(ItersList)-1].PlotIteration()
| true |
82d6346dd5b6689efe9b2669c4637d742bf24ff9 | Python | BruceEckel/ThinkingInPython | /residual/code/PythonForProgrammers/list.py | UTF-8 | 106 | 3.6875 | 4 | [] | no_license | # QuickPython/list.py
list = [ 1, 3, 5, 7, 9, 11 ]
print(list)
list.append(13)
for x in list:
print(x) | true |
432a473dd5e3bf67a2d58f76973d1e6f10d8c354 | Python | bartoszmaleta/4th-Self-instructed-week | /errors_and_exceptions_cd2.py | UTF-8 | 1,935 | 4.5 | 4 | [] | no_license | # print('------------------------------------------------ The with statement, Using the exception object')
# Code works, COMMENTED JUST TO RUN THE NEWEST CODE FASTER!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# try:
# age = int(input("Please enter your age: "))
# except ValueError as err:
# print(err, '\nwrong type')
#     print('You entered incorrect age input: %s ' % err)
# print('------------------------------------------------ Raising exceptions')
# # We can raise exceptions ourselves using the raise statement:
# try:
# age = int(input("Please enter your age: "))
# if age < 0:
# raise ValueError("\n{} is not a valid age. Age must be positive or zero.".format(age))
# except ValueError as err:
# print("You entered incorrect age input: %s" % err)
# else:
# print("I see that you are %d years old." % age)
# Here are a few common exception types which we are likely to raise in our own code:
# TypeError: this is an error which indicates that a variable has the wrong type for some operation.
# We might raise it in a function if a parameter is not of a type that we know how to handle.
# ValueError: this error is used to indicate that a variable has the right type but the wrong value.
# For example, we used it when age was an integer, but the wrong kind of integer.
# NotImplementedError: we will see in the next chapter how we use this exception to indicate
# that a class’s method has to be implemented in a child class.
print('------------------------------------------------ Raising exceptions')
# Something we may want to do is raise an exception that we have just
# intercepted – perhaps because we want to handle it partially in the current function,
# but also want to respond to it in the code which called the function:
try:
    age = int(input("Please enter your age: "))
except ValueError as err:
    # Partially handle (report), then propagate to the caller.
    # NOTE(review): inside an except block a bare 'raise' re-raises the
    # active exception with its original traceback and is preferred over
    # 'raise err'.
    print("You entered incorrect age input: %s" % err)
    raise err
137e9b4de972f739a1bc4f5fc76deef35bcc34d8 | Python | gregmuellegger/annotatedocs | /annotatedocs/contrib/metrics/textstats.py | UTF-8 | 832 | 2.703125 | 3 | [] | no_license | from __future__ import division
import nltk
from ... import Metric, NodeType, metrics
__all__ = ('TextStats',)
@metrics.require(NodeType)
class TextStats(Metric):
'''
Adds some statistics like average word length, sentence length etc.
'''
def limit(self, nodeset):
return nodeset.filter(is_content_type=True)
def apply(self, node, document):
text = node.node.astext()
sentences = [
nltk.word_tokenize(sentence)
for sentence in nltk.sent_tokenize(text)]
word_count = sum([len(sentence) for sentence in sentences])
sentence_count = len(sentences)
node['char_count'] = len(text)
node['word_count'] = word_count
node['sentence_count'] = sentence_count
node['avg_sentence_length'] = word_count / sentence_count
| true |
4f60e22e9385d85040c6f5892751b1ee066d07c5 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_135/590.py | UTF-8 | 1,262 | 3.140625 | 3 | [] | no_license | '''
Created on Apr 11, 2014
@author: mandy
'''
import sys
numLinePerTest = 10
def magicTrick(fileName):
    """Solve every test case in the Code Jam input file *fileName*.

    Each case spans ``numLinePerTest`` lines: a chosen-row index plus a
    4x4 grid, twice (before and after the shuffle); the two chosen rows
    are passed to output() which prints the verdict.

    Fix: the file handle was opened but never closed; a ``with`` block now
    guarantees it is released.
    """
    with open(fileName, 'r') as f:
        lines = f.readlines()
    numTests = int(lines[0])
    for i in xrange(numTests):
        currentRow = i*numLinePerTest+1
        firstRow = getChosenRowData(currentRow, lines)
        # Second announcement sits halfway through the case's line block.
        currentRow += int(0.5*numLinePerTest)
        secRow = getChosenRowData(currentRow, lines)
        output(i, firstRow, secRow)
def getChosenRowData(startRowID, lines):
    """Return the tokens of the row the volunteer pointed at.

    ``lines[startRowID]`` holds the 1-based row choice; the chosen grid row
    sits that many lines further down the file.
    """
    offset = int(lines[startRowID])
    chosen = lines[startRowID + offset]
    return chosen.strip('\n').split(' ')
def output(currentRow, firstRow, secRow):
    """Print the verdict for one test case (Python 2 print statements).

    Counts cards common to both announced rows: >1 common cards means the
    magician cannot know the card, 0 means the volunteer cheated, exactly
    1 identifies the card.
    """
    count = 0
    card = -1
    for f in firstRow:
        if f in secRow:
            count += 1
            card = f  # remember the last (and, if count==1, only) match
    if count > 1:
        print 'Case #'+str(currentRow+1)+': Bad magician!'
        return
    if count == 0:
        print 'Case #'+str(currentRow+1)+': Volunteer cheated!'
    elif count == 1:
        print 'Case #'+str(currentRow+1)+': '+card
    return
def main():
    """Run the solver on A-small-attempt4.in next to this script."""
    currentPath = sys.argv[0]
    # Strip the script name to get its directory ('/' separator assumed).
    currentPath = currentPath[:currentPath.rfind('/')]
    # NOTE(review): 'file' shadows the Python 2 builtin of the same name.
    file = currentPath+'/A-small-attempt4.in'
    magicTrick(file)

if __name__ == '__main__':
    main()
419f30cf626a724480d29520f476d1b1ef1bda40 | Python | Magnus9/pandairc | /irctab.py | UTF-8 | 6,776 | 2.75 | 3 | [] | no_license |
from nick import Nick
import gtk
import gobject
import pango
import time
class IRCTab(gtk.VBox):
    '''
    The IRCTab class represents a tab that contains
    textural data. This can either be a private chat
    with a person, or a channel. Other than keeping track
    of the data within the tab, it also keeps track of the name
    of the tab and the mode (if the tab is a channel).
    '''
    def __init__(self, ui, ident, treeview=False):
        # ui: owning UI controller; ident: tab name (channel or nick);
        # treeview=True builds the nickname list pane (i.e. this is a channel).
        gtk.VBox.__init__(self)
        self.ui = ui
        self.ident = ident
        hpaned = gtk.HPaned()
        self.init_textview(hpaned)
        if treeview:
            self.init_treeview(hpaned)
        self.pack_start(hpaned)
        self.init_entry()
        self.show_all()

    def init_textview(self, hpaned):
        # Read-only, word-wrapped chat log sharing the UI's tag table.
        self.view = gtk.TextView(gtk.TextBuffer(self.ui.get_tag_table()))
        self.view.set_editable(False)
        self.view.set_cursor_visible(False)
        self.view.set_wrap_mode(gtk.WRAP_WORD | gtk.WRAP_CHAR)
        self.view.modify_font(pango.FontDescription("Monospace bold 11"))
        '''
        Pack it into a scrolled window.
        '''
        scroll = gtk.ScrolledWindow()
        scroll.set_policy(gtk.POLICY_NEVER,
                          gtk.POLICY_ALWAYS)
        scroll.set_size_request(200, 200)
        scroll.add(self.view)
        hpaned.pack1(scroll, True, False)
        # Keep the view scrolled to the bottom as text is appended.
        self.view.connect("size-allocate", self.ui.scroll_textview,
                          scroll)

    def init_treeview(self, hpaned):
        # Nickname list: column 0 is the op/voice status icon, column 1 the nick.
        self.tree = gtk.TreeView()
        image_column = gtk.TreeViewColumn(None, gtk.CellRendererPixbuf(),
                                          pixbuf=0)
        self.tree.append_column(image_column)
        text_column = gtk.TreeViewColumn(None, gtk.CellRendererText(),
                                         text=1)
        self.tree.append_column(text_column)
        self.tree.set_headers_visible(False)
        self.init_list_store()
        self.tree.connect("row-activated", self.ui.row_activated,
                          self.list_store)
        self.tree.connect("button-press-event", self.ui.menu_nicknames,
                          self)
        '''
        Pack it into a scrolled window.
        '''
        scroll = gtk.ScrolledWindow()
        scroll.set_policy(gtk.POLICY_NEVER,
                          gtk.POLICY_AUTOMATIC)
        scroll.set_size_request(100, 200)
        scroll.add(self.tree)
        hpaned.pack2(scroll, True, False)

    def init_entry(self):
        # Single-line input box; Enter forwards the text to the UI handler.
        self.entry = gtk.Entry()
        self.entry.set_can_focus(True)
        self.entry.connect("activate", self.ui.handle_input,
                           self)
        self.pack_start(self.entry, False)

    def init_list_store(self):
        # Backing model for the nick list plus the parallel Nick object list.
        self.list_store = gtk.ListStore(gtk.gdk.Pixbuf,
                                        gobject.TYPE_STRING)
        self.tree.set_model(self.list_store)
        self.nicks = []

    def nick_add(self, nick):
        # Insert a new nick, keeping self.nicks and the model sorted.
        self.nicks.append(Nick(nick))
        self.nicks.sort(key=self.nick_sort)
        self.nick_add_row(self.nick_index(nick))

    def nick_add_list(self, nicks):
        # Bulk-add from a NAMES reply; '@' marks ops, '+' marks voiced users.
        # Note: only populates self.nicks -- call nick_add_rows() afterwards.
        for i in nicks:
            if i[0] == "@":
                self.nicks.append(Nick(i[1:], True))
            elif i[0] == "+":
                self.nicks.append(Nick(i[1:], voice=True))
            else:
                self.nicks.append(Nick(i))

    def nick_del(self, nick):
        index = self.nick_index(nick)
        self.nicks.pop(index)
        self.list_store.remove(self.nick_get_iter(index))

    def nick_update_nick(self, old_nick, new_nick):
        # Rename: remove the old row, update the Nick, re-sort and re-insert.
        index = self.nick_index(old_nick)
        self.list_store.remove(self.nick_get_iter(index))
        self.nicks[index].set_nick(new_nick)
        self.nicks.sort(key=self.nick_sort)
        self.nick_add_row(self.nick_index(new_nick))

    def nick_set_mode(self, nick, mode):
        # Apply a +o/-o/+v/-v mode change; no-op if the flag is already set.
        idx = self.nick_index(nick)
        obj = self.nicks[idx]
        if mode == "+o":
            if obj.get_op():
                return
            obj.set_op(True)
        elif mode == "-o":
            if not obj.get_op():
                return
            obj.set_op(False)
        elif mode == "+v":
            if obj.get_voice():
                return
            obj.set_voice(True)
        elif mode == "-v":
            if not obj.get_voice():
                return
            obj.set_voice(False)
        self.list_store.remove(self.nick_get_iter(idx))
        self.nicks.sort(key=self.nick_sort)
        self.nick_add_row(self.nick_index(nick))

    def nick_index(self, nick):
        # Linear scan; returns None when the nick is unknown.
        for i in range(len(self.nicks)):
            if self.nicks[i].get_nick() == nick:
                return i
        return None

    def nick_get_iter(self, index):
        return self.list_store.get_iter_from_string(
                        "%d" % index)

    def nick_add_row(self, index):
        # Insert a model row for self.nicks[index] with its status icon.
        nick = self.nicks[index]
        img_op, img_voice = self.ui.get_images()
        if nick.get_op():
            self.list_store.insert(index, [img_op, nick.get_nick()])
        elif nick.get_voice():
            self.list_store.insert(index, [img_voice, nick.get_nick()])
        else:
            self.list_store.insert(index, [None, nick.get_nick()])

    def nick_add_rows(self):
        # Sort then materialise every nick into the model (after nick_add_list).
        self.nicks.sort(key=self.nick_sort)
        for i in range(len(self.nicks)):
            self.nick_add_row(i)

    def nick_get_obj(self, nick):
        return self.nicks[self.nick_index(nick)]

    def nick_sort(self, nick):
        # Sort key: ops first ("0"), then voiced ("1"), then regular ("2"),
        # alphabetically within each group.
        if nick.get_op():
            return "0" + nick.get_nick()
        elif nick.get_voice():
            return "1" + nick.get_nick()
        else:
            return "2" + nick.get_nick()

    def get_topic(self, topic):
        # NOTE(review): the 'topic' parameter is unused -- this returns the
        # stored attribute. Kept as-is because callers must currently pass
        # an argument.
        return self.topic

    def set_topic(self, topic):
        self.topic = topic

    def set_topic_creator(self, creator):
        self.topic_creator = creator

    def set_topic_ts(self, ts):
        # ts: unix timestamp string from the server; stored human-readable.
        self.topic_ts = time.ctime(int(ts))

    def gen_topic_string(self):
        # Two-line summary used when displaying the channel topic.
        return "Topic for %s: %s\nTopic set by %s at %s" % \
               (self.ident, self.topic, self.topic_creator,
                self.topic_ts)

    def set_mode(self, mode):
        self.mode = mode

    def get_mode(self, mode):
        # NOTE(review): the 'mode' parameter is unused -- see get_topic.
        return self.mode

    def get_ident(self):
        return self.ident

    def get_entry(self):
        return self.entry

    def is_channel(self):
        # Only channel tabs build the nick-list tree in __init__.
        return True if hasattr(self, "tree") else False

    def append_data(self, data, tag="generic"):
        # Append a timestamped line with the given text tag, then ask the UI
        # to highlight this tab's label.
        string = "%s %s\n" % (time.strftime("%H:%M"), data)
        buf = self.view.get_buffer()
        buf.insert_with_tags_by_name(buf.get_end_iter(), string,
                                     tag)
        self.ui.colorize_irctab_label(self)
127c69788e1466c59ce934c7037ae4469cca5850 | Python | FazedAI/OpenSeq2Seq | /open_seq2seq/optimizers/lr_policies.py | UTF-8 | 7,842 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (c) 2017 NVIDIA Corporation
"""
Module containing various learning rate policies. Learning rate policy can
be any function that takes arbitrary arguments from the config (with additional
``global_step`` variable provided automatically) and returns learning rate
value for the current step.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import math
import tensorflow as tf
from tensorflow.python.framework import ops
def fixed_lr(global_step, learning_rate):
  """Constant learning rate policy.

  The ``global_step`` argument is accepted only for interface compatibility
  with the other policies and has no effect: the same ``learning_rate`` is
  returned at every step.

  Args:
    global_step: global step TensorFlow tensor (unused).
    learning_rate (float): the fixed learning rate to return.

  Returns:
    ``learning_rate``, unchanged.
  """
  return learning_rate
def piecewise_constant(global_step, learning_rate, boundaries,
                       decay_rates, steps_per_epoch=None):
  """Piecewise constant learning rate decay.

  Only ``boundaries`` and ``decay_rates`` need to be given in the config;
  the remaining parameters are populated automatically by
  :class:`Model<models.model.Model>`. Boundaries are interpreted as epochs
  when ``steps_per_epoch`` is provided, otherwise as steps.

  Args:
    global_step: global step TensorFlow tensor.
    learning_rate (float): initial learning rate.
    boundaries (list): decay boundaries, in steps or (with
        ``steps_per_epoch``) in epochs.
    decay_rates: multiplier of the initial learning rate after each boundary.
    steps_per_epoch: number of batches per training epoch; when given,
        boundaries are converted from epochs to steps.

  Returns:
    learning rate at step ``global_step``.
  """
  if steps_per_epoch is not None:
    # Boundaries were expressed in epochs; convert to absolute step counts.
    boundaries = [epoch * steps_per_epoch for epoch in boundaries]
  # The interval before the first boundary keeps the initial rate (x1.0).
  values = [learning_rate * rate for rate in [1.0] + decay_rates]
  return tf.train.piecewise_constant(global_step, boundaries, values)
def exp_decay(global_step, learning_rate, decay_steps, decay_rate,
              use_staircase_decay, begin_decay_at=0, min_lr=0.0):
  """Exponential decay learning rate policy.

  This function is equivalent to ``tensorflow.train.exponential_decay`` with
  some additional functionality. Namely, it adds ``begin_decay_at`` parameter
  and ``min_lr`` parameter which are the first step to start decaying learning
  rate and minimal value of the learning rate correspondingly.

  Args:
    global_step: global step TensorFlow tensor.
    learning_rate (float): initial learning rate to use.
    decay_steps (int): number of steps to apply decay for.
    decay_rate (float): the rate of the decay.
    use_staircase_decay (bool): whether to use staircase decay.
    begin_decay_at (int): the first step to start decaying learning rate.
    min_lr (float): minimal value of the learning rate.

  Returns:
    learning rate at step ``global_step``.
  """
  # Hold the initial rate until begin_decay_at; afterwards decay relative to
  # that offset so the decay schedule starts from step 0 of the decay phase.
  new_lr = tf.cond(
      global_step < begin_decay_at,
      lambda: learning_rate,
      lambda: tf.train.exponential_decay(
          learning_rate,
          global_step - begin_decay_at,
          decay_steps,
          decay_rate,
          staircase=use_staircase_decay),
      name="learning_rate",
  )
  # Clamp so the decayed rate never drops below min_lr.
  final_lr = tf.maximum(min_lr, new_lr)
  return final_lr
def poly_decay(global_step, learning_rate, decay_steps, power=1.0,
               begin_decay_at=0, min_lr=0.0, warmup_steps=0):
    """Polynomial decay learning rate policy.

    This function is equivalent to ``tensorflow.train.polynomial_decay`` with
    some additional functionality. Namely, it adds a ``begin_decay_at``
    parameter (the first step at which to start decaying the learning rate)
    and an optional linear warm-up phase of ``warmup_steps`` steps.

    Args:
        global_step: global step TensorFlow tensor.
        learning_rate (float): initial learning rate to use.
        decay_steps (int): number of steps to apply decay for.
        power (float): power for polynomial decay.
        begin_decay_at (int): the first step to start decaying learning rate.
            Clamped to be at least ``warmup_steps`` so decay never overlaps
            the warm-up phase.
        min_lr (float): minimal value of the learning rate
            (same as ``end_learning_rate`` TensorFlow parameter).
        warmup_steps (int): if > 0, the learning rate is scaled by
            ``global_step / warmup_steps`` during the first ``warmup_steps``
            steps (linear ramp from 0 to ``learning_rate``).

    Returns:
        learning rate at step ``global_step``.
    """
    # Decay can only begin once warm-up has finished.
    begin_decay_at = max(warmup_steps, begin_decay_at)

    if warmup_steps > 0:
        # Linear warm-up: scale the rate by global_step / warmup_steps.
        learning_rate = tf.cond(
            global_step < warmup_steps,
            lambda: (learning_rate * tf.cast(global_step, tf.float32)
                     / tf.cast(warmup_steps, tf.float32)),
            lambda: learning_rate,
        )

    lr = tf.cond(
        global_step < begin_decay_at,
        lambda: learning_rate,
        lambda: tf.train.polynomial_decay(
            learning_rate,
            global_step=global_step - begin_decay_at,
            decay_steps=decay_steps,
            end_learning_rate=min_lr,
            power=power),
        name="learning_rate"
    )
    return lr
def transformer_policy(global_step, learning_rate, d_model, warmup_steps,
                       max_lr=None, coefficient=1.0, dtype=tf.float32):
    """Transformer ("noam") learning rate policy from
    https://arxiv.org/pdf/1706.03762.pdf, with an optional cap (``max_lr``).

    Args:
        global_step: global step TensorFlow tensor (ignored for this policy).
        learning_rate (float): initial learning rate to use.
        d_model (int): model dimensionality.
        warmup_steps (int): number of warm-up steps.
        max_lr (float): maximal learning rate, i.e. hat.
        coefficient (float): optimizer adjustment.
            Recommended 0.002 if using "Adam" else 1.0.
        dtype: dtype for this policy.

    Returns:
        learning rate at step ``global_step``.
    """
    # 1-based step, cast to the requested dtype.
    step = tf.cast(global_step, dtype=dtype) + 1
    warmup = tf.cast(warmup_steps, dtype=dtype)

    # min(step * warmup^-1.5, step^-0.5) rises during warm-up and then
    # decays as 1/sqrt(step); the d_model^-0.5 factor rescales per paper.
    schedule = tf.minimum(step * warmup ** -1.5, step ** -0.5)
    scaled_lr = coefficient * d_model ** -0.5 * schedule * learning_rate

    if max_lr is not None:
        scaled_lr = tf.minimum(max_lr, scaled_lr)
    return scaled_lr
def inv_poly_decay(global_step, learning_rate, decay_steps, min_lr,
                   power=1.0, begin_decay_at=0, warmup_steps=0,
                   name="learning_rate"):
    """Inverse poly decay learning rate policy.

    lr = initial lr / ( 1+ decay * t)^power

    This function is similar to ``tensorflow.train.inv_time_decay`` with
    some additional functionality. Namely, it adds :
    ``min_lr`` - end learning rate with 0.00001
    ``power`` - power

    NOTE(review): the ``begin_decay_at`` and ``warmup_steps`` parameters are
    accepted but never used in the body below — confirm whether that is
    intentional before relying on them.

    Args:
        global_step: global step TensorFlow tensor.
        learning_rate (float): initial learning rate to use.
        decay_steps (int): number of steps to apply decay for.
        power (float): power for inv_time_decay.
        begin_decay_at (int): the first step to start decaying learning rate
            (currently unused — see note above).
        min_lr (float): minimal value of the learning rate
            (same as ``end_learning_rate`` TensorFlow parameter).

    Returns:
        learning rate at step ``global_step``.
    """
    # Keep min_lr strictly positive and no larger than the starting rate so
    # the scale computation below is well-defined.
    min_lr=max(min_lr, 1e-8)
    min_lr=min(min_lr, learning_rate)
    if power <= 0.:
        raise ValueError("Inv poly decay requires power >  0.")
    if global_step is None:
        raise ValueError("Inv poly decay requires global_step")

    with ops.name_scope(name, "InvDecay",
                        [learning_rate, global_step]) as name:
        # Choose scale so that the schedule reaches min_lr after decay_steps.
        scale = (math.pow(learning_rate / min_lr, 1./power) - 1.) / decay_steps
        learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
        decay_steps = tf.cast(decay_steps, tf.float32)
        global_step = tf.cast(global_step, tf.float32)

        denom = tf.pow(1. + scale * global_step , power)
        lr = tf.div(learning_rate, denom, name=name)
    return lr
| true |
0c5bab4b95d6bfd3a15f3868619a407a818c2315 | Python | dhmit/gender_analysis | /gender_analysis/analysis/dunning.py | UTF-8 | 27,294 | 3 | 3 | [
"BSD-3-Clause"
] | permissive | import math
from collections import Counter
from operator import itemgetter
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
from gender_analysis.gender.common import HE_SERIES, SHE_SERIES
from gender_analysis.text.common import (
load_graph_settings,
MissingMetadataError,
store_pickle,
load_pickle,
)
from gender_analysis.text.corpus import Corpus
# temporary helper functions, to be removed when corpus refactor is complete
def _get_wordcount_counter(corpus: Corpus):
    """
    Tally how many times each word appears across all documents in ``corpus``.

    :return: Python Counter object mapping each word to its total count

    >>> from gender_analysis.text import Corpus
    >>> from gender_analysis.testing.common import (
    ...     TEST_CORPUS_PATH as path,
    ...     SMALL_TEST_CORPUS_CSV as path_to_csv
    ... )
    >>> from gender_analysis.analysis.dunning import _get_wordcount_counter
    >>> c = Corpus(path, csv_path=path_to_csv, ignore_warnings = True)
    >>> word_count = _get_wordcount_counter(c)
    >>> word_count['fire']
    157
    """
    totals = Counter()
    for document in corpus:
        # Counter addition accumulates the per-document word counts.
        totals += document.get_wordcount_counter()
    return totals
################################################################################
# BASIC DUNNING FUNCTIONS
################################################################################
def dunn_individual_word(total_words_in_corpus_1,
                         total_words_in_corpus_2,
                         count_of_word_in_corpus_1,
                         count_of_word_in_corpus_2):
    """
    Applies the Dunning log-likelihood (G2) statistic to compare one word's
    frequency between two corpora.

    Note the argument order: the two corpus TOTALS come first, then the two
    per-word counts.

    A positive score means the word is distinctive of corpus 1, a negative
    score distinctive of corpus 2; the larger the magnitude, the more
    distinctive the word.

    Unlike the previous revision, a word count of zero no longer raises a
    ValueError: the standard 0 * log(0) = 0 convention is used instead.

    :param total_words_in_corpus_1: int, total wordcount in corpus 1
    :param total_words_in_corpus_2: int, total wordcount in corpus 2
    :param count_of_word_in_corpus_1: int, wordcount of one word in corpus 1
    :param count_of_word_in_corpus_2: int, wordcount of one word in corpus 2
    :return: Float representing the Dunning log likelihood of the given inputs

    >>> total_words_m_corpus = 8648489
    >>> total_words_f_corpus = 8700765
    >>> wordcount_female = 1000
    >>> wordcount_male = 50
    >>> dunn_individual_word(total_words_m_corpus,
    ...                      total_words_f_corpus,
    ...                      wordcount_male,
    ...                      wordcount_female)
    -1047.8610274053995
    """
    # NOTE(ra): super short var names actually useful here for reading the math
    # pylint: disable=invalid-name
    a = count_of_word_in_corpus_1
    b = count_of_word_in_corpus_2
    c = total_words_in_corpus_1
    d = total_words_in_corpus_2

    # Expected counts under the null hypothesis that the word is equally
    # frequent in both corpora.
    e1 = c * (a + b) / (c + d)
    e2 = d * (a + b) / (c + d)

    # 0 * log(0) is taken as 0 (its limit), so zero counts are handled.
    term1 = a * math.log(a / e1) if a > 0 else 0.0
    term2 = b * math.log(b / e2) if b > 0 else 0.0
    dunning_log_likelihood = 2 * (term1 + term2)

    # Flip the sign when the word is underrepresented in corpus 1 (a < e1 is
    # exactly equivalent to the old `a * log(a / e1) < 0` test for a > 0,
    # and also gives the right sign when a == 0).
    if a < e1:
        dunning_log_likelihood = -dunning_log_likelihood
    return dunning_log_likelihood
def dunn_individual_word_by_corpus(corpus1, corpus2, target_word):
    """
    Computes the Dunning log-likelihood score of a single word compared
    between two corpora.

    (-) end of spectrum: the word is distinctive of corpus2
    (+) end of spectrum: the word is distinctive of corpus1
    The larger the magnitude of the number, the more distinctive that word is
    of its respective corpus.

    Bug fix: this function used to pass the per-word counts where
    ``dunn_individual_word`` expects the corpus totals (and vice versa), so
    its scores disagreed with every other call site. The arguments are now
    supplied in the correct order; scores therefore differ from those of the
    old, buggy version.

    :param corpus1: Corpus object
    :param corpus2: Corpus object
    :param target_word: desired word to compare
    :return: float Dunning log-likelihood score

    Example (requires corpus files on disk, so not run as a doctest):
        dunn_individual_word_by_corpus(test_corpus1, test_corpus2, 'sad')
    """
    counter1 = _get_wordcount_counter(corpus1)
    counter2 = _get_wordcount_counter(corpus2)

    word_count_1 = counter1[target_word]
    word_count_2 = counter2[target_word]
    total_words_corpus_1 = sum(counter1.values())
    total_words_corpus_2 = sum(counter2.values())

    # dunn_individual_word takes the two corpus TOTALS first, then the two
    # per-word counts.
    return dunn_individual_word(total_words_corpus_1,
                                total_words_corpus_2,
                                word_count_1,
                                word_count_2)
def dunning_total(counter1, counter2, pickle_filepath=None):
    """
    Runs the Dunning log-likelihood comparison on every word shared by both
    counter objects (words with a combined count below 10 are skipped).

    (-) end of spectrum is words for counter_2
    (+) end of spectrum is words for counter_1
    the larger the magnitude of the number, the more distinctive that word is in its
    respective counter object

    use pickle_filepath to store the result so it only has to be calculated once and can be
    used for multiple analyses.

    :param counter1: Python Counter object
    :param counter2: Python Counter object
    :param pickle_filepath: Filepath to store pickled results; will not save output if None
    :return: Dictionary

    >>> from collections import Counter
    >>> from gender_analysis.analysis.dunning import dunning_total
    >>> female_counter = Counter({'he': 1, 'she': 10, 'and': 10})
    >>> male_counter = Counter({'he': 10, 'she': 1, 'and': 10})
    >>> results = dunning_total(female_counter, male_counter)

    Results is a dict that maps from terms to results
    Each result dict contains the dunning score...
    >>> results['he']['dunning']
    -8.547243830635558

    ... counts for corpora 1 and 2 as well as total count
    >>> results['he']['count_total'], results['he']['count_corp1'], results['he']['count_corp2']
    (11, 1, 10)

    ... and the same for frequencies
    >>> results['he']['freq_total'], results['he']['freq_corp1'], results['he']['freq_corp2']
    (0.2619047619047619, 0.047619047619047616, 0.47619047619047616)
    """
    total1 = sum(counter1.values())
    total2 = sum(counter2.values())
    combined_total = total1 + total2

    dunning_result = {}
    for word, count1 in counter1.items():
        if word not in counter2:
            continue
        count2 = counter2[word]
        combined_count = count1 + count2
        # Very rare words are statistically uninformative; skip them.
        if combined_count < 10:
            continue

        score = dunn_individual_word(total1, total2, count1, count2)
        dunning_result[word] = {
            'dunning': score,
            'count_total': combined_count,
            'count_corp1': count1,
            'count_corp2': count2,
            'freq_total': combined_count / combined_total,
            'freq_corp1': count1 / total1,
            'freq_corp2': count2 / total2,
        }

    if pickle_filepath:
        store_pickle(dunning_result, pickle_filepath)

    return dunning_result
def dunning_total_by_corpus(m_corpus, f_corpus):
    """
    Runs the Dunning log-likelihood comparison on every word shared by the two
    corpora (e.g. male-author vs female-author subcorpora) and returns the
    results sorted by Dunning score.

    :param m_corpus: Corpus object
    :param f_corpus: Corpus object
    :return: list of tuples (common word, (dunning value, m_corpus_count, f_corpus_count))

    >>> from gender_analysis.analysis.dunning import dunning_total_by_corpus
    >>> from gender_analysis import Corpus
    >>> from gender_analysis.testing.common import TEST_CORPUS_PATH, SMALL_TEST_CORPUS_CSV
    >>> c = Corpus(TEST_CORPUS_PATH, csv_path=SMALL_TEST_CORPUS_CSV, ignore_warnings = True)
    >>> test_m_corpus = c.filter_by_gender('male')
    >>> test_f_corpus = c.filter_by_gender('female')
    >>> result = dunning_total_by_corpus(test_m_corpus, test_f_corpus)
    >>> print(result[0])
    ('mrs', (-675.5338738828469, 1, 2031))
    """
    male_counts = _get_wordcount_counter(m_corpus)
    female_counts = _get_wordcount_counter(f_corpus)
    male_total = sum(male_counts.values())
    female_total = sum(female_counts.values())

    scores = {}
    for word, male_count in male_counts.items():
        # Only words appearing in both corpora can be compared.
        if word in female_counts:
            female_count = female_counts[word]
            score = dunn_individual_word(male_total, female_total,
                                         male_count, female_count)
            scores[word] = (score, male_count, female_count)

    # Ascending by (dunning score, counts): most female-distinctive first.
    return sorted(scores.items(), key=itemgetter(1))
def compare_word_association_in_corpus_dunning(word1, word2, corpus,
                                               to_pickle=False,
                                               pickle_filename='dunning_vs_associated_words.pgz'):
    """
    Uses Dunning analysis to compare the words associated with word1
    vs those associated with word2 in the given corpus.

    ``word1`` and ``word2`` may each be either a single word (str) or a list
    of words. Results are cached: the function first tries to load a pickle
    at ``pickle_filename``, then at an auto-generated per-corpus filename,
    and only recomputes if neither exists.

    :param word1: str or list of str
    :param word2: str or list of str
    :param corpus: Corpus object
    :param to_pickle: boolean; True if you wish to save the results as a Pickle file
    :param pickle_filename: str or Path object;
        Only used if the pickle already exists
        or you wish to write a new pickle file
    :return: Dictionary mapping words to dunning scores
    """
    corpus_name = corpus.name if corpus.name else 'corpus'

    try:
        # First, try previously computed results under the given filename.
        results = load_pickle(pickle_filename)
    except IOError:
        try:
            # Fall back to the auto-generated per-corpus cache filename.
            pickle_filename = f'dunning_{word2}_vs_{word1}_associated_words_{corpus_name}'
            results = load_pickle(pickle_filename)
        except IOError:
            # No cache found: tally words associated with each target word.
            word1_counter = Counter()
            word2_counter = Counter()

            for doc in corpus.documents:
                if isinstance(word1, str):
                    word1_counter.update(doc.words_associated(word1))
                else:  # word1 is a list of strings
                    for word in word1:
                        word1_counter.update(doc.words_associated(word))

                if isinstance(word2, str):
                    word2_counter.update(doc.words_associated(word2))
                else:  # word2 is a list of strings
                    for word in word2:
                        word2_counter.update(doc.words_associated(word))

            if to_pickle:
                results = dunning_total(word1_counter, word2_counter,
                                        pickle_filepath=pickle_filename)
            else:
                results = dunning_total(word1_counter, word2_counter)

    # Print result tables: overall, then per part of speech.
    for group in [None, 'verbs', 'adjectives', 'pronouns', 'adverbs']:
        dunning_result_displayer(results, number_of_terms_to_display=50,
                                 part_of_speech_to_include=group)

    return results
def compare_word_association_between_corpus_dunning(word, corpus1, corpus2,
                                                    word_window=None,
                                                    to_pickle=False,
                                                    pickle_filename='dunning_associated_words.pgz'):
    """
    Finds words associated with the given word between the two corpora. The function can search the
    document automatically, or passing in a word window can refine results.

    Bug fix: previously the ``word_window`` branch computed
    ``doc.get_word_windows(...)`` and discarded the return value, so the
    counters stayed empty whenever a window was requested. The window results
    are now fed into the counters.
    (Assumes ``get_word_windows`` returns an iterable of words — TODO confirm
    against the Document implementation.)

    :param word: str or list of str; word(s) to compare between the two corpora
    :param corpus1: Corpus object
    :param corpus2: Corpus object
    :param word_window: If passed in as int, trims results
        to only show associated words within that range.
    :param to_pickle: boolean determining if results should be pickled.
    :param pickle_filename: str or Path object,
        location of existing pickle or save location for new pickle
    :return: Dictionary
    """
    corpus1_name = corpus1.name if corpus1.name else 'corpus1'
    corpus2_name = corpus2.name if corpus2.name else 'corpus2'

    corpus1_counter = Counter()
    corpus2_counter = Counter()

    for doc in corpus1.documents:
        if word_window:
            corpus1_counter.update(doc.get_word_windows(word, window_size=word_window))
        elif isinstance(word, str):
            corpus1_counter.update(doc.words_associated(word))
        else:  # word is a list of actual words
            for token in word:
                corpus1_counter.update(doc.words_associated(token))

    for doc in corpus2.documents:
        if word_window:
            corpus2_counter.update(doc.get_word_windows(word, window_size=word_window))
        elif isinstance(word, str):
            corpus2_counter.update(doc.words_associated(word))
        else:  # word is a list of actual words
            for token in word:
                corpus2_counter.update(doc.words_associated(token))

    if to_pickle:
        results = dunning_total(corpus1_counter,
                                corpus2_counter,
                                pickle_filepath=pickle_filename)
    else:
        results = dunning_total(corpus1_counter,
                                corpus2_counter)

    # Print result tables: overall, then per part of speech.
    for group in [None, 'verbs', 'adjectives', 'pronouns', 'adverbs']:
        dunning_result_displayer(results, number_of_terms_to_display=20,
                                 corpus1_display_name=f'{corpus1_name}. {word}',
                                 corpus2_display_name=f'{corpus2_name}. {word}',
                                 part_of_speech_to_include=group)

    return results
def dunning_result_to_dict(dunning_result,
                           number_of_terms_to_display=10,
                           part_of_speech_to_include=None):
    """
    Filters a Dunning result dict down to the ``number_of_terms_to_display``
    most distinctive terms for EACH corpus (most positive scores, then most
    negative), keeping only terms whose part of speech matches
    ``part_of_speech_to_include``.

    :param dunning_result: Dunning result dict that will be sorted through
    :param number_of_terms_to_display: Number of terms for each corpus to display
    :param part_of_speech_to_include: 'adjectives', 'adverbs', 'verbs', or 'pronouns'
    :return: dict
    """
    pos_names_to_tags = {
        'adjectives': ['JJ', 'JJR', 'JJS'],
        'adverbs': ['RB', 'RBR', 'RBS', 'WRB'],
        'verbs': ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'],
        'pronouns': ['PRP', 'PRP$', 'WP', 'WP$']
    }
    # Translate a group name into its Penn Treebank tag list; anything else
    # (None or an explicit tag list) passes through unchanged.
    tags = pos_names_to_tags.get(part_of_speech_to_include, part_of_speech_to_include)

    selected = {}
    # First pass collects the top terms for corpus 1 (descending scores),
    # second pass the top terms for corpus 2 (ascending scores).
    for descending in (True, False):
        ranked = sorted(dunning_result.items(),
                        key=lambda item: item[1]['dunning'],
                        reverse=descending)
        shown = 0
        for term, data in ranked:
            if shown == number_of_terms_to_display:
                break
            term_pos = nltk.pos_tag([term])[0][1]
            if tags and term_pos not in tags:
                continue
            selected[term] = data
            shown += 1

    return selected
################################################################################
# Visualizers
################################################################################
def dunning_result_displayer(
        dunning_result,
        number_of_terms_to_display=10,
        corpus1_display_name=None,
        corpus2_display_name=None,
        part_of_speech_to_include=None,
        save_to_filename=None
):
    """
    Convenience function to display dunning results as tables.

    part_of_speech_to_include can either be a list of POS tags or a 'adjectives, 'adverbs',
    'verbs', or 'pronouns'. If it is None, all terms are included.

    Optionally save the output to a text file.

    :param dunning_result: Dunning result dict to display
    :param number_of_terms_to_display: Number of terms for each corpus to display
    :param corpus1_display_name: Name of corpus 1 (e.g. "Female Authors")
    :param corpus2_display_name: Name of corpus 2 (e.g. "Male Authors")
    :param part_of_speech_to_include: e.g. 'adjectives', or 'verbs'
    :param save_to_filename: Filename to save output (".txt" is appended)
    :return: None (prints the table; optionally writes it to disk)
    """
    # pylint: disable=too-many-locals
    pos_names_to_tags = {
        'adjectives': ['JJ', 'JJR', 'JJS'],
        'adverbs': ['RB', 'RBR', 'RBS', 'WRB'],
        'verbs': ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'],
        'pronouns': ['PRP', 'PRP$', 'WP', 'WP$']
    }
    # Translate a group name into its Penn Treebank tag list.
    if part_of_speech_to_include in pos_names_to_tags:
        part_of_speech_to_include = pos_names_to_tags[part_of_speech_to_include]

    if not corpus1_display_name:
        corpus1_display_name = 'Corpus 1'
    if not corpus2_display_name:
        corpus2_display_name = 'Corpus 2'

    headings = ['term', 'dunning', 'count_total', 'count_corp1', 'count_corp2', 'freq_total',
                'freq_corp1', 'freq_corp2']

    output = f'\nDisplaying Part of Speech: {part_of_speech_to_include}\n'
    # One table per corpus: i == 0 shows corpus 1's most distinctive terms
    # (descending scores), i == 1 shows corpus 2's (ascending scores).
    for i, name in enumerate([corpus1_display_name, corpus2_display_name]):
        output += f'\nDunning Log-Likelihood results for {name}\n|'

        # Header row: substitute display names into the generic headings.
        for heading in headings:
            heading = heading.replace('_corp1', ' '
                                      + corpus1_display_name).replace('_corp2', ' '
                                                                      + corpus2_display_name)
            output += ' {:19s}|'.format(heading)
        # Horizontal rule sized to the 8 columns of width 21.
        output += '\n' + 8 * 21 * '_' + '\n'

        reverse = True
        if i == 1:
            reverse = False
        sorted_results = sorted(dunning_result.items(), key=lambda x: x[1]['dunning'],
                                reverse=reverse)
        count_displayed = 0
        for result in sorted_results:
            if count_displayed == number_of_terms_to_display:
                break

            term = result[0]
            # POS-filter each term individually using its NLTK tag.
            term_pos = nltk.pos_tag([term])[0][1]
            if part_of_speech_to_include and term_pos not in part_of_speech_to_include:
                continue

            # Row: term, then one fixed-width cell per remaining heading.
            output += '| {:18s}|'.format(result[0])
            for heading in headings[1:]:
                if heading in ['freq_total', 'freq_corp1', 'freq_corp2']:
                    output += ' {:16.4f}% |'.format(result[1][heading] * 100)
                elif heading in ['dunning']:
                    output += ' {:17.2f} |'.format(result[1][heading])
                else:
                    output += ' {:17.0f} |'.format(result[1][heading])
            output += '\n'
            count_displayed += 1

    if save_to_filename:
        with open(save_to_filename + '.txt', 'w') as outfile:
            outfile.write(output)

    print(output)
def score_plot_to_show(results):
    """
    Displays a horizontal bar plot of the Dunning scores for all words in
    ``results`` (red for non-negative scores, blue for negative).

    :param results: dict of results from dunning_total or similar, i.e. in the form {'word': {
        'dunning': float}}
    :return: None, displays bar plot of dunning scores for all words in results
    """
    load_graph_settings(False)

    terms = []
    scores = []
    for term, data in dict(results).items():
        terms.append(term)
        scores.append(data['dunning'])

    bar_colors = ['r' if score >= 0 else 'b' for score in scores]
    axis = sns.barplot(x=scores, y=terms, palette=bar_colors, alpha=0.4)
    sns.despine(ax=axis, bottom=True, left=True)
    plt.show()
def freq_plot_to_show(results):
    """
    displays bar plot of relative frequency of all words in results

    :param results: dict of results from dunning_total or similar, i.e. in the form {'word': {
        'freq_corp1': int, 'freq_corp2': int, 'freq_total': int}}
    :return: None, displays bar plot of relative frequency of all words in results
    """
    load_graph_settings(False)
    results_dict = dict(results)

    words = []
    female_rel_freq = []
    male_rel_freq = []

    for term, data in results_dict.items():
        words.append(term)
        # NOTE(review): female_rel_freq is computed but never plotted below;
        # only the (negated) corpus-2 series is drawn. Possibly a bug —
        # confirm whether both series were meant to appear in the chart.
        female_rel_freq.append(data['freq_corp1'] / data['freq_total'])
        # Negated so corpus-2 bars extend in the opposite direction.
        male_rel_freq.append(-1 * data['freq_corp2'] / data['freq_total'])

    opacity = 0.4
    colors = ['b']
    axis = sns.barplot(x=male_rel_freq, y=words, palette=colors, alpha=opacity)
    sns.despine(ax=axis, bottom=True, left=True)
    plt.show()
################################################################################
# Individual Analyses
################################################################################
# Words associated with male and female characters,
# based on whether author is male or female
def male_characters_author_gender_differences(corpus, to_pickle=False,
                                              pickle_filename='dunning_male_chars_auth_gender.pgz'):
    """
    Between the male-author and female-author subcorpora of ``corpus``, tests
    the distinctiveness of words associated with male characters (the
    HE_SERIES pronouns).

    Prints the most distinctive terms overall as well as grouped by verbs,
    adjectives, etc.

    :param corpus: Corpus object
    :param to_pickle: boolean, False by default. Set to True in order to pickle results
    :param pickle_filename: filename of results to be pickled
    :return: dict
    """
    if 'author_gender' not in corpus.metadata_fields:
        raise MissingMetadataError(['author_gender'])

    male_authors = corpus.filter_by_gender('male')
    female_authors = corpus.filter_by_gender('female')

    return compare_word_association_between_corpus_dunning(
        HE_SERIES,
        male_authors,
        female_authors,
        word_window=None,
        to_pickle=to_pickle,
        pickle_filename=pickle_filename,
    )
def female_characters_author_gender_differences(
        corpus,
        to_pickle=False,
        pickle_filename='dunning_female_chars_author_gender.pgz'
):
    """
    Between male-author and female-author subcorpora, tests distinctiveness of words associated
    with female characters (the SHE_SERIES pronouns).

    Prints out the most distinctive terms overall as well as grouped by verbs, adjectives etc.

    :param corpus: Corpus object
    :param to_pickle: boolean, False by default. Set to True in order to pickle results
    :param pickle_filename: filename of results to be pickled
    :return: dict
    """
    if 'author_gender' not in corpus.metadata_fields:
        raise MissingMetadataError(['author_gender'])

    m_corpus = corpus.filter_by_gender('male')
    f_corpus = corpus.filter_by_gender('female')

    return compare_word_association_between_corpus_dunning(SHE_SERIES, m_corpus, f_corpus,
                                                           word_window=None, to_pickle=to_pickle,
                                                           pickle_filename=pickle_filename)
def dunning_words_by_author_gender(corpus, display_results=False, to_pickle=False,
                                   pickle_filename='dunning_male_vs_female_authors.pgz'):
    """
    Tests distinctiveness of shared words between male and female authors using dunning analysis.

    If called with display_results=True, prints out the most distinctive terms overall as well as
    grouped by verbs, adjectives etc.

    Returns a dict of all terms in the corpus mapped to the dunning data for each term.
    In the returned data, "corp1" is the female-author subcorpus and "corp2"
    the male-author subcorpus (see the dunning_total call below).

    :param corpus: Corpus object
    :param display_results: Boolean; reports a visualization of the results if True
    :param to_pickle: Boolean; Will save the results to a pickle file if True
    :param pickle_filename: Path to pickle object; will try to search for results in this location
        or write pickle file to path if to_pickle is true.
    :return: dict
    """
    if 'author_gender' not in corpus.metadata_fields:
        raise MissingMetadataError(['author_gender'])

    # By default, try to load precomputed results. Only calculate if no stored results are
    # available.
    try:
        results = load_pickle(pickle_filename)
    except IOError:

        m_corpus = corpus.filter_by_gender('male')
        f_corpus = corpus.filter_by_gender('female')
        wordcounter_male = _get_wordcount_counter(m_corpus)
        wordcounter_female = _get_wordcount_counter(f_corpus)

        # Female counts are passed first, so they populate the "corp1" slots.
        if to_pickle:
            results = dunning_total(wordcounter_female,
                                    wordcounter_male,
                                    pickle_filepath=pickle_filename)
        else:
            results = dunning_total(wordcounter_female, wordcounter_male)

    if display_results:
        # Print result tables: overall, then per part of speech.
        for group in [None, 'verbs', 'adjectives', 'pronouns', 'adverbs']:
            dunning_result_displayer(results, number_of_terms_to_display=20,
                                     corpus1_display_name='Fem Author',
                                     corpus2_display_name='Male Author',
                                     part_of_speech_to_include=group)
    return results
def masc_fem_associations_dunning(corpus,
                                  to_pickle=False,
                                  pickle_filename='dunning_he_vs_she_associated_words.pgz'):
    """
    Uses Dunning analysis to compare the words associated with the HE_SERIES
    pronouns against those associated with the SHE_SERIES pronouns in a given
    Corpus.

    :param corpus: Corpus object
    :param to_pickle: Boolean; saves results to a pickle file if True
    :param pickle_filename: Filepath to save pickle file if to_pickle is True
    :return: Dictionary
    """
    # Without pickling, fall through to the helper's own default filename.
    if not to_pickle:
        return compare_word_association_in_corpus_dunning(HE_SERIES, SHE_SERIES, corpus)

    return compare_word_association_in_corpus_dunning(
        HE_SERIES,
        SHE_SERIES,
        corpus,
        to_pickle=True,
        pickle_filename=pickle_filename,
    )
| true |
ea45ebbb7708f3a4c2f819773840836d2f5cb518 | Python | Nofitaika/Belajar-Git | /Kuis kamus.py | UTF-8 | 4,034 | 2.90625 | 3 | [] | no_license | Python 3.7.2 (tags/v3.7.2:9a3ffc0492, Dec 23 2018, 22:20:52) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> data3 = [1,2,3,4,5,6,7,8,9,10,11,12,13]
>>> data3
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
>>> ganjil = []
>>> for i in data3:
if i%2 == 1;
ganjil.append(i)
ganjil
SyntaxError: invalid syntax
>>> for i in data3:
if i%2 == 1 :
ganjil.append(i)
>>> ganjil
[1, 3, 5, 7, 9, 11, 13]
>>> genap = [ x for in data3 if x%2 == 0 ]
SyntaxError: invalid syntax
>>> genap = [ x for x in data3 if x%2 == 0 ]
>>> genap
[2, 4, 6, 8, 10, 12]
>>> # dictionary
>>> kota : {'jkt': 'Jakarta', 'bdg' : 'Bandung', 'sby' : 'Surabaya'}
>>> type(kota)
Traceback (most recent call last):
File "<pyshell#15>", line 1, in <module>
type(kota)
NameError: name 'kota' is not defined
>>> kota : {'jkt': 'Jakarta', 'bdg' : 'Bandung', 'sby' : 'Surabaya'}
>>> kota ={'jkt': 'Jakarta', 'bdg' : 'Bandung', 'sby' : 'Surabaya'}
SyntaxError: multiple statements found while compiling a single statement
>>> kota = {'jkt': 'Jakarta', 'bdg' : 'Bandung', 'sby' : 'Surabaya'}
>>> type(kota)
<class 'dict'>
>>> for k,v in kota.items():
print ("%s -> %s" % (k,v))
jkt -> Jakarta
bdg -> Bandung
sby -> Surabaya
>>> for k,v in kota.items():
print(k,v)
jkt Jakarta
bdg Bandung
sby Surabaya
>>> for k,v in kota.items():
print( k, "->" , v)
jkt -> Jakarta
bdg -> Bandung
sby -> Surabaya
>>> # Tuple
>>> daftar = {'butterfly' : 'kupu-kupu', 'elephant' : 'gajah', 'girrafe' : 'jerapah'}
>>> def kamus(a):
for k,v in daftar.items():
if a = k
print(v)
SyntaxError: expected an indented block
>>> def kamus(a):
for k,v daftar.items():
SyntaxError: invalid syntax
>>> def kamus(a):
daftar = {'butterfly' : 'kupu-kupu', 'elephant' : 'gajah', 'girrafe' : 'jerapah'}
for k,v in daftar.items():
if a == k:
return(v)
>>> kamus(butterfly)
Traceback (most recent call last):
File "<pyshell#39>", line 1, in <module>
kamus(butterfly)
NameError: name 'butterfly' is not defined
>>> kamus('butterfly')
'kupu-kupu'
>>> def kamus():
a=input('Kata apa yang ingin kamu tahu?')
a = a.lower()
daftar = {'butterfly' : 'kupu-kupu', 'elephant' : 'gajah', 'girrafe' : 'jerapah'}
for k,v in daftar.items():
if a == k:
return(v)
else:
print('Oops maaf saya tidak bisa membantumu')
>>> kamus()
Kata apa yang ingin kamu tahu?Aku
Oops maaf saya tidak bisa membantumu
Oops maaf saya tidak bisa membantumu
Oops maaf saya tidak bisa membantumu
>>> def kamus():
a=input('Kata apa yang ingin kamu tahu?')
a = a.lower()
daftar = {'butterfly' : 'kupu-kupu', 'elephant' : 'gajah', 'girrafe' : 'jerapah'}
for k,v in daftar.items():
if a == k:
return(v)
else:
return('Oops maaf saya tidak bisa membantumu')
>>> kamus()
Kata apa yang ingin kamu tahu?Akus
'Oops maaf saya tidak bisa membantumu'
>>> kamus()
Kata apa yang ingin kamu tahu?ButTerfly
'kupu-kupu'
>>> kamus()
Kata apa yang ingin kamu tahu?butterfly
'Oops maaf saya tidak bisa membantumu'
>>> def kamus():
a=input('Kata apa yang ingin kamu tahu?')
a = a.lower()
a = a.replace(' ',)
daftar = {'butterfly' : 'kupu-kupu', 'elephant' : 'gajah', 'girrafe' : 'jerapah'}
for k,v in daftar.items():
if a == k:
return(v)
else:
return('Oops maaf saya tidak bisa membantumu')
>>> kamus()
Kata apa yang ingin kamu tahu?butterfly
Traceback (most recent call last):
File "<pyshell#58>", line 1, in <module>
kamus()
File "<pyshell#57>", line 4, in kamus
a = a.replace(' ',)
TypeError: replace() takes at least 2 arguments (1 given)
>>> def kamus():
a=input('Kata apa yang ingin kamu tahu?')
a = a.lower()
a = a.replace(' ','')
daftar = {'butterfly' : 'kupu-kupu', 'elephant' : 'gajah', 'girrafe' : 'jerapah'}
for k,v in daftar.items():
if a == k:
return(v)
else:
return('Oops maaf saya tidak bisa membantumu')
>>> kamus()
Kata apa yang ingin kamu tahu?Butterfly
'kupu-kupu'
>>> kamus()
Kata apa yang ingin kamu tahu?
'Oops maaf saya tidak bisa membantumu'
>>>
| true |
b95dd7fff8c275cb1925d60b659c3808be3536ad | Python | AugustoDipNiloy/Uri-Solution | /Python3/uri1013.py | UTF-8 | 339 | 3.421875 | 3 | [
"MIT"
] | permissive | class Main:
def __init__(self):
self.li = []
self.li = input().split()
def maxValue(self):
return max(max(int(self.li[0]), int(self.li[1])), int(self.li[2]))
def output(self):
print("{mx} eh o maior".format(mx = self.maxValue()))
if __name__ == '__main__':
obj = Main()
obj.output()
| true |
45f3ac80a9adeb5f71e1f0fc49f0b30f87fc1e08 | Python | MANOJPATRA1991/Data-Structures-and-Algorithms-in-Python | /Searching and Sorting/Binary Search/bsmain.py | UTF-8 | 1,062 | 4.6875 | 5 | [] | no_license |
def binary_search(input_array, value):
    """
    Iterative binary search over an ascending sorted list.

    Fix over the previous revision: on a hit it now returns the probe index
    ``mid`` directly instead of calling ``input_array.index(value)``, which
    was an O(n) rescan that defeated the purpose of the O(log n) search
    (and, with duplicates, could return a different occurrence than the one
    the search actually found).

    Args:
        input_array: Sorted (ascending) list in which to search
        value: Item to search for
    Returns:
        Integer: Index of an occurrence of ``value`` if found, else -1
    """
    # Keeps track of start index
    first = 0
    # Keeps track of end index
    last = len(input_array) - 1

    while first <= last:
        # Find the mid index
        mid = (first + last) // 2
        if input_array[mid] == value:
            return mid
        # If item is greater than the mid value, search the right half
        elif value > input_array[mid]:
            first = mid + 1
        # Otherwise search the left half
        else:
            last = mid - 1

    # Item not found
    return -1
# Smoke checks: one value that is absent (-1) and one that is present (4).
sorted_values = [1, 3, 9, 11, 15, 19, 29]
missing_value = 25
present_value = 15
print(binary_search(sorted_values, missing_value))
print(binary_search(sorted_values, present_value))
| true |
a0a5a0ec24a343e5c9dab15a9329ee923deb5717 | Python | JanKalo/RelAlign | /thirdParty/OpenKE/models/Model.py | UTF-8 | 4,425 | 2.65625 | 3 | [
"MIT"
] | permissive | #coding:utf-8
import numpy as np
import tensorflow as tf
class Model(object):
    @staticmethod
    def __orthogonal_procrustes(r, a, b):
        # Orthogonal Procrustes objective: the Frobenius-norm residual
        # ||r @ a - b|| of mapping ``a`` onto ``b`` via the rotation ``r``.
        return tf.norm(tf.subtract(tf.matmul(r, a), b), ord='fro', axis=(0, 1))
@staticmethod
def _tensor_alignment(a, b):
# assuming that b is a successor of a that ONLY ADDS new elements
# e.g. b contains every element from a including new elements:
# - transpose them (will result in a d x d rotation matrix independent of element counts)
# - slice b to a's shape
# - apply orthogonal procrustes
a_transposed = tf.transpose(a)
b_transposed = tf.slice(tf.transpose(b), [0, 0], a_transposed.shape)
m = tf.matmul(b_transposed, a)
_, u, v = tf.svd(m)
r = tf.matmul(u, tf.transpose(v))
return Model.__orthogonal_procrustes(r, a_transposed, b_transposed)
def get_config(self):
return self.config
def get_positive_instance(self, in_batch = True):
if in_batch:
return [self.postive_h, self.postive_t, self.postive_r]
else:
return [self.batch_h[0:self.config.batch_size], \
self.batch_t[0:self.config.batch_size], \
self.batch_r[0:self.config.batch_size]]
def get_negative_instance(self, in_batch = True):
if in_batch:
return [self.negative_h, self.negative_t, self.negative_r]
else:
return [self.batch_h[self.config.batch_size:self.config.batch_seq_size], \
self.batch_t[self.config.batch_size:self.config.batch_seq_size], \
self.batch_r[self.config.batch_size:self.config.batch_seq_size]]
def get_all_instance(self, in_batch = False):
if in_batch:
return [tf.transpose(tf.reshape(self.batch_h, [1 + self.config.negative_ent + self.config.negative_rel, -1]), [1, 0]), \
tf.transpose(tf.reshape(self.batch_t, [1 + self.config.negative_ent + self.config.negative_rel, -1]), [1, 0]), \
tf.transpose(tf.reshape(self.batch_r, [1 + self.config.negative_ent + self.config.negative_rel, -1]), [1, 0])]
else:
return [self.batch_h, self.batch_t, self.batch_r]
def get_all_labels(self, in_batch = False):
if in_batch:
return tf.transpose(tf.reshape(self.batch_y, [1 + self.config.negative_ent + self.config.negative_rel, -1]), [1, 0])
else:
return self.batch_y
def get_predict_instance(self):
return [self.predict_h, self.predict_t, self.predict_r]
def input_def(self):
config = self.config
self.batch_h = tf.placeholder(tf.int64, [config.batch_seq_size])
self.batch_t = tf.placeholder(tf.int64, [config.batch_seq_size])
self.batch_r = tf.placeholder(tf.int64, [config.batch_seq_size])
self.batch_y = tf.placeholder(tf.float32, [config.batch_seq_size])
self.postive_h = tf.transpose(tf.reshape(self.batch_h[0:config.batch_size], [1, -1]), [1, 0])
self.postive_t = tf.transpose(tf.reshape(self.batch_t[0:config.batch_size], [1, -1]), [1, 0])
self.postive_r = tf.transpose(tf.reshape(self.batch_r[0:config.batch_size], [1, -1]), [1, 0])
self.negative_h = tf.transpose(tf.reshape(self.batch_h[config.batch_size:config.batch_seq_size], [config.negative_ent + config.negative_rel, -1]), perm=[1, 0])
self.negative_t = tf.transpose(tf.reshape(self.batch_t[config.batch_size:config.batch_seq_size], [config.negative_ent + config.negative_rel, -1]), perm=[1, 0])
self.negative_r = tf.transpose(tf.reshape(self.batch_r[config.batch_size:config.batch_seq_size], [config.negative_ent + config.negative_rel, -1]), perm=[1, 0])
self.predict_h = tf.placeholder(tf.int64, [None])
self.predict_t = tf.placeholder(tf.int64, [None])
self.predict_r = tf.placeholder(tf.int64, [None])
self.parameter_lists = []
def embedding_def(self):
pass
def loss_def(self):
pass
def predict_def(self):
pass
def __init__(self, config):
self.config = config
with tf.name_scope("input"):
self.input_def()
with tf.name_scope("embedding"):
self.embedding_def()
with tf.name_scope("loss"):
self.loss_def()
with tf.name_scope("predict"):
self.predict_def()
| true |
9ad84aef435a8ea6d013469b8d21a3538be2c303 | Python | galactocalypse/python | /Unit4/sol20.py | UTF-8 | 1,182 | 3.6875 | 4 | [] | no_license | # Prob20: Mastermind
# Mastermind: the player has 12 tries to guess a 4-digit key.
# Python 2 script (raw_input / print statements).
s = raw_input('Enter 4-digit key: ')
# iarr[d] counts occurrences of digit d in the key; arr[] is reused per guess.
iarr = list("0123456789")
arr = list("0123456789")
for i in range(10):
	iarr[i] = 0
for i in s:
	if ord(i) >= 48 and ord(i) <= 57:
		iarr[ord(i) - 48] += 1
tries = 0
gs = []
while True:
	# Re-prompt until the guess passes all three validity rules.
	invalid = True
	while invalid:
		print 'Used guesses: ', gs
		invalid = False
		g = raw_input('Enter guess: ')
		#Rule 1: guess must be exactly four characters long
		if len(g) != 4:
			invalid = True
		if invalid:
			print 'Bad guess.'
			continue
		#Rule 2: digits only (arr[] tallies each digit of the guess)
		for i in range(10):
			arr[i] = 0
		for i in g:
			if ord(i) >= 48 and ord(i) <= 57:
				arr[ord(i) - 48] += 1
			else:
				invalid = True
				break
		if invalid:
			print 'Bad guess.'
			continue
		#Rule 3: no repeated digits (loops back to the prompt if violated)
		for i in range(10):
			if arr[i] > 1:
				invalid = True
				break
	# NOTE(review): unreachable — the inner while only exits with
	# invalid == False, so this branch never fires.
	if invalid:
		print 'Bad guess.'
	gs.append(g)
	tries += 1
	if g == s:
		print 'Correct! Used ', tries, 'tries.'
		break
	else:
		print 'Incorrect guess.'
		if tries == 12:
			# NOTE(review): no break here, so the loop keeps prompting
			# even after announcing game over.
			print 'All tries used. Game over. Key: ', s
		else:
			# d: digits in the correct position; c: digits present anywhere.
			c = 0
			d = 0
			for i in range(4):
				if s[i] == g[i]:
					d += 1
			print 'Correct digits: ', d
			for i in range(10):
				if arr[i] == 1 and iarr[i] == 1:
					c += 1
			print 'Wrong positions: ', c - d
| true |
cb1a9029ce76e2782e959e31334ee87f22b0443d | Python | joshuaNewman10/ml | /ml/text/experiment/char_level_language_model.py | UTF-8 | 435 | 2.734375 | 3 | [] | no_license | from keras import Input, Model
from keras.layers import LSTM, Dense
class CharLevelLanguageModelExperiment:
    """Character-level language model: one-hot characters -> LSTM -> softmax."""

    def get_model(self, max_document_length, vocabulary_size):
        # One time step per character, one-hot encoded over the vocabulary.
        char_input = Input(shape=(max_document_length, vocabulary_size))
        encoded = LSTM(units=75)(char_input)
        # Distribution over the vocabulary for the next character.
        next_char_probs = Dense(vocabulary_size, activation="softmax")(encoded)
        lm = Model(inputs=char_input, outputs=next_char_probs)
        print(lm.summary())
        return lm
| true |
0f8e0e76de80f4be342baaef82598dca4eec1927 | Python | joeljosephjin/La-MAML | /get_data.py | UTF-8 | 1,423 | 2.609375 | 3 | [] | permissive | # Copyright 2019-present, IBM Research
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import download
import argparse
def get_mnist_data(url, data_dir):
    """Fetch *url* into *data_dir* via download.maybe_download_and_extract."""
    print("Downloading {} into {}".format(url, data_dir))
    download.maybe_download_and_extract(url, data_dir)
def get_datasets():
    """Parse the dataset name from argv and download the matching archive(s).

    Accepts ``rotations``, ``permutations``, ``manypermutations`` or ``all``.
    Archives are downloaded into ``<repo root>/data``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("dataset", help="Either the name of the dataset (rotations, permutations, manypermutations), or `all` to download all datasets")
    args = parser.parse_args()

    # Change dir to the location of this file (repo's root) so the data
    # directory is created next to the code regardless of the caller's cwd.
    get_data_path = os.path.realpath(__file__)
    os.chdir(os.path.dirname(get_data_path))
    data_dir = os.path.join(os.getcwd(), 'data')

    # Archive URLs, keyed by dataset name.  (Renamed from `all` so the
    # builtin is not shadowed.)
    mnist_rotations = "https://nlp.stanford.edu/data/mer/mnist_rotations.tar.gz"
    mnist_permutations = "https://nlp.stanford.edu/data/mer/mnist_permutations.tar.gz"
    mnist_many = "https://nlp.stanford.edu/data/mer/mnist_manypermutations.tar.gz"
    datasets = {"rotations": mnist_rotations, "permutations": mnist_permutations, "manypermutations": mnist_many}

    if args.dataset == "all":
        for url in datasets.values():
            get_mnist_data(url, data_dir)
    else:
        get_mnist_data(datasets[args.dataset], data_dir)

if __name__ == "__main__":
    get_datasets()
| true |
45e3f8875e1315b702da71c089ff8984af3768cc | Python | kikijtl/coding | /Candice_coding/Practice/test.py | UTF-8 | 173 | 3.203125 | 3 | [] | no_license | class A(object):
def __init__(self):
self.a = 0
A.b = 1
a1 = A()
print a1.a, a1.b
A.b = 2
a2 = A()
print a1.a, a1.b, A.b
print a2.a, a2.b, A.b | true |
d5c13d07d331c79ac9ee3aa64038073e31d8e82c | Python | Heanthor/rosalind | /proj4/cmsc423_project4-master/cmsc423_project4-master-ed5d0fae5f139092241f814406dc136d09a08fb8/pileup_user.py | UTF-8 | 1,290 | 3.125 | 3 | [] | no_license | from pileup import PileUp
from approximate_matcher import ApproximateMatcher
from Bio import SeqIO
def read_fa_file(filename):
    """Parse a FASTA file and return its sequence data.

    Returns the single sequence string when the file holds exactly one
    record, otherwise a list of sequence strings (quirky, but callers
    rely on this shape).
    """
    reads = []
    # Context manager guarantees the handle is closed; the original
    # leaked it.  'rU' kept for parity with the Python-2-era Biopython use.
    with open(filename, 'rU') as f:
        for record in SeqIO.parse(f, "fasta"):
            reads.append(record.seq._data)
    if len(reads) == 1:
        return reads[0]
    return reads
def main():
    """Align every read against the reference (edit distance <= 3), pile
    the alignments up, and print mismatch positions seen at >= 1%
    frequency."""
    global pileup
    reference = read_fa_file("data/reference.fa")
    reads = read_fa_file("data/reads.fa")
    # initialize object
    am = ApproximateMatcher(reference)
    pileup = PileUp(reference)
    # Maximum number of mismatches allowed per read.
    d = 3
    for read in reads:
        # find matching positions for a given read
        # assumes positions is a list (even if only a single match is found)
        # with matching positions
        positions = am.get_matches(read, d)
        if len(positions) > 0:
            # add to pileup object
            pileup.insert(positions, read)
    # prints out mismatching positions
    # output is:
    # (<position>, <reference_character>, [(<variant_character>,
    # <num_times_aligned>)])
    # argument filters mismatch by frequency in which variant character
    # is observe, e.g., .01 means variant character has to be seen at least
    # once for every 100 aligned nucleotides
    pileup.print_mismatches(.01)
# main()
| true |
9311b167d4421fe6d29a0c5eac28a0f456ed5f1d | Python | danielzgsilva/CL-MOT | /src/lib/utils/image.py | UTF-8 | 12,605 | 2.671875 | 3 | [] | no_license | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
import random
import math
def flip(img):
    """Return an independent copy of *img* with its last axis reversed."""
    mirrored = img[:, :, ::-1]
    return mirrored.copy()
def transform_preds(coords, center, scale, output_size):
    # Map output-space coordinates back to the original image frame using
    # the inverse crop/resize affine.  Only columns 0-1 (x, y) of each row
    # are transformed; any further columns come back as zeros.
    target_coords = np.zeros(coords.shape)
    trans = get_affine_transform(center, scale, 0, output_size, inv=1)
    for p in range(coords.shape[0]):
        target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
    return target_coords
class GaussianBlur(object):
    # OpenCV Gaussian blur as pytorch augmentation
    def __init__(self, kernel_size, min=0.1, max=2.0):
        # min/max bound the randomly drawn sigma (parameter names shadow
        # builtins; kept for interface compatibility).
        self.min = min
        self.max = max
        # kernel size is set to be 10% of the image height/width
        self.kernel_size = kernel_size
    def __call__(self, sample):
        # Returns the input as an ndarray, blurred with probability 0.5
        # using a sigma drawn uniformly from [min, max).
        sample = np.array(sample)
        # blur the image with a 50% chance
        prob = np.random.random_sample()
        if prob < 0.5:
            sigma = (self.max - self.min) * np.random.random_sample() + self.min
            sample = cv2.GaussianBlur(sample, (self.kernel_size, self.kernel_size), sigma)
        return sample
def letterbox(img, height=608, width=1088, color=(127.5, 127.5, 127.5)):
    # resize a rectangular image to a padded rectangular
    # Scales the image to fit inside (height, width) preserving aspect
    # ratio, then pads the borders with `color`.  Returns the padded image
    # plus the scale ratio and (dw, dh) offsets needed to map coordinates
    # back to the original frame.
    shape = img.shape[:2]  # shape = [height, width]
    ratio = min(float(height) / shape[0], float(width) / shape[1])
    new_shape = (round(shape[1] * ratio), round(shape[0] * ratio))  # new_shape = [width, height]
    dw = (width - new_shape[0]) / 2  # width padding
    dh = (height - new_shape[1]) / 2  # height padding
    top, bottom = round(dh - 0.1), round(dh + 0.1)
    left, right = round(dw - 0.1), round(dw + 0.1)
    img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA)  # resized, no border
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # padded rectangular
    return img, ratio, dw, dh
def random_affine(img, targets=None, degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-2, 2),
                  borderValue=(127.5, 127.5, 127.5)):
    """Apply a random rotation/scale/translation/shear to *img* and, when
    *targets* is given, warp the box coordinates in columns 2:6 along with
    it, dropping boxes that become too small, too distorted, or leave the
    frame.  Returns the warped image (and targets + matrix when given)."""
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
    border = 0  # width of added border (optional)
    height = img.shape[0]
    width = img.shape[1]

    # Rotation and Scale
    R = np.eye(3)
    a = random.random() * (degrees[1] - degrees[0]) + degrees[0]
    # a += random.choice([-180, -90, 0, 90])  # 90deg rotations added to small rotations
    s = random.random() * (scale[1] - scale[0]) + scale[0]
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)

    # Translation
    T = np.eye(3)
    T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border  # x translation (pixels)
    T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border  # y translation (pixels)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180)  # y shear (deg)

    M = S @ T @ R  # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
    imw = cv2.warpPerspective(img, M, dsize=(width, height), flags=cv2.INTER_LINEAR,
                              borderValue=borderValue)  # BGR order borderValue

    # Return warped points also
    if targets is not None:
        if len(targets) > 0:
            n = targets.shape[0]
            points = targets[:, 2:6].copy()
            area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])

            # warp points: all four corners of each box through M
            xy = np.ones((n * 4, 3))
            xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = (xy @ M.T)[:, :2].reshape(n, 8)

            # create new boxes: axis-aligned hull of the warped corners
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

            # apply angle-based reduction: shrink boxes inflated by rotation
            radians = a * math.pi / 180
            reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
            x = (xy[:, 2] + xy[:, 0]) / 2
            y = (xy[:, 3] + xy[:, 1]) / 2
            w = (xy[:, 2] - xy[:, 0]) * reduction
            h = (xy[:, 3] - xy[:, 1]) * reduction
            xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

            # reject warped points outside of image
            np.clip(xy[:, 0], 0, width, out=xy[:, 0])
            np.clip(xy[:, 2], 0, width, out=xy[:, 2])
            np.clip(xy[:, 1], 0, height, out=xy[:, 1])
            np.clip(xy[:, 3], 0, height, out=xy[:, 3])
            w = xy[:, 2] - xy[:, 0]
            h = xy[:, 3] - xy[:, 1]
            area = w * h
            ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
            # keep boxes that stay >4px, retain >10% area, aspect ratio <10
            i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)

            targets = targets[i]
            targets[:, 2:6] = xy[i]

        return imw, targets, M
    else:
        return imw
def get_affine_transform(center,
                         scale,
                         rot,
                         output_size,
                         shift=np.array([0, 0], dtype=np.float32),
                         inv=0):
    """Build the 2x3 affine matrix mapping the `scale`-sized patch around
    `center` (rotated by `rot` degrees) onto an output_size image; with
    inv=1 the inverse mapping is returned instead."""
    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
        scale = np.array([scale, scale], dtype=np.float32)

    scale_tmp = scale
    src_w = scale_tmp[0]
    dst_w = output_size[0]
    dst_h = output_size[1]

    rot_rad = np.pi * rot / 180
    # Direction vectors pin the rotation in both coordinate frames.
    src_dir = get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], np.float32)

    # Three point correspondences fully determine the affine transform:
    # the center, a rotated offset point, and a third perpendicular point.
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale_tmp * shift
    src[1, :] = center + src_dir + scale_tmp * shift
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir

    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])

    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))

    return trans
def affine_transform(pt, t):
    """Apply the 2x3 affine matrix *t* to the 2D point *pt*; returns (x', y')."""
    # Homogeneous coordinates.  The original also transposed this vector,
    # which is a no-op on a 1-D ndarray and has been dropped.
    new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32)
    return np.dot(t, new_pt)[:2]
def get_3rd_point(a, b):
    """Third point completing (a, b): b plus (a - b) rotated 90 degrees."""
    delta = a - b
    perpendicular = np.array([-delta[1], delta[0]], dtype=np.float32)
    return b + perpendicular
def get_dir(src_point, rot_rad):
    """Rotate the 2D point *src_point* by *rot_rad* radians; returns [x, y]."""
    sn, cs = np.sin(rot_rad), np.cos(rot_rad)
    return [src_point[0] * cs - src_point[1] * sn,
            src_point[0] * sn + src_point[1] * cs]
def crop(img, center, scale, output_size, rot=0):
    # Warp the `scale`-sized patch around `center` (optionally rotated by
    # `rot` degrees) into an output_size image via the shared affine helper.
    trans = get_affine_transform(center, scale, rot, output_size)

    dst_img = cv2.warpAffine(img,
                             trans,
                             (int(output_size[0]), int(output_size[1])),
                             flags=cv2.INTER_LINEAR)

    return dst_img
def gaussian_radius(det_size, min_overlap=0.7):
    """Radius for the CornerNet-style Gaussian so that a box of the given
    (height, width) still overlaps the ground truth by at least
    *min_overlap* under each of the three corner-shift cases; the
    tightest of the three candidate radii is returned."""
    height, width = det_size
    candidates = []

    # Case 1: both corners move inside the box.
    qa = 1
    qb = (height + width)
    qc = width * height * (1 - min_overlap) / (1 + min_overlap)
    disc = np.sqrt(qb ** 2 - 4 * qa * qc)
    candidates.append((qb + disc) / 2)

    # Case 2: one corner inside, one outside.
    qa = 4
    qb = 2 * (height + width)
    qc = (1 - min_overlap) * width * height
    disc = np.sqrt(qb ** 2 - 4 * qa * qc)
    candidates.append((qb + disc) / 2)

    # Case 3: both corners move outside the box.
    qa = 4 * min_overlap
    qb = -2 * min_overlap * (height + width)
    qc = (min_overlap - 1) * width * height
    disc = np.sqrt(qb ** 2 - 4 * qa * qc)
    candidates.append((qb + disc) / 2)

    return min(candidates)
def gaussian2D(shape, sigma=1):
    """Dense 2D Gaussian kernel of the given (rows, cols) shape, peak 1 at
    the center; values below machine epsilon are zeroed."""
    half_rows, half_cols = [(extent - 1.) / 2. for extent in shape]
    ys, xs = np.ogrid[-half_rows:half_rows + 1, -half_cols:half_cols + 1]
    kernel = np.exp(-(xs * xs + ys * ys) / (2 * sigma * sigma))
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    return kernel
def draw_umich_gaussian(heatmap, center, radius, k=1):
    # Splat a 2D Gaussian of the given radius onto `heatmap` at `center`,
    # in place, combining with element-wise max so overlapping peaks keep
    # their stronger value.  `k` scales the Gaussian before merging.
    diameter = 2 * radius + 1
    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)

    x, y = int(center[0]), int(center[1])

    height, width = heatmap.shape[0:2]

    # Clip the Gaussian window against the heatmap borders.
    left, right = min(x, radius), min(width - x, radius + 1)
    top, bottom = min(y, radius), min(height - y, radius + 1)

    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
    masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:  # TODO debug
        np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
    return heatmap
def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False):
    # Write the regression target `value` into `regmap` (in place) around
    # `center`, but only at pixels where this object's Gaussian beats the
    # value already on `heatmap` — a per-pixel argmax between objects.
    diameter = 2 * radius + 1
    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
    value = np.array(value, dtype=np.float32).reshape(-1, 1, 1)
    dim = value.shape[0]
    reg = np.ones((dim, diameter * 2 + 1, diameter * 2 + 1), dtype=np.float32) * value
    if is_offset and dim == 2:
        # For offset targets, store per-pixel deltas relative to the center.
        delta = np.arange(diameter * 2 + 1) - radius
        reg[0] = reg[0] - delta.reshape(1, -1)
        reg[1] = reg[1] - delta.reshape(-1, 1)

    x, y = int(center[0]), int(center[1])

    height, width = heatmap.shape[0:2]

    # Clip the write window against the map borders.
    left, right = min(x, radius), min(width - x, radius + 1)
    top, bottom = min(y, radius), min(height - y, radius + 1)

    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
    masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right]
    masked_gaussian = gaussian[radius - top:radius + bottom,
                               radius - left:radius + right]
    masked_reg = reg[:, radius - top:radius + bottom,
                     radius - left:radius + right]
    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:  # TODO debug
        idx = (masked_gaussian >= masked_heatmap).reshape(
            1, masked_gaussian.shape[0], masked_gaussian.shape[1])
        masked_regmap = (1 - idx) * masked_regmap + idx * masked_reg
    regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap
    return regmap
def draw_msra_gaussian(heatmap, center, sigma):
    # MSRA-style Gaussian splat: a (6*sigma+1)-wide kernel merged into
    # `heatmap` (in place) at `center` with element-wise max.
    tmp_size = sigma * 3
    mu_x = int(center[0] + 0.5)
    mu_y = int(center[1] + 0.5)
    w, h = heatmap.shape[0], heatmap.shape[1]
    # Upper-left / bottom-right corners of the kernel window.
    ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
    br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
    if ul[0] >= h or ul[1] >= w or br[0] < 0 or br[1] < 0:
        # Entirely off the map: nothing to draw.
        return heatmap
    size = 2 * tmp_size + 1
    x = np.arange(0, size, 1, np.float32)
    y = x[:, np.newaxis]
    x0 = y0 = size // 2
    g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
    # Intersect the kernel window with the heatmap bounds.
    g_x = max(0, -ul[0]), min(br[0], h) - ul[0]
    g_y = max(0, -ul[1]), min(br[1], w) - ul[1]
    img_x = max(0, ul[0]), min(br[0], h)
    img_y = max(0, ul[1]), min(br[1], w)
    heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum(
        heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]],
        g[g_y[0]:g_y[1], g_x[0]:g_x[1]])
    return heatmap
def grayscale(image):
    # BGR -> single-channel grayscale (OpenCV channel order).
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def lighting_(data_rng, image, alphastd, eigval, eigvec):
    # PCA lighting noise (in place): add a randomly weighted combination
    # of the color eigenvectors to every pixel.
    weights = data_rng.normal(scale=alphastd, size=(3,))
    image += np.dot(eigvec, eigval * weights)
def blend_(alpha, image1, image2):
    # In-place blend: image1 becomes alpha*image1 + (1-alpha)*image2.
    # Note both buffers are scaled in place.
    np.multiply(image1, alpha, out=image1)
    np.multiply(image2, 1 - alpha, out=image2)
    np.add(image1, image2, out=image1)
def saturation_(data_rng, image, gs, gs_mean, var):
    # Random saturation (in place): blend toward the grayscale image by a
    # factor drawn from [1-var, 1+var).
    factor = 1. + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs[:, :, None])
def brightness_(data_rng, image, gs, gs_mean, var):
    # Random brightness (in place): scale all pixels by a factor drawn
    # from [1-var, 1+var).
    factor = 1. + data_rng.uniform(low=-var, high=var)
    image *= factor
def contrast_(data_rng, image, gs, gs_mean, var):
    # Random contrast (in place): blend toward the mean grey level by a
    # factor drawn from [1-var, 1+var).
    factor = 1. + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs_mean)
def color_aug(data_rng, image, eig_val, eig_vec):
    # Photometric augmentation (in place): brightness / contrast /
    # saturation applied in random order, then PCA lighting noise.
    functions = [brightness_, contrast_, saturation_]
    random.shuffle(functions)

    gs = grayscale(image)
    gs_mean = gs.mean()
    for f in functions:
        f(data_rng, image, gs, gs_mean, 0.4)
    lighting_(data_rng, image, 0.1, eig_val, eig_vec)
| true |
2dc3b12e2ebbae8301a4e6b89d775bc852a82c45 | Python | kamelzcs/leetcode | /next_permutation.py | UTF-8 | 948 | 3.296875 | 3 | [] | no_license | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 zhao <zhao@kamel-Desktop>
#
# Distributed under terms of the MIT license.
class Solution:
    # @param num, a list of integer
    # @return nothing (void), do not return anything, modify num in-place instead.
    def nextPermutation(self, num):
        """Rearrange *num* in place into its next lexicographic permutation
        (or the smallest permutation when already at the largest)."""
        # Rightmost index whose successor is larger (the pivot of the ascent).
        pivot = -1
        for idx in range(1, len(num)):
            if num[idx] > num[idx - 1]:
                pivot = idx - 1
        if pivot == -1:
            # Entirely non-increasing: wrap around to the smallest order.
            num.sort()
            return
        # Swap the pivot with the rightmost element that exceeds it.
        for j in range(len(num) - 1, 0, -1):
            if num[j] > num[pivot]:
                num[pivot], num[j] = num[j], num[pivot]
                break
        # Reverse the suffix to make it as small as possible.
        num[pivot + 1:] = reversed(num[pivot + 1:])
# Smoke test: the next permutation of [1, 3, 2] is [2, 1, 3].
data = [1, 3, 2]
Solution().nextPermutation(data)
print data  # Python 2 print statement
| true |
88a0d00b1093e24d38beb455d38ffb430a9e5c5a | Python | joemzhao/generative | /adversarial/full/helpers.py | UTF-8 | 2,756 | 2.75 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import json
def generate_samples(sess, trainable, candidates, output_file):
    """Sample sequences from *trainable* and write them to *output_file*.

    Each sample is written as one line of space-separated tokens.  The
    raw samples are returned so callers can reuse them.
    """
    generated_samples = trainable.generate(sess, candidates)

    # The with-block closes the file on exit; the original additionally
    # called fout.close() inside it, which was redundant and is removed.
    with open(output_file, "w") as fout:
        for item in generated_samples:
            fout.write(' '.join(str(x) for x in item) + '\n')
    return generated_samples
def pre_train_epoch(sess, trainable, data_loader):
    '''
    Since this is for pretraining, for placeholder in the fusing operator
    aka input candidates we will just feed zeros.
    '''
    # One supervised (MLE) pretraining epoch over data_loader; returns the
    # mean generator loss.  Progress is printed every 10 batches.
    # Python 2 (xrange / print statements).
    supervised_g_loss = []
    data_loader.reset_pointer()
    candidates = data_loader.candidates
    for batch in xrange(data_loader.num_batch):
        if batch % 10 == 0 and batch > 0:
            print "%d / %d" % (batch, data_loader.num_batch)
            print "Training loss : ", np.mean(supervised_g_loss)
        next_bc = data_loader.next_batch()
        # Candidates placeholder is fed zeros during pretraining (see docstring).
        _, g_loss = trainable.pretrain_step(sess, next_bc, np.zeros(candidates.shape))
        supervised_g_loss.append(g_loss)
    return np.mean(supervised_g_loss)
def loss_checker(sess, trainable, data_loader, candidates):
    """Run a single supervised pretraining step and return its mean loss."""
    supervised_g_loss = []
    data_loader.reset_pointer()
    # range() works on both Python 2 and 3; the original's xrange() is
    # Python-2-only and the single-iteration loop makes it free either way.
    for batch in range(1):
        next_bc = data_loader.next_batch()
        ''' [pretrain_update, pretrain_loss] '''
        _, g_loss = trainable.pretrain_step(sess, next_bc, candidates)
        supervised_g_loss.append(g_loss)
    return np.mean(supervised_g_loss)
def get_dict_D(D, nega, posi):
    """Build the discriminator feed dict: the positive and negative
    sequences zero-padded to a common length, one-hot labels, and a 0.9
    dropout keep probability."""
    negative = list(nega)
    positive = list(posi)
    width = max(len(negative), len(positive))
    # Zero-pad both to the same length (padding one of them is a no-op).
    negative += [0] * (width - len(negative))
    positive += [0] * (width - len(positive))
    labels = np.concatenate([[[1, 0]], [[0, 1]]], 0)
    return {D.input_x: np.array([positive, negative]),
            D.input_y: labels,
            D.dropout_keep_prob: 0.9}
def translator(q, a, a_):
    # Pretty-print a (question, reference answer, generated answer) token-id
    # triple by reverse-looking-up each id in the vocabulary file.
    # NOTE(review): linear scan over the whole dict per token (O(V) each);
    # an inverted id->word dict would be much faster.
    dict_path = "./datasets/dict.json"
    word_dict = json.load(open(dict_path))
    temp = []
    print "Question:"
    for item in q:
        for name, idx in word_dict.items():
            if idx == item:
                temp.append(name)
    print temp
    temp = []
    print "Real answer:"
    for item in a:
        for name, idx in word_dict.items():
            if idx == item:
                temp.append(name)
    print temp
    temp = []
    print "From GAN:"
    for item in a_:
        for name, idx in word_dict.items():
            if idx == item:
                temp.append(name)
    print temp
if __name__ == "__main__":
nega = [1, 2, 3, 4, 4, 4, 4]
posi = [1, 2, 3, 4]
print get_dict_D(nega, posi)
| true |
5e706118980e642541b84d9dce825261c1b107c0 | Python | kylin910/python | /Demos/Guess_Game/Guess_game2_Limited Times.py | UTF-8 | 577 | 3.796875 | 4 | [] | no_license | print("猜猜我心里想的数字是几?\n");
record=3;
result=6;
guess=0;
while guess!=result and record>=1:
guess=int(input("\n输入你猜得数字:\n"));
if guess==result:
print("哇哦!心有灵犀一点通!^_^");
else:
if guess<8:
record=record-1;
print("猜错了哦! 猜得有点小了哦!"+" 还有"+str(record)+"次机会了!");
else:
record=record-1;
print("猜错了哦! 猜得有点大了哦!"+" 还有"+str(record)+"次机会了!");
| true |
5e56a602c2d91713d19750a490c6beadd13d614b | Python | penyugalova/dz | /dz4/dz4_functions.py | UTF-8 | 7,532 | 3.21875 | 3 | [] | no_license | # encoding: utf-8
__author__ = 'Liubov Penyugalova'
import sys
import pickle
import os
import sys
#1.1 Берем задачу из дз №3. Выделяем следующие процессы в функции: ввод команды - отдельная функция
def action_func():
    """Prompt for a main-menu command and return it as the raw input string."""
    return input('Нажмите 1 чтобы внести данные. 2 чтобы увидеть весь список. 3 чтобы найти определенный атомобилью 4 чтобы выйти: ')
#1.2 Берем задачу из дз №3. Выделяем следующие процессы в функции: сообщение в случае ошибки ввода команды - отдельная функция
def error():
    # Shared "invalid input" message used by every validation loop.
    print('Ошибка ввода!')
#1.3 Берем задачу из дз №3. Выделяем следующие процессы в функции: Ввести и Вывести - 2 отдельные функции
#ввести
def input_data(file):
    """Interactively collect (brand, power) pairs and persist them.

    Brand must be letters only (spaces allowed), power must be numeric
    with '.' as the decimal separator.  Keeps prompting while the user
    answers '1' to the menu, then hands every collected pair together
    with *file* to pickle_close().
    """
    user_input = []
    while True:
        key = input('Введите марку автомобиля:')
        key = key.lower()
        while key.replace(' ', '').isalpha() == False:
            error()
            key = input('Введите марку автомобиля:')
            key = key.lower()
        value = input('Введите его мощность:')
        while str(value).replace(".", "").isnumeric() == False:
            error()
            value = input('Введите мощность автомобиля числами (разделитель порядков - точка):')
        user_input.append((key, value))
        act = action_func()
        # Bug fixes vs. the original: it compared the string `act` to the
        # int 1 (never true) and recursed via input_data() without its
        # argument (TypeError), discarding already-collected entries.
        # Loop instead and keep everything gathered so far.
        if act != '1':
            break
    pickle_close(user_input, file)
#вывести
def show_data(file_db):
    # Print the database twice: once sorted with the builtin sorted(), and
    # once with a hand-written exchange sort over the keys (course exercise).
    lst_1 = sorted(file_db)
    print('Обычная сортировка')
    for i in lst_1:
        print(i, ':', file_db.get(i))
    lst_2 = list(file_db.keys())
    # NOTE(review): `i`/`ii` are not re-read after swaps, so this pass may
    # not produce a fully sorted order for some inputs — verify.
    for n1, i in enumerate(lst_2):
        for n2, ii in enumerate(lst_2):
            if i<ii:
                lst_2[n1], lst_2[n2] = lst_2[n2], lst_2[n1]
    print('Пузырьковая сортировка')
    for i in lst_2:
        print(i, ':', file_db.get(i))
#1.4 Берем задачу из дз №3. Выделяем следующие процессы в функции: поиски по условию - 3 отдельные функции соответственно
def filter_strict():
    # Exact-match lookup by brand name.  Reads the module-level file_db.
    user_input = input('Введите марку автомобиля: ')
    user_input = user_input.strip()
    user_input = user_input.lower()
    if user_input in file_db.keys():
        print(user_input, ':', file_db.get(user_input))
def filter_non_strict():
    # Substring search over brand names.  Reads the module-level file_db.
    user_input = input('Введите элемент марки автомобиля: ')
    user_input = user_input.strip()
    user_input = user_input.lower()
    for i in file_db.keys():
        key_string = str(i)
        if key_string.find(user_input) != -1:
            print(i, ':', file_db.get(i))
def filter_power():
    # Search by power: one number matches exactly, two numbers match the
    # inclusive range between them.  Reads the module-level file_db, whose
    # values are either a single string or a collection of strings.
    user_input = input('Введите мощность автомобиля одной или двумя (если интересует промежуток) цифрами с пробелом: ')
    user_input = user_input.strip()
    user_input = user_input.split()
    if len(user_input) == 1:
        for i in file_db.keys():
            # Normalize single-string values to a one-element list.
            if type(file_db.get(i)) == str:
                val = [file_db.get(i)]
            else:
                val = file_db.get(i)
            if len(val) > 1:
                for ii in val:
                    if user_input[0] == ii:
                        print(i, ':', ii)
            else:
                if user_input[0] == val[0]:
                    print(i, ':', val[0])
    elif len(user_input) == 2:
        n1 = int(user_input[0])
        n2 = int(user_input[1])
        # Accept the bounds in either order.
        if n1 > n2:
            n1, n2 = n2, n1
        for i in file_db.keys():
            if type(file_db.get(i)) == str:
                val = [file_db.get(i)]
            else:
                val = file_db.get(i)
            if len(val) > 1:
                for ii in val:
                    if int(ii) >= n1 and int(ii) <= n2:
                        print(i, ':', ii)
            else:
                if int(val[0]) >= n1 and int(val[0]) <= n2:
                    print(i, ':', val[0])
    else:
        # NOTE(review): uses input() just to display the error text, and
        # the result is discarded — print() was probably intended.
        user_input = input('Неверно указана мощность.')
#1.5 Берем задачу из дз №3. Выделяем следующие процессы в функции: сохранение в pickle и загрузка из pickle - 2 отдельные функции
#загрузка
def pickle_open():
    """Load the saved database dict from database.pickle.

    On any load failure, fall back to an empty dict and offer to create
    the file interactively.  Returns the database dict.
    """
    try:
        f = open('database.pickle', 'rb')
        file_db = pickle.load(f, encoding="utf-8")
        f.close()
    # The original's bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; catch ordinary exceptions only (missing file, corrupt
    # pickle, ...) while preserving the fall-back behaviour.
    except Exception:
        file_db = {}
        act = input('База данных не сформирована. Нажмите 1 чтобы внести данные и создать файл в рабочей директории. Нажмите 2, чтобы попасть в основное меню: ')
        if act == '1':
            # Bug fix: the original called input_data() without its
            # required argument; pass the fresh dict so entries are saved.
            input_data(file_db)
        # Any other answer falls through to the main menu unchanged.
    return file_db
#сохранение
def pickle_close(user_input, file_db):
    # Merge freshly collected (brand, power) pairs into file_db and persist
    # it to database.pickle.  A brand maps to a single string while it has
    # one power value and to a set once it has several.
    if not file_db:
        # Seed an empty database with the first pair so the merge loop
        # below has something to extend.
        file_db.update({
            user_input[0][0]:user_input[0][1]
        })
    for i in user_input:
        if i[0] in file_db.keys():
            # Existing brand: collect all known power values into a set.
            if type(file_db.get(i[0])) == str:
                val = [file_db.get(i[0])]
            else:
                val = file_db.get(i[0])
            lst = set(val)
            lst.add(i[1])
            file_db.update(
                {
                    i[0]:lst
                }
            )
        else:
            # New brand: store the single power value as-is.
            file_db.update(
                {
                    i[0]:i[1]
                }
            )
    f = open('database.pickle', 'wb')
    pickle.dump(file_db, f)
    f.close()
# функция самой программы
def programma(act):
    """Main menu dispatcher: loops on user commands until '4' (exit).

    '1' adds entries, '2' lists the database, '3' searches (exact /
    substring / power range), anything else reports an input error.
    Implemented as a loop instead of the original's unbounded recursion,
    which grew the call stack on every command.
    """
    while True:
        if act == '1':
            file_db = pickle_open()
            input_data(file_db)
        elif act == '2':
            file_db = pickle_open()
            show_data(file_db)
        elif act == '3':
            sec_action = input('Введите 1 для поиска с точным совпадением. Введите 2 для поиска по вххождению слова в название. Введите 3 для поиска по мощности')
            if sec_action == '1':
                filter_strict()
            elif sec_action == '2':
                filter_non_strict()
            elif sec_action == '3':
                filter_power()
            else:
                error()
        elif act == '4':
            sys.exit()
        else:
            error()
        # Ask for the next command and go around again.
        act = action_func()
# Load the database once so the filter_* helpers (which read the global
# file_db) can work, then hand control to the menu loop.  The original ran
# the menu before defining the global and then repeated the whole start-up
# sequence in code made unreachable by sys.exit().
file_db = pickle_open()
action = action_func()
programma(action)
6c9ea3a6c27c4775d12f84ed52ef5a4e358d7810 | Python | daemon/keras-base | /base.py | UTF-8 | 1,581 | 2.546875 | 3 | [
"MIT"
] | permissive | from collections import deque
from collections import namedtuple
import os
import re
class Learner(object):
  """Drives training of *model*, checkpointing through *model_io_mgr*.

  Subclasses implement do_train() (one training step).  If an IO manager
  is supplied, the iteration counter resumes from its latest checkpoint
  and weights are saved every *save_interval* iterations.
  """

  def __init__(self, model, model_io_mgr=None, save_interval=100):
    self.model = model
    self.model_io_mgr = model_io_mgr
    self.n_iter = 0
    self.save_interval = save_interval
    if model_io_mgr:
      resumed = model_io_mgr.load()
      # load() returns None when no checkpoint exists yet; keep 0 then.
      if resumed is not None:
        self.n_iter = resumed

  def train(self):
    """Run one do_train() step and checkpoint on the save interval."""
    self.do_train()
    self.n_iter += 1
    # Bug fix: the original tested the bare name `save_interval`, which is
    # undefined in this scope (NameError); use the instance attribute.
    if self.n_iter % self.save_interval == 0:
      self.model_io_mgr.save(self.n_iter)
class ModelFileManager(object):
  """Keeps a rolling window of the last *n_history* weight checkpoints.

  Checkpoints are files named "<name>.hdf5.<iteration>" inside *folder*;
  `savepoints` is a deque of (absolute path, iteration) pairs sorted by
  iteration, oldest first.
  """

  def __init__(self, model, name, n_history=5, folder="."):
    self.model = model
    self.n_history = n_history
    self.folder = folder
    self.name = name
    # NOTE: the dots in the pattern are unescaped regex metacharacters;
    # harmless for the file names this class itself produces.
    self.file_pattern = r"^.*{}.hdf5.(\d+)$".format(name)
    self.reload_savepoints()

  def reload_savepoints(self):
    """Rescan *folder* and rebuild the savepoint deque from disk."""
    savepoints = []
    for fname in os.listdir(self.folder):
      m = re.match(self.file_pattern, fname)
      if not m:
        continue
      # Bug fixes vs. the original: append to the local list (the deque
      # attribute does not exist yet on the first call, so appending to
      # self.savepoints raised AttributeError), and resolve the path
      # relative to *folder* rather than the process cwd.
      path = os.path.abspath(os.path.join(self.folder, fname))
      savepoints.append((path, int(m.group(1))))
    self.savepoints = deque(sorted(savepoints, key=lambda x: x[1]))

  def load(self, file=None):
    """Load weights from *file* or the newest checkpoint.

    Returns the checkpoint's iteration number, or None when there is
    nothing to load.
    """
    if not file:
      try:
        file = self.savepoints[-1][0]
      except IndexError:
        # No checkpoints on disk yet.
        return None
    self.model.load_weights(file)
    return int(re.match(self.file_pattern, file).group(1))

  def save(self, n_iter):
    """Write weights for iteration *n_iter*, evicting the oldest
    checkpoints so that at most *n_history* remain afterwards."""
    file = os.path.join(self.folder, "{}.hdf5.{}".format(self.name, n_iter))
    # `while ... >=` (not `if ... ==`) so the window also shrinks when
    # reload_savepoints() found more files than n_history allows.
    while len(self.savepoints) >= self.n_history:
      os.remove(self.savepoints.popleft()[0])
    self.savepoints.append((file, n_iter))
    self.model.save_weights(file)
| true |
dde1d65e4c0621431960d4f9f29616c44da73d64 | Python | tlevine/data-guacamole | /groceries.py | UTF-8 | 1,155 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python2
'''Here are some queries against the SupermarketAPI.
I didn't wind up using them.'''
import os
from urllib import urlencode
from urllib2 import urlopen
from lxml.etree import fromstring
APIKEY = os.environ['SUPERMARKETAPI_APIKEY']
def get_xml(url):
    'Download a url and parse it to an XML tree.'
    handle = urlopen(url)
    # Remove the schema
    # The response's second line carries an inline schema declaration;
    # keep only its opening tag name (text before the first space) and
    # close it so lxml can parse the remainder.
    text = handle.readline()
    text += handle.readline().split(' ')[0] + '>\r\n'
    text += handle.read()
    return fromstring(text)
def stores(state, city):
    'Get an array of stores for a given city.'
    # Query the SupermarketAPI StoresByCityState endpoint and return the
    # parsed XML tree of matching stores.
    params = urlencode({
        'APIKEY': APIKEY,
        'SelectedState': state,
        'SelectedCity': city,
    })
    url = 'http://www.SupermarketAPI.com/api.asmx/StoresByCityState?' + params
    return get_xml(url)
def store_ids(state, city):
    'Get a list of storeIds for a given city.'
    # Python 2: map() returns a list, and unicode() coerces the XPath
    # text nodes to unicode strings.
    return map(unicode, stores(state, city).xpath('//StoreId/text()'))
def search_for_item(store_id, item_name):
    # Query the SearchForItem endpoint for one store and return the XML tree.
    # NOTE(review): unlike stores(), the parameters are interpolated without
    # URL-encoding — item names containing spaces or '&' will break the URL.
    url = 'http://www.SupermarketAPI.com/api.asmx/SearchForItem?APIKEY=%s&StoreId=%s&ItemName=%s' % (APIKEY, store_id, item_name)
    return get_xml(url)
| true |
e697daba745b0baa6d91e6c797980d23e748b9dc | Python | quemazon/mycode | /misc/work/rename files.py | UTF-8 | 1,698 | 2.671875 | 3 | [] | no_license | #this generates a file list for the database and
# Walks a user-chosen directory tree, finds voltage CSV files matching the
# pattern below, swaps columns 1 and 2 of each, and writes the result into a
# temp/ subfolder next to the original.  Python 2 (Tkinter-era script).
# Import the os module, for the os.walk function
import os
import re
import pickle
from Tkinter import Tk
from tkFileDialog import asksaveasfilename
from tkFileDialog import askdirectory
from numpy import genfromtxt
# NOTE(review): `global` at module level is a no-op; namedic is also never
# populated because the code that filled it is commented out below.
global namedic
namedic = {}
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
rootDir = askdirectory() +'/'# show an "Open" dialog box and return the path to the selected file
# Set the directory you want to start from
#rootDir = u'X:/4E10/Lot 2 data/'
for dirName, subdirList, fileList in os.walk(rootDir):
	for fname in fileList:
		if re.match('[\d,3].*V.CSV', fname): #used this for some matching
			#if re.match('.*[\d,3].CSV', fname):
			#if re.match('.*[\d,3]V.CSV', fname):
			#newname = fname.split('.')[0][-3:] + 'C_V.CSV'
			#newname = fname.split('_')[2] + 'C_V.CSV'
			#newname = dirName + newname
			oldname = dirName + fname
			newname = dirName + 'temp/' + fname
			# Read the CSV and swap columns 1 and 2 (fancy indexing copies,
			# so the second assignment does not clobber the first).
			data = genfromtxt(oldname, delimiter=',')
			tmp1 = data[:,[1]]
			tmp2 = data[:,[2]]
			data[:,[1]] = tmp2
			data[:,[2]] = tmp1
			# NOTE(review): import inside the loop — move to the top with
			# the other imports.
			import numpy
			numpy.savetxt(newname, data, delimiter=",")
			#direct = fname.rpartition('/')[0] + '/'
			#print(oldname)
			#print(newname)
			#os.rename(oldname, newname)
			#namedic[fname[0:3]] = fullname
			#print('\t%s' % fullname)
			print(oldname)
##file = asksaveasfilename() # show a "save" dialog box and return the path to the selected
##pickle.dump(namedic, open(file, "wb"))
##pickle.dump(namedic, open(file, "wb"))
| true |
d219661a37113899e016fffbdf6103fda556bbe2 | Python | Aasthaengg/IBMdataset | /Python_codes/p03496/s125620312.py | UTF-8 | 654 | 3.125 | 3 | [] | no_license | n = int(input())
# 'n' (the element count) is read from stdin just above this block.
A = list(map(int, input().split()))
ans = []  # recorded operations as 1-based (source_index, target_index) pairs
maxa = max(A)
mina = min(A)
# Make A non-decreasing by repeatedly adding the current extreme element onto
# a neighbour (each append to `ans` records one such operation).
if abs(mina) > abs(maxa):
    # The most negative value dominates: sweep right-to-left, pulling each
    # left neighbour down with the current minimum until the pair is ordered.
    for i in range(n)[:0:-1]:
        if A[i-1] > A[i]:
            while A[i-1] > A[i]:
                ma = min(A)
                argmina = A.index(ma)
                A[i-1] += ma
                ans.append((argmina+1, i-1+1))
else:
    # The largest value dominates: sweep left-to-right, pushing each right
    # neighbour up with the current maximum until the pair is ordered.
    for i in range(n-1):
        pa = A[i]
        aa = A[i+1]
        if pa > aa:
            while pa > A[i+1]:
                ma = max(A)
                argmaxa = A.index(ma)
                A[i+1] += ma
                ans.append((argmaxa+1, i+1+1))
print(len(ans))
for a in ans:
    print(*a)
| true |
f3637a9e0e0ad058e87504fa37b255000bcca657 | Python | markanethio/gretel-python-client | /src/gretel_client/transformers/fpe/fpe_prefix_cipher.py | UTF-8 | 1,300 | 2.90625 | 3 | [
"Apache-2.0",
"Python-2.0"
] | permissive | from Crypto.Cipher import AES
from gretel_client.transformers.fpe.crypto_aes import IV_ZERO, CipherError
class FpePrefixCipher:
    """Format-preserving "prefix cipher" over the integer range [min, max].

    A pseudo-random permutation of the range is derived once, at construction
    time, by weighting each index with an AES-CBC ciphertext and sorting the
    indices by those weights.  ``encrypt``/``decrypt`` are then O(1) lookups
    in the permutation table and its inverse.
    """

    def __init__(self, min: int, max: int, key: bytes):
        # NOTE: the parameter names shadow the builtins min/max; they are kept
        # for backward compatibility with existing callers.
        self.min = min
        self.max = max
        self.range = max - min
        # Weight every index with the next AES-CBC ciphertext block; sorting
        # by these pseudo-random weights yields the permutation.
        weights = []
        cipher = AES.new(key, AES.MODE_CBC, iv=IV_ZERO)
        for i in range(0, self.range):
            cipher_val = int.from_bytes(cipher.encrypt(key), byteorder='big', signed=False)
            weights.append([cipher_val, i])
        weights.sort()
        # _encrypt[position] == original index: the forward permutation.
        self._encrypt = [x[1] for x in weights]
        # _decrypt is simply the inverse permutation of _encrypt (this
        # replaces the old mutate-and-resort loop flagged by the TODO).
        self._decrypt = [0] * self.range
        for position, original in enumerate(self._encrypt):
            self._decrypt[original] = position

    def encrypt(self, value: int) -> int:
        """Map *value* to its encrypted counterpart within the range."""
        # NOTE(review): the permutation tables hold ``max - min`` entries, so
        # ``value == max`` passes this check but would index out of bounds;
        # the upper bound looks effectively exclusive -- confirm intent.
        if value < self.min or value > self.max:
            raise CipherError(f"input value out of range ({self.min}...{self.max})")
        value -= self.min
        return self._encrypt[value] + self.min

    def decrypt(self, value: int) -> int:
        """Map an encrypted *value* back to its original counterpart."""
        if value < self.min or value > self.max:
            raise CipherError(f"input value out of range ({self.min}...{self.max})")
        value -= self.min
        return self._decrypt[value] + self.min
| true |
5c6ad791d38d23ce29445086fb55370978458e70 | Python | ksaisujith/Course-Projects-Python | /Balances/balances.py | UTF-8 | 9,456 | 3.65625 | 4 | [] | no_license | __author__ = "Sai Sujith Kammari"
'''
CSCI-603: LAB 8
Author1: SAI SUJITH KAMMARI
Author2: KEERTHI NAGAPPA PRADHANI
Draws the balance with the weights provided. If the torque on the right doesn't match then exits
'''
import sys
import turtle
# global constants for turtle window dimensions
WINDOW_WIDTH = 2000
WINDOW_HEIGHT = 2000
class Beam:
    """
    This class creates the beams with hangers and the distance of each hanger.
    A "hanger" is either a Weight or a nested Beam; `dist` holds its signed
    horizontal offset from the pivot (negative = left of the pivot).
    """
    __slots__ = 'dist', 'hanger', 'beam_hang_index', 'scaling_factor'

    def __init__(self,beam_details):
        """
        Creates the beam object
        :param beam_details: the list of hangers and their positions on the
            beam, as alternating (distance, hanger) entries; a hanger entry
            is either an existing Beam or a value convertible by Weight().
            A weight of -1 encodes "missing weight, please solve for it".
        """
        self.dist = []
        self.hanger = []
        for index in range(len(beam_details)):
            # Splitting the values to the distances and hangers
            if index%2 == 0:
                self.dist.append(int(beam_details[index]))
            else:
                if isinstance(beam_details[index], Beam):
                    # Creating a beam
                    self.hanger.append(beam_details[index])
                else:
                    # Creating the weight
                    self.hanger.append(Weight(beam_details[index]))
        if len(self.hanger) != len(self.dist):
            # Validating the input provided
            # NOTE(review): only prints a warning; later attribute accesses
            # will fail because beam_hang_index etc. are never set.
            print('Wrong input provided')
        else:
            # Validating if the input provided is balanced
            self.beam_hang_index = self.get_midindex()
            #Setting the scaling factor for the beam which is used while drawing
            self.scaling_factor = self.weight() * 10
            # Adjusting the scaling factor so that the balance doesn't go out of screen
            if self.scaling_factor > WINDOW_WIDTH / 2:
                self.scaling_factor = WINDOW_WIDTH / 4
            # Checking if a weight is missing (sentinel value -1)
            if len([missing_weights for missing_weights in self.hanger if missing_weights.weight() == -1 ]) > 0:
                # Finding the missing weight
                self.find_missing_weight()
            if not self.isbalanced():
                # Weights are not balanced
                print("It is not a balanced weights. Please provide a balance with equal torques")
                sys.exit(0)

    def get_total_torque(self):
        """
        Returns the total torque on the beam (sum of |weight * distance|
        over all hangers).
        :return: torque value
        """
        return sum([abs(hang.weight() * dist) for hang, dist in zip(self.hanger[:], self.dist[:])])

    def get_midindex(self):
        """
        Returns the index on the hanger which where the hanger is hanging,
        i.e. the number of hangers left of the pivot (negative distances).
        :return: middle index
        """
        return len([left_numbers for left_numbers in self.dist if int(left_numbers) < 0])

    def find_missing_weight(self):
        """
        Finding and replacing the value of the empty weight in the balance
        (the hanger whose weight is -1) so the beam's torques balance.
        :return: None
        """
        # Finding the index of the missing value (first hanger reporting -1)
        missing_value_index = 0
        for hang in self.hanger:
            if hang.weight() == -1:
                break
            else:
                missing_value_index += 1
        # Finding the missing value: total signed torque without the missing
        # entry must be cancelled by the missing weight at its distance.
        total_torque = 0
        for index in range(len(self.dist)):
            total_torque += self.dist[index] * self.hanger[index].weight()
        # Remove the placeholder's own (-1 * dist) contribution.
        total_torque -= self.dist[missing_value_index] * self.hanger[missing_value_index].weight()
        missing_value = abs(total_torque // self.dist[missing_value_index])
        # Replacing the missing value in the balance
        self.hanger[missing_value_index].set_weight(missing_value)
        print("Missing weight found and replaced with " + str(missing_value))

    def weight(self):
        """
        returns the total weight of the beam (plain weights plus the
        recursive weight of every nested sub-beam)
        :return: total Weight
        """
        return sum([weight.weight() for weight in self.hanger if not isinstance(weight, Beam)]) + \
               sum([beam.weight() for beam in self.hanger if isinstance(beam, Beam)])

    def isbalanced(self):
        """
        Checks if the beam is balanced or not (|left torque| == |right torque|)
        :return: True if balanced else False
        """
        left_torque = abs(sum([hang.weight() * dist for hang,dist in zip(self.hanger[:self.beam_hang_index],self.dist[:self.beam_hang_index])]))
        right_torque = abs(sum([hang.weight() * dist for hang,dist in zip(self.hanger[self.beam_hang_index:],self.dist[self.beam_hang_index:])]))
        if left_torque != right_torque:
            # Not Balanced
            return False
        else:
            # Balanced
            return True

    def turtle_window_init (self,myWindowname):
        """
        Initialize for drawing.
        :pre: pos (0,0), heading (east), up
        :post: pos (0,WINDOW_HEIGHT/3), heading (east), up
        :param myWindowname: title for the turtle window
        :return: None
        """
        turtle.setworldcoordinates(-WINDOW_WIDTH / 2, -WINDOW_WIDTH / 2,
                                   WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2)
        turtle.up()
        turtle.left(90)
        turtle.forward(WINDOW_HEIGHT/3)
        turtle.right(90)
        turtle.title(myWindowname)

    def drawBalance(self, beam, factor, vert_height = WINDOW_HEIGHT/10):
        """
        Draws the Beam: drops down by vert_height, walks along the beam
        drawing each hanger (recursing with factor/3 for sub-beams), then
        returns to the starting pen position/heading.
        :pre: pos (0,0), heading (east), down
        :post: pos (0,0), heading (east), down
        :param beam: beam that has to be drawn
        :param factor: horizontal scale applied to the beam's distances
        :param vert_height: length that has to be suspended
        :return: None
        """
        turtle.right(90)
        turtle.forward(vert_height)
        turtle.left(90)
        for d in range(len(beam.dist)):
            turtle.forward(factor * int(beam.dist[d]))
            if isinstance(beam.hanger[d],Beam):
                self.drawBalance(beam.hanger[d], factor/3)
            else:
                self.drawWeight(beam.hanger[d].weight())
            for x in range(abs(int(beam.dist[d]))):
                # Drawing the beam factor indicators (tick marks) while
                # walking back towards the pivot one unit at a time.
                turtle.backward(factor * (int(beam.dist[d]) / abs(int(beam.dist[d] ))))
                turtle.right(90)
                turtle.forward(10)
                turtle.backward(10)
                turtle.left(90)
        # Returning back
        turtle.left(90)
        turtle.forward(vert_height)
        turtle.right(90)

    def drawWeight(self,weight, hanger_length = WINDOW_HEIGHT // 100 ):
        """
        Draws the weights: writes the numeric weight value below the hanger
        and restores the pen position/heading.
        :pre: pos (0,0), heading (east), down
        :post: pos (0,0), heading (east), down
        :param weight: Weight that has to be written
        :param hanger_length: base unit for the little hanger stem
        :return: None
        """
        turtle.left(180)
        turtle.right(90)
        turtle.backward(hanger_length * 1.5)
        turtle.up()
        turtle.backward(hanger_length * 2.5)
        turtle.write(weight)
        turtle.forward(hanger_length * 4 )
        turtle.right(90)
        turtle.down()

    def draw(self):
        """
        Draws the Balance (opens the turtle window and blocks in mainloop).
        :pre: pos (0,0), heading (east), up
        :post: pos (0,0), heading (east), up
        :return: None
        """
        self.turtle_window_init("Balance")
        turtle.down()
        self.drawBalance(self, self.scaling_factor )
        turtle.up()
        turtle.mainloop()
class Weight:
    """
    Holds the values of the weight
    """
    __slots__ = 'value'

    def __init__(self, weight):
        """
        Create a weight from anything int() accepts.
        :param weight: value of the weight that is being hanged
        """
        self.value = int(weight)

    def weight(self):
        """
        Return the current value of this weight.
        :return value: The value of the weight
        """
        return self.value

    def set_weight(self, weight):
        """
        Overwrite the stored value (stored as given, no int() cast).
        :param weight: weight that has to be set
        :return: None
        """
        self.value = weight
def main():
    """
    Main Method: reads a balance-description file (one beam per line:
    "name dist hanger dist hanger ..."), resolves nested beam references,
    builds the root Beam 'B' and draws it.
    :return: None
    """
    beams = {}
    # Reading the file: each line becomes {beam_name: [tokens...]}
    try:
        with open(input("Please provide the file location which have balance details"), mode='r') as input_file:
            for line in input_file:
                command = line.strip().split(' ')
                beams[command[0]] = command[1:]
    except:
        # Handling the wrong file name
        print("Bad file. Please check the file")
        sys.exit(1)
    # Replacing the beam values: substitute every sub-beam name with a
    # "ss*...*" placeholder until no 'B' (beam reference) remains.
    balance_arguments = ' '.join(beams['B'])
    while 'B' in balance_arguments:
        for key in beams.keys():
            if key != 'B':
                balance_arguments = balance_arguments.replace(key, "ss*" +','.join(beams[key])+"*")
    # Turn the placeholders into "Beam([...])" constructor expressions.
    balance_arguments = balance_arguments.replace('ss*','Beam([')
    balance_arguments = balance_arguments.replace('*', '])').split(' ')
    # Building the arguments for the main Balance
    arguments = []
    count = 0
    for index in range(len(balance_arguments)):
        if (balance_arguments[index] != ''):
            if count % 2 == 0:
                # adding the distances
                arguments.append(balance_arguments[index])
            else:
                # creating the objects
                if 'Beam' in balance_arguments[index]:
                    # SECURITY NOTE(review): eval() executes text taken from
                    # an arbitrary user-supplied file -- only run this on
                    # trusted input files.
                    arguments.append(eval(balance_arguments[index]))
                else:
                    arguments.append(int(balance_arguments[index]))
            count += 1
    # Creating the main Balance
    balance = Beam(arguments)
    # Drawing the main balance
    balance.draw()
if __name__ == '__main__':
    # Run the interactive balance parser/drawer only when executed directly.
    main()
24743dd4564c0fdff078aea75adc154eb5cc738b | Python | UT-CHG/meteor | /meteo_data/hwind_data/hwind_file.py | UTF-8 | 6,475 | 3.03125 | 3 | [] | no_license | import sys
import numpy as np
import matplotlib.pyplot as plt
import re
import datetime as dt
from utilities.utilities import haversine
class HwindFile:
    """One HWind gridded wind-analysis file: parses the storm-centred wind
    field and exposes the grid coordinates, velocity components, and the
    derived vmax/rmax quantities."""

    def __init__(self, pressure_central, ramp, file_path):
        """
        :param pressure_central: central pressure associated with this snapshot
        :param ramp: ramp value associated with this snapshot
        :param file_path: path to the HWind file; its name must contain a
            YYYY_MMDD_HHMM timestamp, which is parsed into self.time
        """
        self.pressure_central = pressure_central
        self.ramp = ramp
        self.file_path = file_path
        # The observation time is encoded in the file name (YYYY_MMDD_HHMM).
        time_data = re.search(r'(?P<time>[0-9]{4}_[0-9]{4}_[0-9]{4})', self.file_path)
        if (time_data == None):
            # Hard exit: the time stamp is mandatory for downstream use.
            print("Unable to extract time data for hwind file: {}. Exiting!".format(self.file_path))
            sys.exit()
        self.time = dt.datetime.strptime(time_data.group('time'), '%Y_%m%d_%H%M')

    def parse_data(self):
        """Parse the HWind file sequentially (the format is positional: the
        meaning of each line depends on exactly how many lines were read
        before it) and populate grid/velocity attributes."""
        with open(self.file_path) as hwind_file:
            #Skip first line
            hwind_file.readline()
            # Grid spacing, e.g. "DX=DY= 6.02280 KILOMETERS."
            dx_dy = re.search(r'DX=DY=(?P<dx_dy>[\s0-9.Ee]+)', hwind_file.readline())
            self.dx_dy = float(dx_dy.group('dx_dy'))
            # Storm-centre lon/lat; the grid's (x, y) = (0, 0) is the centre.
            storm_center = re.search(
                r'STORM CENTER LOCALE IS (?P<lon>[\s\-0-9.Ee]+) EAST LONGITUDE and (?P<lat>[\s\-0-9.Ee]+) NORTH LATITUDE',
                hwind_file.readline())
            self.storm_center_lon = float(storm_center.group('lon'))
            self.storm_center_lat = float(storm_center.group('lat'))
            #Skip next line
            hwind_file.readline()
            # Read in grid x_coordinates: a count line, then the values
            # spread over however many lines it takes.
            n_x_coordinates = int(hwind_file.readline())
            x_coordinates = []
            parsed_coordinates = 0
            while parsed_coordinates < n_x_coordinates:
                x_coord_string = hwind_file.readline().split()
                for x_coord in x_coord_string:
                    x_coordinates.append(float(x_coord))
                parsed_coordinates += len(x_coord_string)
            #Skip next line
            hwind_file.readline()
            #Read in grid y_coordinates (same count-then-values layout)
            n_y_coordinates = int(hwind_file.readline())
            y_coordinates = []
            parsed_coordinates = 0
            while parsed_coordinates < n_y_coordinates:
                y_coord_string = hwind_file.readline().split()
                for y_coord in y_coord_string:
                    y_coordinates.append(float(y_coord))
                parsed_coordinates += len(y_coord_string)
            #Skip next line
            hwind_file.readline()
            #Read in grid lon_coordinates (same count-then-values layout)
            n_lon_coordinates = int(hwind_file.readline())
            lon_coordinates = []
            parsed_coordinates = 0
            while parsed_coordinates < n_lon_coordinates:
                lon_coord_string = hwind_file.readline().split()
                for lon_coord in lon_coord_string:
                    lon_coordinates.append(float(lon_coord))
                parsed_coordinates += len(lon_coord_string)
            #Skip next line
            hwind_file.readline()
            #Read in grid lat_coordinates (same count-then-values layout)
            n_lat_coordinates = int(hwind_file.readline())
            lat_coordinates = []
            parsed_coordinates = 0
            while parsed_coordinates < n_lat_coordinates:
                lat_coord_string = hwind_file.readline().split()
                for lat_coord in lat_coord_string:
                    lat_coordinates.append(float(lat_coord))
                parsed_coordinates += len(lat_coord_string)
            lat_coordinates = np.asarray(lat_coordinates)
            #Store coordinates in arrays
            self.cartesian_coordinates = np.column_stack((x_coordinates, y_coordinates))
            self.spherical_coordinates = np.column_stack((lon_coordinates, lat_coordinates))
            #Store grid point coordinates (all combinations)
            x_grid, y_grid = np.meshgrid(self.cartesian_coordinates[:, 0], self.cartesian_coordinates[:, 1])
            self.cartesian_grid_point_coordinates = np.column_stack((x_grid.flatten(), y_grid.flatten()))
            lon_grid, lat_grid = np.meshgrid(self.spherical_coordinates[:, 0], self.spherical_coordinates[:, 1])
            self.spherical_grid_point_coordinates = np.column_stack((lon_grid.flatten(), lat_grid.flatten()))
            #Skip next line
            hwind_file.readline()
            # Read in grid velocity data: "(u, v)" tuples, nx per grid row,
            # possibly several tuples per physical line.
            n_grid_string = hwind_file.readline().split()
            nx_grid = int(n_grid_string[0])
            ny_grid = int(n_grid_string[1])
            vx = []
            vy = []
            for i in range(0, ny_grid):
                temp_vx = []
                temp_vy = []
                parsed_velocities = 0
                while parsed_velocities < nx_grid:
                    velocities = re.finditer(r'\((?P<u>[\s\-0-9.Ee]+),(?P<v>[\s\-0-9.Ee]+)\)', hwind_file.readline())
                    for velocity in velocities:
                        temp_vx.append(float(velocity.group('u')))
                        temp_vy.append(float(velocity.group('v')))
                        parsed_velocities += 1
                vx.append(temp_vx)
                vy.append(temp_vy)
            #Store velocities (flattened, matching the grid-point ordering)
            self.vx = np.asarray(vx).flatten()
            self.vy = np.asarray(vy).flatten()
            # Find vmax (peak wind speed) and rmax (great-circle distance
            # from the storm centre to that peak, via haversine).
            speed = np.hypot(np.asarray(vx), np.asarray(vy))
            max_index = np.unravel_index(np.argmax(speed, axis=None), speed.shape)
            self.vmax = speed[max_index]
            lon_max = lon_grid[max_index]
            lat_max = lat_grid[max_index]
            self.rmax = haversine(self.storm_center_lon, self.storm_center_lat, lon_max, lat_max)

    def plot_data(self):
        """Render a streamplot of the wind field, coloured by speed
        (opens a blocking matplotlib window)."""
        nx = len(self.cartesian_coordinates[:, 0])
        ny = len(self.cartesian_coordinates[:, 1])
        speed = np.hypot(self.vx, self.vy).reshape((nx, ny))
        plt.streamplot(self.cartesian_coordinates[:, 0], self.cartesian_coordinates[:, 1], self.vx.reshape((nx, ny)),
                       self.vy.reshape((nx, ny)), color=speed, density=2)
        plt.colorbar()
        x_min = min(self.cartesian_coordinates[:, 0])
        x_max = max(self.cartesian_coordinates[:, 0])
        y_min = min(self.cartesian_coordinates[:, 1])
        y_max = max(self.cartesian_coordinates[:, 1])
        plt.axis([x_min, x_max, y_min, y_max])
        plt.axes().set_aspect('equal')
        plt.grid()
        plt.show()
| true |
dbc134f9eabe1f526de3ed71800dab6041d77d6c | Python | noahbroyles/minecraft-server | /main.py | UTF-8 | 3,050 | 2.65625 | 3 | [] | no_license | import os
import shutil
import subprocess
import requests
from tqdm import tqdm
dir_path = os.path.dirname(os.path.realpath(__file__))
server_dir = dir_path + "/server/"
""" Description
:type file:
:param file:
:type word:
:param word: - what should be replaced
:type replacement:
:param replacement: - with what the word should be replaced
:raises:
:rtype:
"""
def find_and_replace(file_, word, replacement):
with open(file_, "r+") as f:
text = f.read()
f.write(text.replace(word, replacement))
def setup():
    """Interactive first-time server setup: write start.sh, run the server
    once to generate its files, accept the EULA, set the MOTD, then either
    start the server or open the properties file for editing."""
    server_name = input("What is the server's name? ")
    os.chdir(server_dir)
    bat_file = open(server_dir + "start.sh", "w")
    bat_file.write("java -Xmx2048M -Xms1024M -jar server.jar nogui")
    bat_file.close()
    # First run generates eula.txt / server.properties, then exits.
    subprocess.call(server_dir + "start.sh")
    find_and_replace(server_dir + "eula.txt", "false", "true")
    print("Agreed to EULA")
    try:
        find_and_replace(
            server_dir + "server.properties",
            "motd=A Minecraft Server",
            "motd=" + server_name,
        )
    except Exception as err:
        print(f"failed, for some reason ({str(err)})")
    answer_start = input(
        "Wanna start the server right away or change the settings first? [start/conf]"
    )
    def ask_option():
        # NOTE(review): answer_start is never re-read, so any input other
        # than "start"/"conf" recurses forever -- confirm intended behavior.
        if answer_start == "start":
            subprocess.call(server_dir + "start.sh")
        elif answer_start == "conf":
            # NOTE(review): notepad.exe is Windows-only, while start.sh is a
            # shell script -- the platform assumptions are mixed here.
            os_command_string = "notepad.exe server.properties"
            os.system(os_command_string)
        else:
            ask_option()
    ask_option()
def get_server_jar_url(version_):
    """Return the server.jar download URL for the given Minecraft version.

    :param version_: Minecraft version id (e.g. "1.16.5")
    :return: Download url in the form of https://launcher.mojang.com/v1/objects/.../server.jar
    """
    manifest_url = "https://launchermeta.mojang.com/mc/game/version_manifest.json"
    all_versions = requests.get(manifest_url).json()["versions"]
    # Same failure mode as before: an unknown version raises IndexError.
    matching = [entry for entry in all_versions if entry["id"] == version_]
    version_details = requests.get(matching[0]["url"]).json()
    return version_details["downloads"]["server"]["url"]
# First run: no server.jar yet -- download the requested version, then run
# the interactive setup.  Otherwise start (or finish setting up) the server.
if not os.path.exists("server/server.jar"):
    chunk_size = 1024
    version = input("Please send what version of Minecraft you want to use: ")
    url = get_server_jar_url(version)
    req = requests.get(url, stream=True)
    total_size = int(req.headers["content-length"])
    if not os.path.isdir("server/"):
        os.mkdir("server")
    # Stream the jar to disk with a tqdm progress bar (1 KiB chunks).
    with open("server.jar", "wb") as file:
        for data in tqdm(
            iterable=req.iter_content(chunk_size=chunk_size),
            total=total_size / chunk_size,
            unit="KB",
        ):
            file.write(data)
    shutil.move(dir_path + "/server.jar", server_dir + "server.jar")
    setup()
else:
    # An existing logs/ folder means the world was already set up once.
    if os.path.isdir("server/logs"):
        os.chdir(server_dir)
        print("World Exists, starting server....")
        subprocess.call("start.sh")
    else:
        setup()
| true |
295eafbea5cd7070d2783c137bb2948a8185e540 | Python | markm3010/counter-code | /count_words.py | UTF-8 | 557 | 3.59375 | 4 | [] | no_license | import re
'''
Count number of words in a file
'''
def count_words(f: str) -> int:
    """Echo every word found in the file at path *f* (each word is printed
    together with its single trailing separator, when one follows it) and
    return the number of words matched.

    A "word" is a maximal run of letters, digits 1-9, '?', '.' or '-'
    starting at a word boundary, matched case-insensitively.

    :param f: path of the text file to scan
    :return: number of words matched
    """
    with open(f) as fh:
        data_src = fh.read()
    word_detector = re.compile(r'(\b[1-9a-z?.-]+)(\s|\n)?', re.IGNORECASE)
    ct = 0
    for word in word_detector.finditer(data_src):
        # group(0) is the full match, i.e. the word plus its optional
        # trailing whitespace -- so the input text is reproduced verbatim.
        print(word.group(0), end='')
        ct += 1
    return ct
if __name__ == '__main__':
    # Demo: count the words in a fixed sample file.
    filename = "word_probs.txt"
    word_ct = count_words(filename)
    print(f'\n\nNumber of words in (unknown): {word_ct}')
| true |
ff7f4e4f1a81b71362babf772e2e1d1ec2fe6ee4 | Python | 718795325/python-code | /django/2_模板/代码/day02/App/views.py | UTF-8 | 2,130 | 2.625 | 3 | [] | no_license | from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
# Create your views here.
from django.urls import reverse
def index(request):
    """App home page; responds with a plain-text placeholder
    ("首页" = "home page")."""
    return HttpResponse("首页")
def show(request, name, age):
    """Echo the captured URL parameters back as "name:age"."""
    return HttpResponse("{}:{}".format(name, age))
def call(request,phone):
    """Render the call page; the captured phone number is currently unused."""
    # return HttpResponse(phone)
    return render(request,'call.html')
def req_res(request):
    """Scratchpad view used to explore Django request/response APIs.

    Currently it only logs the reverse()d phone-call URL and redirects to it;
    everything else is kept as commented-out reference material.
    """
    print(request.method)
    # print(request.GET.get("username")) # returns None when 'username' is absent
    # print(request.GET["username"]) # raises when 'username' is absent
    # URLs may only contain ASCII; anything else must be percent-escaped
    # print(request.GET.getlist('username'))
    # post
    # print(request.POST.getlist('username'))
    # print(request.path)
    # Referring page
    # http://127.0.0.1:9001/user/call/0998-12345678/
    # print(request.META.get("HTTP_REFERER"))
    # print(request.META.get("REMOTE_ADDR"))
    # Response objects
    # res = HttpResponse("ok")
    # res.status_code = 300
    # res.content = b"123"
    # res = render(request,'call.html')
    # return res
    # JsonResponse
    # return JsonResponse({'code':1})
    # return JsonResponse([1,2,3,4,5],safe=False)
    # Redirects
    # return HttpResponseRedirect("/user/")
    # return redirect("/user/")
    # return redirect("/user/show/{}/{}/".format('tom',30))
    # return redirect("/user/show/admin/50/")
    # In-app redirects may omit the host part (http://127.0.0.1:9003)
    # return redirect("http://127.0.0.1:9003/user/show/admin/50/")
    # External redirect
    # return redirect("https://www.baidu.com/")
    # Reverse resolution: look a route up by "app_namespace:name"
    # print(reverse("App:index"))
    # return redirect(reverse("App:index")) # without arguments
    # Named URL parameters must be passed as keyword arguments
    # print(reverse("App:show",kwargs={'name':'admin','age':20}))
    # return redirect(reverse("App:show",kwargs={'name':'admin','age':20}))
    # print(1 / 0)
    print(reverse("App:call",args=('0311-58931234',)))
    return redirect(reverse("App:call",args=('0311-58931234',)))
| true |
08f3c7e10bd0de4de6b784737e3223f46c944a48 | Python | MrHowdyDoody/testrepo | /a2.py | UTF-8 | 3,759 | 3.8125 | 4 | [] | no_license | # Do not import any modules. If you do, the tester may reject your submission.
# Constants for the contents of the maze.
# The visual representation of a wall.
WALL = '#'
# The visual representation of a hallway.
HALL = '.'
# The visual representation of a brussels sprout.
SPROUT = '@'
# Constants for the directions. Use these to make Rats move.
# LEFT/RIGHT are column deltas; UP/DOWN are row deltas (row 0 is the top).
# The left direction.
LEFT = -1
# The right direction.
RIGHT = 1
# No change in direction.
NO_CHANGE = 0
# The up direction.
UP = -1
# The down direction.
DOWN = 1
# The letters for rat_1 and rat_2 in the maze.
RAT_1_CHAR = 'J'
RAT_2_CHAR = 'P'
class Rat:
    """ A rat caught in a maze. """

    def __init__(self, symbol, row, col):
        """ (Rat, str, int, int) -> NoneType

        Create a rat identified by a one-character symbol at (row, col),
        with no sprouts eaten yet.
        """
        self.symbol = symbol
        self.row = row
        self.col = col
        self.num_sprouts_eaten = 0

    def set_location(self, row, col):
        """ (Rat, int, int) -> NoneType

        Place the rat at the given row and column.
        """
        self.row, self.col = row, col

    def eat_sprout(self):
        """ (Rat) -> NoneType

        Record that the rat ate one more sprout.
        """
        self.num_sprouts_eaten = self.num_sprouts_eaten + 1

    def __str__(self):
        """ (Rat) -> str

        Return a one-line summary of the rat's position and meal count.
        """
        return '{} at ({}, {}) ate {} sprouts.'.format(
            self.symbol, self.row, self.col, self.num_sprouts_eaten)
class Maze:
    """ A 2D maze. """

    def __init__(self, maze, rat_1, rat_2):
        """ (Maze, list of list of str, Rat, Rat) -> NoneType

        Initialize the maze with its grid and two rats, and count how many
        sprouts the grid initially contains.
        """
        self.maze = maze
        self.rat_1 = rat_1
        self.rat_2 = rat_2
        # Count every sprout in the grid once, up front (replaces the
        # manual accumulator loop).
        self.num_sprouts_left = sum(row.count(SPROUT) for row in maze)

    def is_wall(self, row, col):
        """ (Maze, int, int) -> bool

        Return True iff the cell at (row, col) is a wall.
        """
        # Direct boolean return instead of if/else returning True/False.
        return self.maze[row][col] == WALL

    def get_character(self, row, col):
        """ (Maze, int, int) -> str

        Return the character to display at (row, col): a rat's symbol when
        a rat is there (rat_1 takes precedence over rat_2), otherwise the
        maze cell itself.
        """
        if row == self.rat_1.row and col == self.rat_1.col:
            return self.rat_1.symbol
        if row == self.rat_2.row and col == self.rat_2.col:
            return self.rat_2.symbol
        return self.maze[row][col]

    def move(self, rat, vert, hor):
        """ (Maze, Rat, int, int) -> NoneType

        Move rat by vert rows and hor columns, unless the destination is a
        wall (in which case the rat stays put).  When the rat lands on a
        sprout it eats it: the cell becomes a hallway and the maze's sprout
        count drops by one.
        """
        new_row = rat.row + vert
        new_col = rat.col + hor
        if not self.is_wall(new_row, new_col):
            rat.col = new_col
            rat.row = new_row
            if self.maze[new_row][new_col] == SPROUT:
                rat.eat_sprout()
                self.maze[new_row][new_col] = HALL
                self.num_sprouts_left -= 1
# Demo objects for ad-hoc testing: two rats and a small sample maze.
rat1 = Rat('a',3,2)
rat2 = Rat('b',2,2)
maze1=[['#', '#', '#', '#', '#', '#', '#'],
       ['#', '.', '.', '.', '.', '.', '#'],
       ['#', '.', '#', '#', '#', '.', '#'],
       ['#', '.', '.', '@', '#', '.', '#'],
       ['#', '@', '#', '.', '@', '.', '#'],
       ['#', '#', '#', '#', '#', '#', '#']]
maze2 = Maze(maze1,rat1,rat2)
| true |
ae7911f2694aeccd46c077661c21c6a8a22d65cc | Python | smwhr/lahotline | /Action.py | UTF-8 | 1,267 | 2.90625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
def playable(func):
    """Decorator for Action.execute methods: after the action runs, have the
    game's player speak the action's description (when one is set).

    The wrapped callable is expected to be invoked as ``method(action, game)``
    where *action* carries ``description`` and *game* carries ``player.say``.
    """
    from functools import wraps  # local import: this module has no import section

    @wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        # Forward keyword arguments too (the original silently dropped them)
        # and propagate the wrapped function's return value.
        result = func(*args, **kwargs)
        if args[0].description is not None:
            args[1].player.say(args[0].description)
        return result

    return wrapper
class Action(object):
    """Base class for all actions; concrete actions implement execute(game)."""
    pass
class Goto(Action):
    """Action that moves the player to another room."""

    def __init__(self, room_name):
        self.room_name = room_name

    def execute(self, game):
        """Switch the game's current room and fire the room-change hook."""
        previous = game.s("current_room")
        current = game.s("current_room", game.rooms[self.room_name])
        game.on_room_change(previous, current)
class ChangeVar(Action):
    """Action that stores *value* under *key* in the game state."""

    def __init__(self, key, value, ns=None, description=None):
        self.key = key
        self.value = value
        self.ns = ns
        self.description = description

    @playable
    def execute(self, game):
        """Write the configured value into the game state (namespace-aware)."""
        game.s(self.key, self.value, ns=self.ns)
class Lambda(Action):
    """Action that runs an arbitrary callable against the game."""

    def __init__(self, func, description=None):
        self.func = func
        self.description = description

    @playable
    def execute(self, game):
        """Invoke the wrapped callable with the game instance."""
        self.func(game)
class Describe(Action):
    """Action whose only effect is saying its description (via @playable)."""
    def __init__(self, description = None):
        self.description = description
    @playable
    def execute(self, game):
        # No direct effect; the @playable wrapper speaks self.description.
        pass
class Group(Action):
    """Composite action: runs several actions in order."""

    def __init__(self, *args):
        self.actions = list(args)

    def execute(self, game):
        """Execute each contained action against *game*, in sequence."""
        for member in self.actions:
            member.execute(game)
| true |
e5695ff36da356cdaf8e8573a6f062b15adf588e | Python | rev233/huolala | /wordcount.py | UTF-8 | 2,611 | 3.1875 | 3 | [] | no_license | import re # 正则表达式库
import collections # 词频统计库
import numpy as np # numpy数据处理库
import jieba # 结巴分词
from wordcloud import wordcloud, ImageColorGenerator # 词云展示库
from PIL import Image # 图像处理库
import matplotlib.pyplot as plt # 图像展示库
# Read the input file
fn = open('result.txt', 'r', encoding='utf-8') # open the file
string_data = fn.read() # read the whole file at once
fn.close() # close the file
# Text pre-processing
pattern = re.compile(u'\t|\n|\.|-|:|;|\)|\(|\?|"') # regex of characters to strip
string_data = re.sub(pattern, '', string_data) # remove every character matching the pattern
# Tokenisation
seg_list_exact = jieba.cut(string_data, cut_all=False) # jieba "accurate mode" segmentation
object_list = []
remove_words = [u'的', u',', u'和', u'是', u'随着', u'对于', u'对', u'等', u'能', u'都', u'。', u' ', u'、', u'中', u'在', u'了',
                u'通常', u'如果', u'我们', u'需要', '我', '你', '有', '也', '就', 'Author', 'content', 'name', 'Answer',
                '知乎', '货', '拉拉', '就是', '但是', '所以', '吗', ';', '"', '"', '但', '2', '1', '女孩', '人', '没有', '!', '?'
                , '什么', '一个', '这个', '跳车', '说', '自己', '不', ',', '不会', '可能', '因为', '会', '可以', '被', '应该', '让', '要',
                '上', '一', '没', '”', '这', ':', '(', ')', '/', '3', '“', '还', '还是', '啊', '—', ';'] # custom stop-word list
for word in seg_list_exact: # iterate over every token
    if word not in remove_words: # keep it only if it is not a stop word
        object_list.append(word) # append the token to the list
# Word-frequency statistics
word_counts = collections.Counter(object_list) # count token frequencies
word_counts_top10 = word_counts.most_common(15) # take the most frequent tokens (15, despite the name)
print(word_counts_top10) # sanity-check output
word_counts_top10 = str(word_counts_top10)
# Word-cloud rendering
mask = np.array(Image.open('background.jpg')) # word-cloud background shape
img_colors = ImageColorGenerator(mask) # extract the background image colours
wc = wordcloud.WordCloud(
    font_path='simfang.ttf', # font to render with
    mask=mask, # background mask image
    max_words=200, # maximum number of words shown
    max_font_size=180, # largest font size
    background_color='white',
    width=640, height=480,
    scale=0.6,
    colormap='binary',
)
wc.generate_from_frequencies(word_counts) # build the cloud from the frequency dict
#wc.recolor(color_func=img_colors) # recolour using the background image
plt.imshow(wc) # show the word cloud
plt.axis('off') # hide the axes
plt.show() # display the figure (blocking)
wc.to_file('wordcloud.png')
| true |
5dca326063709c96415eda832062845e04ea0eea | Python | Moscdota2/Archivos | /python1/UniversidadPython/iterarrangos.py | UTF-8 | 71 | 3.015625 | 3 | [] | no_license | tupa = (13,1,8,3,2,5,8)
# Print every element of the tuple 'tupa' (defined above) that is <= 5.
for n in tupa:
    if n <= 5:
        print(n)
b90f6431bdde74a842c606d97d34e5157e56d900 | Python | partho-maple/coding-interview-gym | /leetcode.com/python/785_Is_Graph_Bipartite.py | UTF-8 | 789 | 3.21875 | 3 | [
"MIT"
] | permissive | from collections import deque
class Solution(object):
    def isBipartite(self, graph):
        """
        :type graph: List[List[int]]
        :rtype: bool

        Two-colour each connected component; the graph is bipartite iff no
        edge ever joins two nodes of the same colour.
        """
        color = {}
        for start in range(len(graph)):
            # Isolated or already-coloured nodes need no work.
            if start in color or not graph[start]:
                continue
            color[start] = 0
            stack = [start]
            while stack:
                node = stack.pop()
                for neighbour in graph[node]:
                    if neighbour in color:
                        if color[neighbour] == color[node]:
                            return False
                    else:
                        color[neighbour] = 1 - color[node]
                        stack.append(neighbour)
        return True
| true |
c5232323f9f1372e57cad7793479f4e85c98df78 | Python | cgcai/alfred-currency-convert | /lib/currency.py | UTF-8 | 2,664 | 2.671875 | 3 | [] | no_license | import json
import time
from lib.openexchangerates import OpenExchangeRates as API
# On-disk cache for the supported-currency list (static data).
CURRENCY_CACHE = 'currencies.json'
# On-disk cache for the most recently fetched exchange rates.
RATES_CACHE = 'latest.json'
RATES_FRESHNESS = 6 * 60 * 60 # 6 hours
class Conversion(object):
    """Currency conversion backed by OpenExchangeRates, with on-disk caching
    of the currency list and of the latest rates."""

    def __init__(self, api_key, currency_cache=CURRENCY_CACHE,
                 rates_cache=RATES_CACHE, rates_freshness=RATES_FRESHNESS):
        """
        :param api_key: OpenExchangeRates API key
        :param currency_cache: path of the JSON file caching the currency list
        :param rates_cache: path of the JSON file caching the latest rates
        :param rates_freshness: max age (seconds) before cached rates refresh
        """
        self.__api_key = api_key
        self.__currency_cache = currency_cache
        self.__rates_cache = rates_cache
        self.__rates_freshness = rates_freshness
        self.__api = API(self.__api_key)

    def __refresh_currencies(self):
        # Fetch the currency list from the API and rewrite the cache file.
        currencies = self.__api.currencies()
        with open(self.__currency_cache, 'w') as f:
            json.dump(currencies, f, indent=2, sort_keys=True)
        return currencies

    def __refresh_rates(self):
        # Fetch the latest rates from the API and rewrite the cache file.
        rates = self.__api.latest()
        with open(self.__rates_cache, 'w') as f:
            json.dump(rates, f, indent=2, sort_keys=True)
        return rates

    def supported_currencies(self):
        """Return the mapping of supported currency codes (disk-cached)."""
        currencies = None
        try:
            with open(self.__currency_cache, 'r') as f:
                currencies = json.load(f)
        except IOError:
            # Cache miss: fetch from the API (which also writes the cache).
            currencies = self.__refresh_currencies()
        return currencies

    def __get_rates(self):
        # Load rates from the cache, refetching when the cache is missing or
        # older than the configured freshness window.
        rates = None
        try:
            with open(self.__rates_cache, 'r') as f:
                rates = json.load(f)
        except IOError:
            rates = self.__refresh_rates()
        if not Conversion.__is_fresh(rates['timestamp'],
                                     self.__rates_freshness):
            rates = self.__refresh_rates()
        return rates['rates']

    def convert(self, amount, base, target):
        """Convert *amount* from currency *base* to currency *target*.

        Returns a dict with keys 'base_amount'/'base'/'target' plus
        'status' ('success' or 'unsupported') and 'target_amount'
        (-1 when either currency code is unsupported).
        """
        base = base.upper()
        target = target.upper()
        rates = self.__get_rates()
        retval = {
            'base_amount': amount,
            'base': base,
            'target': target
        }
        # Check if conversion is supported.
        if base not in rates or target not in rates:
            retval['status'] = 'unsupported'
            retval['target_amount'] = -1
            return retval
        # Convert 'amount' to USD.
        # (The free OpenExchangeRates API restricts base currency to USD)
        if base != 'USD':
            amount /= rates[base]
        # Convert a USD amount to the target currency.
        result = amount * rates[target]
        retval['status'] = 'success'
        retval['target_amount'] = result
        return retval

    @staticmethod
    def __is_fresh(data_ts, freshness):
        # True when the timestamp is newer than `freshness` seconds ago.
        now = time.time()
        delta = now - data_ts
        return delta < freshness
| true |
af4e345f89c61cdce08df2dd2472ef6cb8631dd5 | Python | MaximDmitrievich/IT-master | /MasterWork/MLVision/ViolaJohnes/main.py | UTF-8 | 1,658 | 2.71875 | 3 | [] | no_license | from argparse import ArgumentParser
import time
import cv2
import numpy as np
def integral_image(img):
    """Return the integral image (summed-area table) of *img*.

    out[i, j] equals the sum of img[:i+1, :j+1].  The previous
    implementation only added a single neighbouring cell per position
    (leaving out[0, 0] at zero) and compared indices with ``is``, which
    does not produce an integral image; the cumulative-sum form below is
    the standard definition.

    :param img: 2-D array-like of pixel intensities
    :return: float ndarray of the same shape as *img*
    """
    int_img = np.asarray(img, dtype=float)
    return int_img.cumsum(axis=0).cumsum(axis=1)
def main(path):
    """Benchmark OpenCV's Haar-cascade (Viola-Jones) face detector on one image.

    Prints the elapsed times for grayscale conversion and detection.

    :param path: path to the image file to analyse
    :return: 0 on completion
    """
    #img = cv2.imread(path)[::-1]
    img = cv2.imread(path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    clf = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    print(f'========\tSTARTING SCORE TEST OF VIOLA-JONES ALGORITM OF OBJECT DETECTION\t========\n')
    print(cv2.getBuildInformation())
    # Time the grayscale conversion.
    # NOTE(review): this timed conversion uses COLOR_RGB2GRAY while the
    # untimed one above used COLOR_BGR2GRAY; the second result (slightly
    # different channel weighting) overwrites 'gray' -- confirm intent.
    start_t = time.perf_counter()
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    end_t = time.perf_counter()
    img_to_gray = end_t - start_t
    # Time the detection pass (scaleFactor=1.3, minNeighbors=125).
    start_t = time.perf_counter()
    clf.detectMultiScale(gray, 1.3, 125)
    end_t = time.perf_counter()
    detection = end_t - start_t
    print(f'Time has passed:\t{img_to_gray + detection}\t\n')
    print(f'Image converting:\t{img_to_gray}')
    print(f'Object detection:\t{detection}')
    print(f'Image shape in numpy:\t{img.shape}')
    return 0
if __name__ == '__main__':
    # CLI entry point: single positional argument, the image file path.
    parser = ArgumentParser(description='Process path to file')
    parser.add_argument('filepath', metavar='PATH', help='path for the file', type=str)
    args = parser.parse_args()
    print('\n')
    main(args.filepath)
    print('\n')
c2ec202b8dbf99de4f510121007aa9c4e1568952 | Python | aDENTinTIME/holbertonschool-higher_level_programming | /0x01-python-if_else_loops_functions/backup_9.py | UTF-8 | 125 | 3.5 | 4 | [] | no_license | #!/usr/bin/python3
def print_last_digit(number):
    """Print the last digit of ``number`` (no trailing newline) and return it as an int."""
    last_char = str(number)[-1:]
    print(last_char, end="")
    return int(last_char)
| true |
6c2cb90bb432ffecd85dcded88c9daf27c2b9ad5 | Python | Fluorescence-Tools/tttrlib | /examples/release_highlights/plot_release_highlights_0_20_0.py | UTF-8 | 2,472 | 2.84375 | 3 | [
"BSD-3-Clause"
] | permissive | """
========================================
Release Highlights for tttrlib 0.23
========================================
.. currentmodule:: tttrlib
We are pleased to announce the release of tttrlib 0.20, which comes
with many bug fixes and new features! We detail below a few of the major
features of this release. For an exhaustive list of all the changes, please
refer to the :ref:`release notes <changes_0_20>`.
To install the latest version with conda::
conda install -c tpeulen tttrlib
"""
# %%
# Test for plotting
# ------------------------------------
#
# The :func:`inspection.permutation_importance` can be used to get an
# estimate of the importance of each feature, for any fitted estimator:
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): demo figure only -- the title/labels below are copied from
# the scikit-learn release-highlights template and do not describe the
# sine curve actually plotted.
fig, ax = plt.subplots()
x = np.linspace(0, 5)
ax.plot(x, np.sin(x))
ax.set_title("Permutation Importance of each feature")
ax.set_ylabel("Features")
fig.tight_layout()
plt.show()
# %%
# Improved PTU header support
# -------------------------------------------------------
#
# The :class:`ensemble.HistGradientBoostingClassifier`
# and :class:`ensemble.HistGradientBoostingRegressor` now have native
# support for missing values (NaNs). This means that there is no need for
# imputing data when training or predicting.
# NOTE(review): placeholder section -- prints a marker instead of showing
# the PTU header feature it announces.
print("Better Header support")
# %%
# Writing of TTTR data
# ------------------------------------------
# Most estimators based on nearest neighbors graphs now accept precomputed
# sparse graphs as input, to reuse the same graph for multiple estimator fits.
# To use this feature in a pipeline, one can use the `memory` parameter, along
# with one of the two new transformers,
# :class:`neighbors.KNeighborsTransformer` and
# :class:`neighbors.RadiusNeighborsTransformer`. The precomputation
# can also be performed by custom estimators to use alternative
# implementations, such as approximate nearest neighbors methods.
# See more details in the :ref:`User Guide <neighbors_transformer>`.
from tempfile import TemporaryDirectory
import tttrlib
# with TemporaryDirectory(prefix="tttrlib_temp_") as tmpdir:
#     estimator = make_pipeline(
#         KNeighborsTransformer(n_neighbors=10, mode='distance'),
#         Isomap(n_neighbors=10, metric='precomputed'),
#         memory=tmpdir)
#     estimator.fit(X)
#
#     # We can decrease the number of neighbors and the graph will not be
#     # recomputed.
#     estimator.set_params(isomap__n_neighbors=5)
#     estimator.fit(X)
| true |
950da2d4636b8e267f88af63ac5388760cb13eaa | Python | PranaliDesai/Algorithms_DataS | /Calculate_Size_of_a_binary_tree.py | UTF-8 | 124 | 3 | 3 | [] | no_license | def size(node):
if node is None:
return 0
else:
return (size(node.left)+ 1 + size(node.right))
| true |
2f1676db0602c195803aa12116e38b0ad6b0c22e | Python | leonamtv/mlp | /main_and.py | UTF-8 | 582 | 2.875 | 3 | [] | no_license | from core.MLP import MLP
from random import shuffle
EPOCHS = 2000

# Two inputs, three hidden units, one output, learning rate 0.3.
mlp = MLP(2, 3, 1, 0.3)

# Truth table for logical AND: (inputs, expected output).
dataset = [
    ([0, 0], [0]),
    ([0, 1], [0]),
    ([1, 0], [0]),
    ([1, 1], [1])
]

for epoch in range(EPOCHS):
    approx_error_total = 0
    class_error_total = 0
    # `samples` aliases `dataset`; shuffle() reorders it in place each epoch.
    samples = dataset
    shuffle(samples)
    for inputs, expected in samples:
        approx_err, class_err = mlp.treinar(inputs, expected)
        approx_error_total += approx_err
        class_error_total += class_err
    print(f"Época {epoch + 1} \t| Erro aprox: { approx_error_total } \t| Erro class: { class_error_total } ")
| true |
5e05d44ae8a5a45becbaad7e8b2fbc01623113e4 | Python | CommissarMa/PythonTutorCMa | /3Python重要数据类型/3_2字符串/3_2_1概述.py | UTF-8 | 362 | 3.09375 | 3 | [] | no_license | # 字符串是一种有序的字符集合,用于表示文本数据。
# 字符串中的字符可以是ASCII字符、各种符号以及各种Unicode字符。
# 严格意义上,字符串属于不可变序列,意味着不能直接修改字符串。
# 字符串中的字符安装从左到右的顺序,具有位置顺序,即支持索引、分片等操作。 | true |
c758f8567c9a8b1c0d9ca89edaf81a2fb38bc733 | Python | vandosant/data-science-from-scratch | /correlation.py | UTF-8 | 1,368 | 3.625 | 4 | [] | no_license | from __future__ import division
def dot(v1, v2):
    """Return the dot product of two equal-length vectors."""
    total = 0
    for a, b in zip(v1, v2):
        total += a * b
    return total
def mean(x):
    """Arithmetic mean of a non-empty sequence."""
    total = sum(x)
    return total / len(x)
def de_mean(x):
    """Shift x so the returned list has mean zero."""
    center = mean(x)
    return [value - center for value in x]
def covariance(x, y):
    """Sample covariance (n - 1 denominator) of the paired sequences x and y."""
    deviations_x = de_mean(x)
    deviations_y = de_mean(y)
    return dot(deviations_x, deviations_y) / (len(x) - 1)
def sum_of_squares(v):
    """v . v — the sum of each component squared."""
    return dot(v, v)
def variance(x):
    """Sample variance (n - 1 denominator) of x."""
    return sum_of_squares(de_mean(x)) / (len(x) - 1)
def standard_deviation(x):
    """Sample standard deviation: square root of the sample variance."""
    import math
    var = variance(x)
    return math.sqrt(var)
def correlation(x, y):
    """Pearson correlation of x and y; 0 when either input has no variation."""
    sx = standard_deviation(x)
    sy = standard_deviation(y)
    if sx == 0 or sy == 0:
        # Correlation is undefined for a constant series; report 0 instead.
        return 0
    return covariance(x, y) / sx / sy
def rank_simple(vector):
    """Argsort: indices of vector's elements in ascending order (stable for ties)."""
    return sorted(range(len(vector)), key=lambda i: vector[i])
def rank(v):
    """Rank of each element of v (0 = smallest); ties keep original order."""
    order = sorted(range(len(v)), key=lambda i: v[i])
    ranks = [0] * len(v)
    for position, index in enumerate(order):
        ranks[index] = position
    return ranks
def ranked_correlation(x, y):
    """Spearman-style correlation: Pearson correlation computed on the ranks."""
    return correlation(rank(x), rank(y))
# Demo data: popularity scores vs. prices. The price outlier (200) distorts
# the plain correlation; the rank correlation is robust to it.
juice_popularity = [10, 9.8, 8, 7.8, 7.7, 7, 6, 5, 4, 2]
juice_price = [200, 44, 32, 24, 22, 17, 15, 12, 8, 4]
# print() with a single argument behaves identically under Python 2 and 3;
# the original Python 2 print statements fail to parse under Python 3.
print(correlation(juice_popularity, juice_price))
print(ranked_correlation(juice_popularity, juice_price))
| true |
7115100d1cf7ece4342ec27c55c7ec63a26b218b | Python | pperone/FinnBot | /finn_bot.py | UTF-8 | 2,483 | 2.609375 | 3 | [] | no_license | import os
import time
import re
from slackclient import SlackClient
# Slack RTM client authenticated via the SLACK_BOT_TOKEN env var.
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
# Filled in with this bot's user id after auth.test succeeds in __main__.
finn_bot_id = None
# Seconds to sleep between RTM polls.
RTM_READ_DELAY = 1
# Captures a leading Slack user mention: group 1 = user id, group 2 = the rest.
MENTION_REGEX = "^<@(|[WU].+?)>(.*)"
def parse_bot_commands(slack_events, counter):
    """Scan RTM events for a BugBot attachment or a direct @mention of this bot.

    Returns (message, channel) for the first direct mention found, otherwise
    (None, None). A BugBot attachment triggers an immediate 'assign' command
    as a side effect.
    """
    for event in slack_events:
        # Only plain messages (no subtypes such as edits/joins) are handled.
        if event["type"] != "message" or "subtype" in event:
            continue
        user_id, message = parse_direct_mention(event["text"])
        if "attachments" in event:
            # NOTE(review): assumes BugBot posts always carry at least one
            # attachment with an 'author_subname' field — confirm with Slack API.
            if event["attachments"][0]["author_subname"] == 'BugBot':
                handle_command('assign', event["channel"], counter)
        elif user_id == finn_bot_id:
            return message, event["channel"]
    return None, None
def parse_direct_mention(message_text):
    """Split a leading '<@USERID> rest' into (USERID, rest); (None, None) if absent."""
    found = re.search(MENTION_REGEX, message_text)
    if not found:
        return (None, None)
    return (found.group(1), found.group(2).strip())
def handle_command(command, channel, counter):
    """Execute a bot command ('assign', 'list', 'add @user') and post the
    response to ``channel`` via the Slack API.

    Reads and mutates the module-global ``takers`` list (created in
    __main__) and uses the global ``slack_client``.
    """
    response = None
    if command.startswith('assign'):
        if len(takers) > 0:
            response = takers[counter]
        else:
            response = "There is no one assigned for taking tasks yet. Use the *add* command followed by a user mention."
        # NOTE(review): this increments only the local parameter, so the
        # rotation never advances for the caller — likely a bug.
        if len(takers) > counter + 1:
            counter += 1
    if command.startswith('list'):
        if len(takers) > 0:
            # NOTE(review): a Python list is passed as the message text here;
            # verify Slack accepts it (a joined string is probably intended).
            response = takers
        else:
            response = "There is no one assigned for taking tasks yet. Use the *add* command followed by a user mention."
    if command.startswith('add'):
        # NOTE(review): a bare "add" with no mention raises IndexError here,
        # so the "Not a valid addition" branch below is unreachable.
        mention = command.split()[1]
        if mention:
            takers.append(mention)
            response = "{} added to bug squashing squad.".format(mention)
        else:
            response = "Not a valid addition. Try tagging someone."
    # Post the computed response back to the originating channel.
    slack_client.api_call(
        "chat.postMessage",
        channel = channel,
        text = response,
        as_user = True
    )
if __name__ == "__main__":
    # Connect to Slack's Real Time Messaging firehose.
    if slack_client.rtm_connect(with_team_state=False):
        print("Finn Bot connected and running!")
        # Resolve this bot's own user id so mentions can be recognised.
        finn_bot_id = slack_client.api_call("auth.test")["user_id"]
        # Globals read/mutated by handle_command: the rotation list of
        # assignees and the current rotation index.
        takers = []
        counter = 0
        # Poll the RTM stream forever, dispatching any direct commands.
        while True:
            command, channel = parse_bot_commands(slack_client.rtm_read(), counter)
            if command:
                handle_command(command, channel, counter)
            time.sleep(RTM_READ_DELAY)
    else:
        print("Connection failed. Exception traceback printed above.")
| true |
14ec6f114ee407067bcb866e1d0c80e634a6a33c | Python | Edyta2801/Python-kurs-infoshareacademy | /code/Day_12/pole_klasy.py | UTF-8 | 4,134 | 4.21875 | 4 | [] | no_license | # pola klasy
class Pracownik(object):
    '''Defines an Employee (teaching example for class vs. instance fields).'''
    # Class fields — shared by (visible to) all instances.
    liczba_pracownikow = 0
    roczna_podwyzka = 5
    def __init__(self, imie, stanowisko):
        '''Constructor.

        imie, stanowisko — str, str (name and job title).
        '''
        self.imie = imie
        self.stanowisko = stanowisko
        # Update the class field liczba_pracownikow (employee counter)
        # every time a new Pracownik object is created.
        Pracownik.liczba_pracownikow += 1
    def ustaw_wynagrodzenie(self, kwota):
        '''Set the employee's salary.

        This is an instance method — it changes instance variables only.
        '''
        self.wynagrodzenie = kwota
    def daj_roczna_podwyzke(self):
        '''Give the annual raise — increase the salary by the amount defined
        by the class field.
        '''
        # NOTE(review): with roczna_podwyzka = 5 this multiplies the salary
        # by 1/5 (a 20% raise); if "5" is meant as a percentage, the factor
        # should presumably be roczna_podwyzka / 100 — confirm the intent.
        self.wynagrodzenie += self.wynagrodzenie * (1 / self.roczna_podwyzka)
        # If the computation were written like this instead:
        # self.wynagrodzenie += self.wynagrodzenie * (1 / Pracownik.roczna_podwyzka)
        # we would always use the class field roczna_podwyzka, even if the
        # instance had its own roczna_podwyzka variable.
    def __str__(self):
        '''Custom implementation for printing the object.'''
        return ('{} stanowisko: {} pensja: {}'.format(self.imie, self.stanowisko, self.wynagrodzenie))
    def __del__(self):
        '''Custom implementation for object deletion.
        '''
        print('Pracownik {} zostal usunięty'.format(self.imie))
        Pracownik.liczba_pracownikow -= 1
        # NOTE(review): the format string below has no {} placeholder, so the
        # remaining-employee count is never actually printed.
        print('Aktualna liczba pracowników:'.format(Pracownik.liczba_pracownikow))
# Demo: create two employees and set their salaries.
print('--- Tworzymy dwoch pracownikow i okreslamy wynagrodzenia ---')
prac1 = Pracownik('John Turturo', 'aktor')
prac2 = Pracownik('John Travolta', 'gwiazda')
prac1.ustaw_wynagrodzenie(5000)
prac2.ustaw_wynagrodzenie(8000)
print(prac1)
print(prac2)
# The class field is reachable through the class and through both instances.
print('\n--- Sprawdzamy liczbe pracownikow - wyswietlamy pole klasy liczba_pracownikow ---')
print('Pracownik.liczba_pracowników =', Pracownik.liczba_pracownikow)
print('prac1.liczba_pracownikow =', prac1.liczba_pracownikow)
print('prac2.liczba_pracownikow =', prac2.liczba_pracownikow)
print('\n --- wysokość podwyżki ---')
print('Pracownik.roczna_podwyzka =',Pracownik.roczna_podwyzka)
print('prac1.roczna_podwyzka =',prac1.roczna_podwyzka)
print('prac2.roczna_podwyzka', prac2.roczna_podwyzka)
# Changing the class field is seen by every instance that has no shadow copy.
print('\n--- zmieniamy wys podwyzki na 8 ---')
Pracownik.roczna_podwyzka = 8
print('Pracownik.roczna_podwyzka =',Pracownik.roczna_podwyzka)
print('prac1.roczna_podwyzka =',prac1.roczna_podwyzka)
print('prac2.roczna_podwyzka', prac2.roczna_podwyzka)
print('\n--- Patrzymy w namespace (__dict__) i widzimy do jakich atrybutow maja dostęp ---')
print('\nPracownik.__dict__:\n',Pracownik.__dict__)
print('\nprac1.__dict__:\n',prac1.__dict__)
print('\nprac2.__dict__:\n',prac2.__dict__)
print('\nJak widzimy instancje nie mają pola roczna_podwyzka, dlatego szukaja go wyzej w klasie')
# Assigning on the instance creates a shadowing instance attribute.
print('\n--- zmieniamy wys podwyzki w instancji prac2 na 12 ---')
print('Jak widać wyżej, w __dict__ instancji prac2 nie ma pola roczna_podwyzka')
prac2.roczna_podwyzka = 12
print('Pracownik.roczna_podwyzka =',Pracownik.roczna_podwyzka)
print('prac1.roczna_podwyzka =',prac1.roczna_podwyzka)
print('prac2.roczna_podwyzka', prac2.roczna_podwyzka)
print('\nWidzimy, ze wartosc zmienila sie tylko dla instancji prac2.')
print("Zajrzyjmy jeszcze raz do namespace'ów:")
print('\nPracownik.__dict__:\n',Pracownik.__dict__)
print('\nprac1.__dict__:\n',prac1.__dict__)
print('\nprac2.__dict__:\n',prac2.__dict__)
print('Jak widzimy, teraz obiekt prac2 ma własną zmienną roczna_podwyzka, i z niej bedzie brac wartosc.')
print('Obiekt prac1 nie ma takiej zmiennej, wiec dalej bedzie korzystac z wartosci w polu klasy\n')
# del (prac2.roczna_podwyzka)
# Deleting the instance triggers __del__, which decrements the class counter.
print('--- Usuwamy pracownika prac2 ---')
del prac2
print(Pracownik.liczba_pracownikow)
print('\n--- Teraz program się kończy i Python sam usuwa wszystkie pozostałe obiekty ---')
print('--- Automatycznie wywoła destruktor __del__ dla pozostałych obiektów: ---\n')
| true |
d3467921ec0bc0b28137f2f039a963fe2c66979b | Python | YJAJ/Intelligent_systems | /Assignment1/Breadth_First_Search.py | UTF-8 | 3,553 | 3.484375 | 3 | [] | no_license | from collections import deque
from Utility import is_goal_state, total_state, is_queen_safe_col
class Breadth_First_Search():
    '''Implement pruned breadth first search

    Solves the n-queens problem level by level: each depth places one more
    queen (one row per level), pruning column-unsafe partial placements via
    the Utility helpers. Collected solutions are full n-queen placements
    that pass is_goal_state.
    '''
    def __init__(self):
        # FIFO frontier of partial placements (lists of queen positions).
        self.frontier = deque()
        # NOTE(review): never used below — pruning makes it unnecessary.
        self.explored = set()
        # All complete, valid placements found.
        self.solutions = list()
        self.nSolution = 0
        # Count of states generated so far (the empty state counts as 1).
        self.state = 1
        self.queen_position = 0
    def bfs_search(self, n_queen):
        # initial state no queens
        n_queen_square = n_queen**2
        # calculate and print out total state
        total_state(n_queen)
        # push first row's state to frontier queue
        for index in range(n_queen):
            self.frontier.append([self.queen_position])
            self.queen_position = (self.queen_position + 1)%n_queen_square
            self.state += 1
        # check whether the initial state is the goal state
        initial_queens = self.frontier[0]
        if len(initial_queens)==n_queen and is_goal_state(initial_queens, n_queen):
            self.nSolution += 1
            self.solutions.append(initial_queens)
        # start from level 2 since the empty initial state was level 0 and the one queen on each row was level 1
        depth = 2
        # while depth level is smaller and equal to the number of queen
        while depth <= n_queen:
            branch_node = 0
            # branch size represents the number of nodes on the same depth
            # with pruning, the size of nodes equals to n queen is multiplied by n queen subtracting each one for each depth
            branch_size = n_queen
            for i in range(0, depth-1):
                branch_size *= (n_queen-i)
            # while branch size is smaller than the expected number of branch nodes and frontier is not empty
            while branch_node < branch_size and len(self.frontier)!=0:
                current_queens = self.frontier.popleft()
                # explored set is not required where row separation and column check is undertaken
                # child's node
                queen_position = self.queen_position
                # if the child node is not in the current node, the child node can be appended to the list of nodes for later expansion
                for index in range(n_queen):
                    temp_queens = current_queens.copy()
                    temp_queens.append(queen_position)
                    # no check required for frontier or explored set
                    # because there will be no equivalent sets given the conditions of pruning
                    # check whether the current expanded nodes are the goal state.
                    # if so, add one to solution and append the solution to the solution list.
                    if len(temp_queens)==n_queen and is_goal_state(temp_queens, n_queen):
                        self.nSolution += 1
                        self.solutions.append(temp_queens)
                    # add the new queens only if queens are safe in a column-wise check
                    if len(temp_queens) < n_queen and is_queen_safe_col(temp_queens, n_queen):
                        self.frontier.append(temp_queens)
                    queen_position = (queen_position + 1)%n_queen_square
                    self.state += 1
                    branch_node += 1
                self.queen_position = self.queen_position + n_queen
            depth += 1
        # print the total number of solutions found and return the list of solutions
        print("Number of solutions found: %d" % self.nSolution)
        return self.solutions
9feaaf3cde25820ed476bbbf290055e4ebc16c54 | Python | du4182565/dctest | /python/python_test/storage.py | UTF-8 | 1,039 | 2.828125 | 3 | [] | no_license | __author__ = 'Administrator'
class StoreTest(object):
    """Toy in-memory store: named databases, each holding three numbered
    sections that map a member name to a list of entries.

    The original version did not parse (Python 2 ``print x.[y]`` syntax),
    mixed bound and unbound methods, and referenced misspelled or undefined
    names (``dabases``, free ``basename``). This rewrite keeps the method
    names and apparent intent, with every method a regular instance method.
    """

    # Class-level default; each instance gets its own mapping in __init__.
    databases = {}

    def __init__(self):
        # basename -> {1: {}, 2: {}, 3: {}}; each inner dict maps
        # member_name -> list of stored values.
        self.databases = {}

    def initbase(self, basename):
        """(Re)create ``basename`` with three empty numbered sections."""
        self.databases[basename] = {1: {}, 2: {}, 3: {}}

    def add_database(self, basename):
        """Register a new (empty) database called ``basename``."""
        self.initbase(basename)

    def del_database(self, basename):
        """Remove ``basename`` if present, otherwise report it is missing."""
        if basename in self.databases:
            self.databases.pop(basename)
        else:
            print('%s is not exit' % basename)

    def view_data_base(self):
        """Print 'index,name' for every registered database."""
        for index, base in enumerate(self.databases):
            print('%s,%s' % (index, base))

    def add_member(self, database, member_name, label=1):
        """Append ``member_name`` to its own entry list in section ``label``."""
        self.databases[database][label].setdefault(member_name, []).append(member_name)

    # def del_member(self, database, member_name, label):
    #     self.databases[database][label].pop(member_name, None)

    def view_member(self, database, member_name, label):
        """Print the entry list stored for ``member_name`` in section ``label``."""
        print(self.databases[database][label][member_name])
| true |
9a7fa26211cc1a8accbef8933cedd62375a3784e | Python | gottaegbert/penter | /tensorflow_v2/dragen1860/Tutorials/01-TF2.0-Overview/conv_train.py | UTF-8 | 5,027 | 2.796875 | 3 | [
"MIT"
] | permissive | import os
import time
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # or any {'0', '1', '2'}
import tensorflow as tf
from tensorflow.python.ops import summary_ops_v2
from tensorflow import keras
from tensorflow.keras import datasets, layers, models, optimizers, metrics
# Small CNN for MNIST: reshape 28x28 grayscale to NHWC, two conv+pool
# stages, then a dense head emitting 10 raw class logits (no softmax).
model = tf.keras.Sequential([
    layers.Reshape(
        target_shape=[28, 28, 1],
        input_shape=(28, 28,)),
    layers.Conv2D(2, 5, padding='same', activation=tf.nn.relu),
    layers.MaxPooling2D((2, 2), (2, 2), padding='same'),
    layers.Conv2D(4, 5, padding='same', activation=tf.nn.relu),
    layers.MaxPooling2D((2, 2), (2, 2), padding='same'),
    layers.Flatten(),
    layers.Dense(32, activation=tf.nn.relu),
    layers.Dropout(rate=0.4),
    layers.Dense(10)])
# from_logits=True matches the model's raw-logit output layer above.
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
optimizer = optimizers.SGD(learning_rate=0.01, momentum=0.5)
def mnist_datasets():
    """Load MNIST and wrap both splits as tf.data Datasets.

    Pixels are scaled to [0, 1] float32 (TF's default float type);
    labels are cast to int64 for the sparse-crossentropy loss.
    """
    (x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
    # Numpy defaults to dtype=float64; TF defaults to float32. Stick with float32.
    x_train = x_train / np.float32(255)
    x_test = x_test / np.float32(255)
    y_train = y_train.astype(np.int64)
    y_test = y_test.astype(np.int64)
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    return train_dataset, test_dataset
train_ds, test_ds = mnist_datasets()
# Shuffle across the full 60k training examples; batch both splits at 100.
train_ds = train_ds.shuffle(60000).batch(100)
test_ds = test_ds.batch(100)
def train_step(model, optimizer, images, labels):
    """Run one SGD step on a batch; returns the batch loss tensor.

    Also folds the batch into the global compute_accuracy metric.
    """
    # Record the operations used to compute the loss, so that the gradient
    # of the loss with respect to the variables can be computed.
    with tf.GradientTape() as tape:
        logits = model(images, training=True)
        loss = compute_loss(labels, logits)
    compute_accuracy(labels, logits)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss
def train(model, optimizer, dataset, log_freq=50):
    """
    Trains model on `dataset` using `optimizer`.

    Prints running loss/accuracy every `log_freq` optimizer steps and
    resets both accumulators afterwards.
    """
    # Metrics are stateful. They accumulate values and return a cumulative
    # result when you call .result(). Clear accumulated values with .reset_states()
    avg_loss = metrics.Mean('loss', dtype=tf.float32)
    # Datasets can be iterated over like any other Python iterable.
    for images, labels in dataset:
        loss = train_step(model, optimizer, images, labels)
        avg_loss(loss)
        if tf.equal(optimizer.iterations % log_freq, 0):
            # TensorBoard summaries are disabled; console logging only.
            # summary_ops_v2.scalar('loss', avg_loss.result(), step=optimizer.iterations)
            # summary_ops_v2.scalar('accuracy', compute_accuracy.result(), step=optimizer.iterations)
            print('step:', int(optimizer.iterations),
                  'loss:', avg_loss.result().numpy(),
                  'acc:', compute_accuracy.result().numpy())
            avg_loss.reset_states()
            compute_accuracy.reset_states()
def test(model, dataset, step_num):
    """
    Perform an evaluation of `model` on the examples from `dataset`.

    Averages the loss over all batches and prints loss plus cumulative
    accuracy. `step_num` is only used by the (disabled) summary calls.
    """
    avg_loss = metrics.Mean('loss', dtype=tf.float32)
    for (images, labels) in dataset:
        logits = model(images, training=False)
        avg_loss(compute_loss(labels, logits))
        compute_accuracy(labels, logits)
    print('Model test set loss: {:0.4f} accuracy: {:0.2f}%'.format(
        avg_loss.result(), compute_accuracy.result() * 100))
    print('loss:', avg_loss.result(), 'acc:', compute_accuracy.result())
    # summary_ops_v2.scalar('loss', avg_loss.result(), step=step_num)
    # summary_ops_v2.scalar('accuracy', compute_accuracy.result(), step=step_num)
# Where to save checkpoints, tensorboard summaries, etc.
MODEL_DIR = '/tmp/tensorflow/mnist'
def apply_clean():
    # Delete any previous run's artifacts so training starts fresh.
    if tf.io.gfile.exists(MODEL_DIR):
        print('Removing existing model dir: {}'.format(MODEL_DIR))
        tf.io.gfile.rmtree(MODEL_DIR)
apply_clean()
checkpoint_dir = os.path.join(MODEL_DIR, 'checkpoints')
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
# Restore variables on creation if a checkpoint exists.
# (A no-op right after apply_clean(), which just removed MODEL_DIR.)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
NUM_TRAIN_EPOCHS = 5
for i in range(NUM_TRAIN_EPOCHS):
    start = time.time()
    # with train_summary_writer.as_default():
    train(model, optimizer, train_ds, log_freq=500)
    end = time.time()
    print('Train time for epoch #{} ({} total steps): {}'.format(
        i + 1, int(optimizer.iterations), end - start))
    # with test_summary_writer.as_default():
    #     test(model, test_ds, optimizer.iterations)
    # Save a checkpoint after every epoch.
    checkpoint.save(checkpoint_prefix)
    print('saved checkpoint.')
# Export the trained model in SavedModel format for serving.
export_path = os.path.join(MODEL_DIR, 'export')
tf.saved_model.save(model, export_path)
print('saved SavedModel for exporting.')
3a9bcd279b40b916a8df92da5169d37780d57d4e | Python | EinarK2/einark2.github.io | /_site/Forritun/Forritun 2/Skila 4/mætingarlisti.py | UTF-8 | 362 | 2.96875 | 3 | [
"CC-BY-4.0"
] | permissive | n, r, c = [int(x) for x in input().split()]
# Competitive-programming stdin script. n, r, c are read on the first input
# line (outside this excerpt): n names total, r row lines, c a split point.
l1 = []  # raw row lines
l2 = []  # the n individually-entered names
l3 = []  # row lines flattened into single names
for x in range(r):
    radirnofn = input()
    l1.append(radirnofn)
for x in l1:
    l3.extend(x.split())
for x in range(n):
    nofn = input()
    l2.append(nofn)
# Compare the first c names from both listings.
# NOTE(review): prints "left" on a match and "right" otherwise — confirm
# this mapping against the original problem statement.
if l3[:c] == l2[:c]:
    print("left")
else:
    print("right")
# Compare the remainder after the first c names.
if l3[c:] == l2[c:]:
    print("left")
else:
    print("right")
| true |
5f570d811bd2ca14f441341e3f869262250fbd06 | Python | sabeeh99/Batch-2 | /Marjana_Sathar.py | UTF-8 | 289 | 3.703125 | 4 | [] | no_license | #MARJANA SATHAR-2-COUNTDOWN(TIME MODULE)
import time
'''we import time module so we can use sleep which delays time'''
def num():
    """Print a countdown from 10 to 1, pausing one second between numbers."""
    for remaining in reversed(range(1, 11)):
        print(remaining)
        time.sleep(1)


num()
| true |
e2102084ef2d969dc1c157500d9b91c11fbec9f7 | Python | git123hub121/Python-spider | /爬取王者荣耀图片/wzry.py | UTF-8 | 2,268 | 2.765625 | 3 | [] | no_license | import requests
import os
# Fetch the official Honor of Kings hero roster (JSON).
url = 'https://pvp.qq.com/web201605/js/herolist.json'
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36'
}
response = requests.get(url=url, headers=header)
# .json() deserializes the response body into a list of hero dicts.
hero_list = response.json()
# 'cname' is the hero display name. The original misspelled this list as
# ``her0_name`` while save_IMG() reads ``hero_name``, causing a NameError
# on first use — fixed here.
hero_name = list(map(lambda x: x['cname'], hero_list))
# 'ename' is the numeric hero id used to build skin-image URLs.
hero_number = list(map(lambda x: x['ename'], hero_list))
hero_title = list(map(lambda x: x['title'], hero_list))
# skin_name = list(map(lambda x: x['skin_name'].split('|'), hero_list))
def save_IMG():
    """Probe every hero's big-skin URLs and report how many images exist.

    For each hero id, tries skin indexes 0-14 against the CDN and counts
    the URLs that answer HTTP 200. The actual file download is left
    commented out, as in the original.
    """
    num = 0
    count = 0
    h_l = 'http://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/'
    for i in hero_number:
        print('='*10)
        num = num+1
        print(num,hero_name[num-1],hero_title[num-1])
        for sk_num in range(15):
            hsl = h_l+str(i)+'/'+str(i)+'-bigskin-'+str(sk_num)+'.jpg'
            # Only fetches the response; a 200 status means the skin exists.
            hl = requests.get(hsl)
            if hl.status_code == 200:
                count = count+1
                print(count,hsl)
                # Save the image, named "<hero name>_<skin index>":
                # with open('image/'+hero_name[num]+str(sk_num)+'.jpg','wb') as f:
                #     f.write(hl.content)
    # str(count): the original concatenated the int directly onto the
    # string, raising TypeError after all requests had completed.
    print("总共下载"+str(count)+"张皮肤图片!!!")
save_IMG()
# Ma Chao has no 'skin_name' field (KeyError: 'skin_name'), so skin names
# are not collected for now.
# Reference crawler write-up: https://blog.csdn.net/u014361280/article/details/104236704
| true |
ba070f30cb1e3213971c6d937339db63a9020041 | Python | ravalrupalj/BrainTeasers | /Edabit/Retrieve_the_Subreddit.py | UTF-8 | 429 | 3.609375 | 4 | [] | no_license | #Retrieve the Subreddit
#Create a function to extract the name of the subreddit from its URL.
import re
def sub_reddit(string):
    """Extract the subreddit name from a URL ending in /r/<name>/ ('' if absent)."""
    matches = re.findall(r"/(\w+)/$", string)
    return ''.join(matches)
# Demo calls; the arrow comments show the expected output of each line.
print(sub_reddit("https://www.reddit.com/r/funny/") )
#➞ "funny"
print(sub_reddit("https://www.reddit.com/r/relationships/") )
#➞ "relationships"
print(sub_reddit("https://www.reddit.com/r/mildlyinteresting/") )
#➞ "mildlyinteresting"
| true |
2084449f72b58dff73d3131e0ee8ed12b44acd71 | Python | phattymcG/useful_scripts | /hashcrack.py | UTF-8 | 1,122 | 3.734375 | 4 | [] | no_license | #!/usr/bin/python
import sys
from hashlib import sha1
def main(wordlist):
    """Crack a hash using a wordlist.

    Simple loop to crack a hash. Default is the default user password
    algorithm for post-4.1 mySQL. A simple sha1 alternative is listed
    as well.

    Assumes a wordlist consisting of words separated by newlines.

    Due to the use of raw_input, this won't leave hashes in shell
    history when executing in non-interactive mode. It will leave
    the wordlist file, though.

    wordlist: path to a newline-separated candidate-password file.
    Exits the process (sys.exit) on the first match; otherwise prints
    a not-found summary. Python 2 only (raw_input, print statements).
    """
    # read hash to be cracked
    hashtocrack = raw_input('Hash to crack: ')
    # read line of wordlist file
    with open(wordlist,'r') as f:
        counter = 0
        for word in f:
            counter += 1
            # create hash
            #hash1 = '*' + sha1(word.strip()).hexdigest().upper()
            # '*' + uppercase hex of sha1(sha1(word)) is MySQL's PASSWORD() format.
            hash1 = '*' + sha1(sha1(word.strip()).digest()).hexdigest().upper()
            # compare hashes
            if hashtocrack == hash1:
                print "\nThe original string is", word
                sys.exit()
    print "Original string not found in", counter,"hashes"
# Entry point: the wordlist path is the first CLI argument.
main(sys.argv[1])
| true |
9487a25bb3ee7a9804e97d4058e2c6f5bbfea889 | Python | victorfarias/Exemplos_sala_ML_2020_1 | /5_normalizacao.py | UTF-8 | 824 | 3.40625 | 3 | [] | no_license | import numpy as np
def normalizacao01(x):
    """Min-max scale x into the [0, 1] range."""
    lo = np.amin(x)
    hi = np.amax(x)
    span = hi - lo
    return (x - lo) / span
def normalizacao_media(x):
    """Mean-normalize x: subtract the mean, divide by the value range."""
    lo = np.amin(x)
    hi = np.amax(x)
    center = np.mean(x)
    return (x - center) / (hi - lo)
def padronizar(x):
    """Standardize x to zero mean and unit (population) standard deviation."""
    centered = x - np.mean(x)
    return centered / np.std(x)
def normalizar_X_media(X):
    """Mean-normalize every column of X in place (returns None)."""
    n_cols = X.shape[1]
    for col in range(n_cols):
        X[:, col] = normalizacao_media(X[:, col])
# Demo matrix: two columns with very different scales.
X = np.array(
    [[5 , 0.2],
    [2 , 0.1],
    [10, 0.7],
    [11, 0.5]]
)
print()
# Min-max scaling: result spans exactly [0, 1].
norm = normalizacao01(X[:,1])
print(norm)
print(np.amax(norm))
print(np.amin(norm))
print()
# Mean normalization: result has mean 0.
norm1 = normalizacao_media(X[:,1])
print(norm1)
print(np.mean(norm1))
print()
# Standardization: result has mean 0 and std 1.
norm2 = padronizar(X[:,1])
print(norm2)
print(np.mean(norm2))
print(np.std(norm2))
# Normalize every column of X in place, then show the mutated matrix.
normalizar_X_media(X)
print(X)
| true |
0724f64598fdbea3ea80d4b1cfd122c57dbef522 | Python | hassyGo/NLP100knock2015 | /iwai/set6/exp57.py | UTF-8 | 1,732 | 3.03125 | 3 | [] | no_license | # !/usr/bin/python
# coding:UTF-8
# 6-(57):かかり受け解析
#Stanford Core NLPの係り受け解析の結果(collapsed-dependencies)を有向グラフとして可視化せよ.可視化には,係り受け木をDOT言語に変換し,Graphvizを用いるとよい.また,Pythonから有向グラフを直接的に可視化するには,pydotを使うとよい.
#.dotの実行方法 -> dot -Tpng 57.dot -o 57.png
import re
import sys
import exp50
def make_collapsed_dependencies(document):
    """Group lines of an XML dump into per-sentence chunks.

    Collecting starts at a '<collapsed-dependencies>' line (the opening
    tag itself is included, matching the original behaviour) and each
    chunk is closed by '</collapsed-dependencies>' (excluded).
    """
    sentences = []
    current = []
    collecting = False
    for line in document:
        if line == '<collapsed-dependencies>':
            collecting = True
        if line == '</collapsed-dependencies>':
            sentences.append(current)
            current = []
            collecting = False
        if collecting:
            current.append(line)
    return sentences
def search_governor(line):
    """Return the governor token from a <governor ...> line, else None."""
    found = re.match('<governor idx=".+">(\w+)</governor>', line)
    return found.group(1) if found else None
def search_dependent(line):
    """Return the dependent token from a <dependent ...> line, else None."""
    found = re.match('<dependent idx=".+">(\w+)</dependent>', line)
    return found.group(1) if found else None
def main():
    """Render sentence #3's collapsed dependencies as a Graphviz digraph.

    Redirects stdout to 57.dot and emits one 'governor -> dependent'
    edge per dependency. Python 2 only (print statements).
    """
    fw = open('57.dot', 'w')
    # All subsequent print statements go to the .dot file.
    sys.stdout = fw
    document = exp50.read('50.txt.xml')
    collapsed_dependencies = make_collapsed_dependencies(document)
    print 'digraph sample{'
    print '', 'graph [rankdir = LR];'
    # Index 2 = the third sentence, per the exercise.
    for line in collapsed_dependencies[2]:
        governor = search_governor(line)
        dependent = search_dependent(line)
        # A governor line emits the edge tail; the matching dependent
        # line (printed without a trailing comma) completes the edge.
        if governor != None:
            print governor, '->',
        if dependent != None:
            print dependent
    print '}'
    fw.close()
if __name__ == "__main__":
    main()
| true |
84918c08eea792888062a74b508da24969ceb1f8 | Python | MYMSSENDOG/leetcodes | /16. 3Sum Closest.py | UTF-8 | 1,392 | 3.359375 | 3 | [] | no_license | def differ (a:int, b:int)->int:
if a>b:
return a-b
else:
return b-a
# LeetCode "3Sum Closest" scratch script: find the triple sum closest to
# target using a sorted array and two pointers around each middle element.
nums = [0,2,1,-3]
target = 1
difference = 9999999   # best |target - sum| seen so far
min_def = 99999999     # sum achieving that best difference
nums.sort()
n = len(nums)
for mid in range(1, n - 1):
    left = 0
    right = n - 1
    #################################### pruning added to the basic two-pointer template, down to the next marker
    # NOTE(review): biggest_this_turn uses nums[mid - 1] as one addend —
    # verify this bound against the intended mid/left/right combination.
    biggest_this_turn = nums[mid] + nums[mid - 1] + nums[n-1]
    smallest_this_trun = nums[mid] + nums[mid + 1] + nums[0]
    if target<smallest_this_trun:# if the target is below this row's smallest sum, compare it with the best recorded so far and output
        d = smallest_this_trun - target
        if d > difference:
            print(min_def)#return min_def
        else:
            print(smallest_this_trun)#return smallest_
    elif target > biggest_this_turn:
        min_def = biggest_this_turn
        difference = target - biggest_this_turn
        continue
    ####################################
    # Standard two-pointer sweep with left < mid < right enforced below.
    while left < right:
        s = nums[left] + nums[mid] + nums[right]
        d = differ(target, s)
        if difference > d:
            difference = d
            min_def = s
        if d == 0:
            print(s)#return d
        elif s > target:
            if right - 1 == mid:
                break
            right -= 1
        elif s < target:
            if left +1 == mid:
                break
            left += 1
print(min_def)
f2c0e4114b4ec2169941d3eb03d12e26f62d9706 | Python | TrickyOldFox/UsefullThings | /Modules/mod_math.py | UTF-8 | 294 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 14 23:36:35 2019
@author: ihor
"""
def Lsin(a):
    """Element-wise sine of a sequence, returned as a list."""
    import math
    return [math.sin(value) for value in a]
def Lcos(a):
    """Element-wise cosine of a sequence, returned as a list."""
    import math
    return [math.cos(value) for value in a]
def Lexp(a):
    """Element-wise exponential of a sequence, returned as a list."""
    import math
    return [math.exp(value) for value in a]
| true |
2750753b983a258eadd65a61d585c4ca9ed246b7 | Python | DevaO7/Applied-Programming-Assignments | /APL/A5/Report Files/code1.py | UTF-8 | 1,118 | 3.21875 | 3 | [] | no_license | # Initializing the Potential Array
# NOTE(review): report snippet — assumes a star-import (e.g. `from pylab
# import *`) plus Nx, Ny, radius, Niter defined earlier; not runnable alone.
# phi holds the potential on an Nx x Ny grid; the central disc is held at 1.
phi = zeros([Nx, Ny])
x = arange(-(Nx-1)/2, (Nx+1)/2)
y = arange(-(Ny-1)/2, (Ny+1)/2)
X, Y = meshgrid(x, y)
# Indices of the grid points inside the disc of the given radius.
ii = where(X*X+Y*Y <= radius*radius)
phi[ii] = 1
error = []
# Updating the Potential, Computing the Error , Updating the Boundary Conditions
for i in range(Niter):
    oldphi = phi.copy()
    # Interior update: each point becomes the average of its 4 neighbours.
    phi[1:-1, 1:-1] = 0.25 * \
        (phi[1:-1, 0:-2] + phi[1:-1, 2:] + phi[0:-2, 1:-1] + phi[2:, 1:-1])
    # Copy the adjacent interior column/row onto three boundaries, then
    # re-pin the disc to 1 after each sweep.
    phi[:, 0] = phi[:, 1]
    phi[:, -1] = phi[:, -2]
    phi[0, :] = phi[1, :]
    phi[ii] = 1
    # Track the max per-iteration change as the convergence error.
    error = error+[(abs(phi-oldphi)).max()]
phi1 = phi[::-1]
# Contour plot of the Potential Function
contourf(X, Y, phi1, levels=20)
scatter(ii[0] - (Nx - 1) / 2, ii[1] - (Ny - 1) / 2, color='r', s=20)
xlabel('X axis')
ylabel('Y axis')
title('Contour Plot for the Potential')
show()
# Surface Plot for the potential
fig1 = figure(4)  # open a new figure
ax = p3.Axes3D(fig1)  # Axes3D is the means to do a surface plot
title('The 3-D surface plot of the potential')
surf = ax.plot_surface(X, Y, phi1.T, rstride=1, cstride=1, cmap=cm.jet)
show()
| true |
ad6f89bb4a59c8a0565151cb269dad69d8fbe2fc | Python | giga487/invertedPendulum | /Amato_Visualizzatore_NoFilter_Quaternioni_CuboNuovo/lettura_seriale.py | UTF-8 | 2,223 | 2.6875 | 3 | [] | no_license | import serial
import threading
from threading import Thread
import filtraggio
import time
import math
class LetturaSeriale(Thread):
    """Background thread that reads quaternion lines from a serial port
    and copies the parsed values into a shared IMU object under a lock.
    """
    def __init__(self,nome,time_read,imu):
        """nome: thread label; time_read: seconds to keep reading;
        imu: shared object receiving q1..q4 and rate."""
        Thread.__init__(self)
        self.nome = nome
        self.time_read = time_read
        self.imu = imu
        self.lock = threading.Lock()
        # Raw sensor placeholders (accelerometer/gyro/magnetometer),
        # unused by run() below.
        self.ax = 0
        self.ay = 0
        self.az = 0
        self.gx = 0
        self.gy = 0
        self.gz = 0
        self.mx = 0
        self.my = 0
        self.mz = 0
        self.yaw = 0
        self.roll = 0
        self.pitch = 0
        self.q1 = 0
        self.q2 = 0
        self.q3 = 0
        self.q4 = 0
    def convert(self,elemento,old):
        """Parse ``elemento`` as float; on any failure keep ``old``.

        NOTE(review): the bare except also hides non-parse errors.
        """
        try:
            data_conversion = float(elemento)
            return data_conversion
        except:
            return old
    def run(self):
        """Read serial lines for time_read seconds, parsing 'string_'-tagged
        quaternion lines into self.imu (q1..q4 at tokens 3/5/7/9, rate at 11).
        """
        i = 0
        # NOTE(review): hard-coded Windows port and baud rate; 10 ms timeout.
        ser = serial.Serial("com7", 38400, timeout= 0.010)
        tempo_iniziale = time.time()
        tempo_finale = tempo_iniziale + float(self.time_read)
        deltat = float(tempo_finale - tempo_iniziale)
        dati = [0]
        d = 0
        while deltat > 0:
            d = d + 1
            i = i + 1
            dati.append(ser.readline())
            stringa_esame = str(dati[d])
            #stringa_esame = "string_ q1 0 q2 0.4 q3 0.5 q4 0"
            stringa_lista = stringa_esame.split(" ")
            #print(stringa_lista)
            # NOTE(review): if float() raises here the lock is never
            # released — consider a with-statement around the update.
            self.lock.acquire()
            if stringa_esame.find("string_") != -1:
                self.imu.q1 = float(stringa_lista[3])
                self.imu.q2 = float(stringa_lista[5])
                self.imu.q3 = float(stringa_lista[7])
                self.imu.q4 = float(stringa_lista[9])
                self.imu.rate = float(stringa_lista[11])
                #print("q1 %s q2 %s q3 %s q4 %s rate %s"%(self.imu.q1,self.imu.q2,self.imu.q3,self.imu.q4, self.imu.rate))
            self.lock.release()
            time.sleep(self.imu.get_tC())
            now = time.time()
            deltat = float(tempo_finale - now)
        print("Fine Processo %s"%self.nome)
| true |
7d36e3995d31b68b3465f05eaf5a17273d2d9e0e | Python | Aasthaengg/IBMdataset | /Python_codes/p00005/s272165987.py | UTF-8 | 194 | 3.328125 | 3 | [] | no_license | def gcd(a,b):
if a%b==0:
return b
return gcd(b,a%b)
# Read integer pairs from stdin until EOF (or any malformed line); for
# each pair print its GCD and LCM (a // gcd * b avoids overflow ordering).
# NOTE(review): the bare except also swallows ValueError on bad input,
# silently ending the loop.
while True:
    try:
        a,b=sorted(map(int,input().split()))
    except:
        break
    print(gcd(a,b),a//gcd(a,b)*b)
fe588f222435a0f560d27014c4f1b996f65ae1c2 | Python | OldJohn86/Langtangen | /chapter3/Heaviside.py | UTF-8 | 252 | 3.5625 | 4 | [] | no_license | def Heaviside(x):
'''
Computes the Heaviside funtion
'''
if x < 0:
return 0
else:
return 1
# Demo (Python 2 print statement): evaluate the step function at -1, 0, 1.
for i in range(-1,2):
    print 'Heaviside(%d) = %d' % (i, Heaviside(i))
'''
python Heaviside.py
Heaviside(-1) = 0
Heaviside(0) = 1
Heaviside(1) = 1
'''
| true |
5f8182c743e4afad041afbc3ad586072d01185f3 | Python | Shrey09/Sentiment-Semantic-Analysis | /sentiment_analysis.py | UTF-8 | 2,777 | 3.328125 | 3 | [] | no_license | import csv
import re
from langdetect import detect
tweet=[] #store raw tweets
tweets=[] # store cleaned tweets
f1=open("positive-words.txt","r") #dictionary of positive words
f2=open("negative-words.txt","r") #dictionary of negative words
f3=open("stop-words.txt","r") #dictionary of stop-words
pos=f1.read()
pos=pos.split("\n")
neg=f2.read()
neg=neg.split("\n")
stopwords=f3.read()
stopwords=stopwords.split("\n")
print("positive dictionary :",pos)
print("negative dictionary :",neg)
print("stopwords dictionary :",stopwords)
with open('stream.csv', 'r') as csvFile: #read tweets
reader = csv.reader(csvFile)
for row in reader:
if len(row)!=0:
try:
language=detect(row[3]) #consider only text from the tweets i.e row[3]
# print(b)
if language=="en": # consider only english tweets
# print(row[3])
tweet.append(row[3])
row = re.sub(r"[^a-zA-Z0-9]+", ' ', row[3]) #reomve sepcial characters
row=row.replace("rt"," ") #remove rt from the tweets
tweets.append(row)
except:
pass
print("Raw tweets\n",tweet)
print("Cleaned tweets \n",tweets)
words=[]
for i in tweets:
# generate tokens by splitting tweets into words
tokens=i.split(" ")
tokens=[token for token in tokens if len(token)!=0]
words.append(tokens)
print("Tokenising tweets \n",words)
no_stopwords=[]
temp=[]
for i in words: #remove stopwords from tweets
for j in i:
if j not in stopwords:
temp.append(j)
no_stopwords.append(temp)
temp=[]
print("Removed the stopwords\n",no_stopwords)
count=0
tweet_polarity=[]
positive=0
negative=0
neutral=0
# determine polarity of tweets
for twt in no_stopwords:
pos_count=0
neg_count=0
polarity=""
temp=[]
for word in twt:
if word in pos:
pos_count+=1
if word in neg:
neg_count+=1
if pos_count>neg_count:
polarity="positive"
positive+=1
elif neg_count>pos_count:
polarity="negative"
negative+=1
else:
polarity="neutral"
neutral+=1
temp.append(tweets[count])
temp.append(polarity)
tweet_polarity.append(temp)
with open("tweet_sentiment.csv","a+",newline='') as csvFile:
columns=["Tweets","Polarity"]
writer=csv.DictWriter(csvFile,fieldnames=columns)
Dict={"Tweets":tweets[count],"Polarity":polarity}
writer.writerow(Dict)
count+=1
print("Total tweets :",count)
print("Tweets with polarity \n",tweet_polarity)
print("Total Positive Tweets :",positive)
print("Total Negative Tweets :",negative)
print("Total Neutral Tweets :",neutral) | true |
3b18bcf021a56f2f10f8339a555cf61746135a39 | Python | j1nn33/study | /python/conspect/Parser/regular/parse_dhcp_snooping.py | UTF-8 | 3,134 | 3.3125 | 3 | [] | no_license | # Разбор вывода команды show ip dhcp snooping с помощью именованных групп
# -*- coding: utf-8 -*-
import re
#'00:09:BB:3D:D6:58 10.1.10.2 86250 dhcp-snooping 10 FastEthernet0/1'
regex = re.compile('(?P<mac>\S+) +(?P<ip>\S+) +\d+ +\S+ +(?P<vlan>\d+) +(?P<port>\S+)')
"""
(?P<mac>\S+) + - в группу с именем 'mac' попадают любые символы, кроме whitespace.
Получается, что выражение описывает последовательность любых символов до пробела
(?P<ip>\S+) + - тут аналогично, последовательность любых символов, кроме whitespace, до пробела.
Имя группы 'ip'
(\d+) + - числовая последовательность (одна или более цифр), а затем один или
более пробелов сюда попадет значение Lease
\S+ + - последовательность любых символов, кроме whitespace сюда попадает тип соответствия
(в данном случае все они dhcp-snooping)
(?P<vlan>\d+) + - именованная группа 'vlan'. Сюда попадают только числовые
последовательности с одним или более символами
(?P<int>.\S+) - именованная группа 'int'. Сюда попадают любые символы, кроме whitespace
"""
# Collect one dict per matching line of the DHCP-snooping table dump;
# each dict carries the regex's named groups: mac, ip, vlan, port.
result = []
with open('dhcp_snooping.txt') as data:
    for line in data:
        match = regex.search(line) # find the lines in the file that match the regular expression
        #print(match)
        if match:
            result.append(match.groupdict())
            #print('wwwwwwwwww')
            #print(match.groupdict())
            #print('wwwwwwwwww')
print (result)
print('К коммутатору подключено {} устройства'.format(len(result))) # number of elements
for num, comp in enumerate(result, 1):
print('num',num)
print('comp',comp)
print('Параметры устройства {}:'.format(num))
for key in comp:
print('{:10}: {:10}'.format(key,comp[key]))
'''
Example:
$ python parse_dhcp_snooping.py
К коммутатору подключено 4 устройства
Параметры устройства 1:
mac : 00:09:BB:3D:D6:58
ip : 10.1.10.2
vlan : 10
int : FastEthernet0/1
Параметры устройства 2:
mac : 00:04:A3:3E:5B:69
ip : 10.1.5.2
vlan : 5
int : FastEthernet0/10
Параметры устройства 3:
mac : 00:05:B3:7E:9B:60
ip : 10.1.5.4
vlan : 5
int : FastEthernet0/9
Параметры устройства 4:
mac : 00:09:BC:3F:A6:50
ip : 10.1.10.6
vlan : 10
int : FastEthernet0/3
'''
| true |
dc7ef09e6638691654877ee6d8cf4e083183516f | Python | jonahhill/mlprojects-py | /TimeSeriesRegression/algorithms/XGBoost.py | UTF-8 | 617 | 2.828125 | 3 | [] | no_license | import pandas as pd
import xgboost as xgb
#http://datascience.stackexchange.com/questions/9483/xgboost-linear-regression-output-incorrect
#http://xgboost.readthedocs.io/en/latest/get_started/index.html
#https://www.kaggle.com/c/higgs-boson/forums/t/10286/customize-loss-function-in-xgboost
# Minimal XGBoost linear-regression example: y = 10 * x on three points.
df = pd.DataFrame({'x':[1,2,3], 'y':[10,20,30]})
X_train = df.drop('y',axis=1)
Y_train = df['y']
# DMatrix is XGBoost's internal container for features + labels.
T_train_xgb = xgb.DMatrix(X_train, Y_train)
params = {"objective": "reg:linear", "booster":"gblinear"}
gbm = xgb.train(dtrain=T_train_xgb,params=params)
# Predict for x = 4 and x = 5 (NOTE: Python 2 print statement below).
Y_pred = gbm.predict(xgb.DMatrix(pd.DataFrame({'x':[4,5]})))
print Y_pred
7f3a82c677345ec89c558e12350012870464fac5 | Python | ck-china/pythonjc | /mon2/p02/zuoye.py | UTF-8 | 1,109 | 3.140625 | 3 | [] | no_license | class ZhiWu:
def __init__(self,name,gongneng,dongzuo):
self.name=name
self.gongneng=gongneng
self.dongzuo=dongzuo
def jieshao(self):
print('我是 %s ,我的作用是 %s'%(self.name,self.gongneng))
def zhuangtai(self):
print('%s 正在 %s '%(self.name,self.dongzuo))
def __str__(self):
abc=self.name+'正在守护院子,臭傻逼你竟然还在蹦迪土嗨???'
return abc
class JiangShi:
    """Zombie with a name, a movement speed and an outfit description."""
    def __init__(self,name,sudu,fuzhuang):
        self.name=name
        self.sudu=sudu
        self.fuzhuang=fuzhuang
    def jieshao(self):
        # Introduce the zombie: its name and how fast it runs.
        print('我是 %s僵尸,我跑得%s'%(self.name,self.sudu))
    def zhuangtai(self):
        # Describe what the zombie is wearing.
        print('%s僵尸穿的%s'%(self.name,self.fuzhuang))
a=ZhiWu('向日葵','产阳光','摇头')
a.jieshao()
a.zhuangtai()
b=JiangShi('普通','慢','普普通通')
b.jieshao()
b.zhuangtai()
c=ZhiWu('豌豆射手','发射社会主义豌豆','疯狂摇头吐豆豆')
c.jieshao()
c.zhuangtai()
d=JiangShi('橄榄球','快',' +15华丽的橄榄球运动套装')
d.jieshao()
d.zhuangtai()
print(a)
print(c)
| true |
6d4f3767ee7015a8802e65a6813daf7c5f5616c4 | Python | kidache97/CasualNotes | /leetcode/leetcode2.py | UTF-8 | 10,665 | 3.59375 | 4 | [] | no_license | from typing import List
from collections import defaultdict
import collections
'''
01背包问题
二分查找
快速排序
两数之和(数组无序)
两数之和(数组有序)
....
'''
class ListNode(object):
    """Singly linked list node."""
    def __init__(self, x):
        self.val = x
        self.next = None


def removeNthFromEnd(head, n):
    """Remove the n-th node from the end of the list; return the new head.

    Iterative two-pointer rewrite: the original recursive version kept its
    counter in a module-level ``global i``, which made the function
    non-reentrant and fragile.  This version needs no shared state.
    """
    if head is None:
        return None
    # Dummy node simplifies removing the head itself (n == list length).
    dummy = ListNode(0)
    dummy.next = head
    lead = dummy
    # Advance the lead pointer n steps so the gap between pointers is n.
    for _ in range(n):
        lead = lead.next
    trail = dummy
    while lead.next is not None:
        lead = lead.next
        trail = trail.next
    # trail now precedes the node to delete.
    trail.next = trail.next.next
    return dummy.next
def binary_search(nums: List[int], target: int) -> int:
    """Locate *target* in the sorted list *nums*.

    Returns an index holding *target*, or -1 when it is absent.
    """
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        probe = nums[mid]
        if probe == target:
            return mid
        if probe < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
def knapsack(W: List[int], V: List[int]) -> int:
    """0/1 knapsack.

    Input convention (kept from the original): W[0] is the number of
    items, V[0] is the knapsack capacity, and item i (1-based) weighs
    W[i] and is worth V[i].  Prints the best value and returns it.
    """
    count, capacity = W[0], V[0]
    # One-dimensional rolling table: best[c] == best value within capacity c.
    best = [0] * (capacity + 1)
    for item in range(1, count + 1):
        # Walk capacities downwards so each item is taken at most once.
        for cap in range(capacity, W[item] - 1, -1):
            candidate = best[cap - W[item]] + V[item]
            if candidate > best[cap]:
                best[cap] = candidate
    print(best[-1])
    return best[-1]
def maxArea(height: List[int]) -> int:
    """Container-with-most-water: best area between two walls (two pointers)."""
    best = 0
    lo, hi = 0, len(height) - 1
    while lo < hi:
        left_wall, right_wall = height[lo], height[hi]
        # Area is bounded by the shorter wall; advance that side, since
        # moving the taller one can never enlarge the area.
        if left_wall <= right_wall:
            best = max(best, left_wall * (hi - lo))
            lo += 1
        else:
            best = max(best, right_wall * (hi - lo))
            hi -= 1
    return best
def threeSum(nums: List[int]) -> List[List[int]]:  # sort + two pointers
    """Return all unique triplets [a, b, c] from *nums* with a + b + c == 0.

    Sorts the input in place, then for each first element walks a second
    pointer forward and a third pointer backward, skipping duplicate
    values so each triplet is reported exactly once.
    """
    n = len(nums)
    nums.sort()  # the two-pointer scan below requires sorted input
    ans = []
    # enumerate a
    for first in range(n):
        # must differ from the previously enumerated value
        if first > 0 and nums[first] == nums[first - 1]:  # skip duplicates of a
            continue
        # the pointer for c starts at the right end of the array
        third = n - 1
        target = -nums[first]
        # enumerate b
        for second in range(first + 1, n):
            # must differ from the previously enumerated value
            if second > first + 1 and nums[second] == nums[second - 1]:
                continue
            # keep b's pointer strictly to the left of c's pointer
            while second < third and nums[second] + nums[third] > target:
                third -= 1
            # if the pointers meet, then for any larger b there is no c
            # with b < c and a + b + c == 0, so stop this inner loop
            if second == third:
                break
            if nums[second] + nums[third] == target:
                ans.append([nums[first], nums[second], nums[third]])
    return ans
def quick_sort(nums):
    """Sort *nums* in place with quicksort and return it."""

    def _sort(lo, hi):
        # Ranges with fewer than two elements are already sorted.
        if lo >= hi:
            return nums
        # Lomuto partition around the first element of the range.
        pivot_value = nums[lo]
        boundary = lo
        for scan in range(lo + 1, hi + 1):
            if nums[scan] <= pivot_value:
                boundary += 1
                nums[boundary], nums[scan] = nums[scan], nums[boundary]
        nums[lo], nums[boundary] = nums[boundary], nums[lo]
        _sort(lo, boundary - 1)
        _sort(boundary + 1, hi)
        return nums

    return _sort(0, len(nums) - 1)
def two_sum(nums, target):
    """Find index pairs [i, j] (i > j) with nums[i] + nums[j] == target.

    Returns the list of pairs together with the value->last-index map that
    was built while scanning (kept for compatibility with the original).
    """
    seen = {}
    pairs = []
    for idx, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            pairs.append([idx, seen[complement]])
        # Record *after* the lookup so value + value == target only ever
        # matches an earlier occurrence of the same value.
        seen[value] = idx
    return pairs, seen
def longestCommonPrefix(strs):
    """Longest prefix shared by every string in *strs* ("" when none)."""
    if not strs:
        return ""
    prefix = []
    # zip stops at the shortest string; each tuple holds one column of
    # characters across all strings.
    for chars in zip(*strs):
        if len(set(chars)) > 1:
            break
        prefix.append(chars[0])
    return "".join(prefix)
def singleNumber(nums: List[int]) -> int:
    """Return the element that appears once when every other appears twice.

    XOR is associative and commutative, x ^ x == 0 and x ^ 0 == x, so
    folding the array with XOR cancels all pairs and leaves the singleton.
    """
    acc = 0
    for value in nums:
        acc ^= value
    return acc
def singleNumber2(nums: List[int]) -> int:  # one value appears once; all others appear three times
    """Return the element appearing exactly once when every other element
    appears exactly three times (two-bit state machine per bit position)."""
    seen_once = seen_twice = 0
    for num in nums:
        # first appearance:
        # add num to seen_once
        # don't add to seen_twice because of presence in seen_once
        # second appearance:
        # remove num from seen_once
        # add num to seen_twice
        # third appearance:
        # don't add to seen_once because of presence in seen_twice
        # remove num from seen_twice
        seen_once = ~seen_twice & (seen_once ^ num)
        seen_twice = ~seen_once & (seen_twice ^ num)
    return seen_once
def numIdenticalPairs(nums: List[int]) -> int:
    """Count index pairs (i, j) with i < j and nums[i] == nums[j].

    A value occurring c times contributes c * (c - 1) / 2 pairs.
    Prints the count before returning it (kept from the original).
    """
    total = sum(c * (c - 1) // 2 for c in collections.Counter(nums).values())
    print(total)
    return total
def intersect(nums1, nums2):
    """Intersection of two integer lists with multiplicity.

    Each value appears min(count in nums1, count in nums2) times, in the
    order it occurs in the longer list.

    :type nums1: List[int]
    :type nums2: List[int]
    :rtype: List[int]
    """
    if len(nums1) > len(nums2):
        # Count the shorter list so the hash map stays small.  The
        # original said ``self.intersect`` here, which raises NameError
        # in a free function -- fixed to a plain recursive call.
        return intersect(nums2, nums1)
    m = collections.Counter()
    for num in nums1:
        m[num] += 1
    res = []
    for num in nums2:
        if m[num] != 0:
            m[num] = m[num] - 1
            res.append(num)
    return res
def maxSubArray1(nums):
    """Largest sum over all contiguous subarrays (Kadane); 0 for [].

    :type nums: List[int]
    :rtype: int
    """
    if not nums:
        return 0
    best = current = nums[0]
    for value in nums[1:]:
        # Either extend the running subarray or restart it at *value*.
        current = max(current + value, value)
        best = max(best, current)
    return best
def numOfSubarrays(arr, k, threshold):
    """Count length-k windows of *arr* whose average is >= threshold.

    Sliding-window sum.  Prints the count before returning it, matching
    the original -- note the len(arr) < k short-circuit does NOT print.
    """
    if len(arr) < k:
        return 0
    needed = k * threshold  # compare sums instead of averages
    window = sum(arr[:k])
    hits = 1 if window >= needed else 0
    for right in range(k, len(arr)):
        window += arr[right] - arr[right - k]
        if window >= needed:
            hits += 1
    print(hits)
    return hits
def buildLinkedListByArray(nums: List[int]):
    """Build a singly linked list from *nums*; returns None for an empty list."""
    if not nums:
        return None
    first = ListNode(nums[0])
    tail = first
    for value in nums[1:]:
        tail.next = ListNode(value)
        tail = tail.next
    return first
def reverseLinkedList(head: ListNode) -> ListNode:
    """Reverse a singly linked list in place and return the new head."""
    reversed_head = None
    walker = head
    while walker is not None:
        following = walker.next
        walker.next = reversed_head
        reversed_head = walker
        walker = following
    # For an empty list this is None, matching the recursive original.
    return reversed_head
def findOrder(numCourses: int, prerequisites: List[List[int]]) -> List[int]:
    """Course Schedule II: a valid topological order of the courses, or []
    when the prerequisite graph contains a cycle (DFS post-order)."""
    stack = list()
    edges = defaultdict(list)
    # visit state: 0 = not visited, 1 = being visited (on stack), 2 = done
    visit_stat = [0] * numCourses
    has_ring = False
    for e in prerequisites:
        # edge prerequisite -> dependent course
        edges[e[1]].append(e[0])
    def dfs(u: int):
        nonlocal has_ring
        visit_stat[u] = 1
        for v in edges[u]:
            if visit_stat[v] == 0:
                dfs(v)
                if has_ring:
                    return
            elif visit_stat[v] == 1:
                # Reached a node currently on the DFS stack: cycle found.
                has_ring = True
                return
        visit_stat[u] = 2
        stack.append(u)
    for i in range(numCourses):
        if not has_ring and not visit_stat[i]:
            dfs(i)
    if has_ring:
        return []
    else:
        # Post-order reversed is a topological order.
        return stack[::-1]
def permute(nums):
    """All permutations of *nums* (assumes distinct values), in DFS order."""
    results = []
    path = []

    def explore():
        if len(path) == len(nums):
            results.append(list(path))
            return
        for candidate in nums:
            # Skip values already placed on the current path.
            if candidate in path:
                continue
            path.append(candidate)
            explore()
            path.pop()

    explore()
    return results
def canCompleteCircuit(gas: List[int], cost: List[int]) -> int:
    """Gas-station problem: smallest start index from which the circular
    route can be completed, or -1 when impossible.

    Brute force, O(n^2): try every viable start and simulate the lap.
    """
    for i in range(len(gas)):
        # A start must at least afford the hop to the next station.
        if gas[i] < cost[i]:
            continue
        res_gas = gas[i] - cost[i]
        next_i = (i + 1) % len(gas)
        # Drive around the circle while the tank stays non-negative.
        while res_gas >= 0 and next_i != i:
            res_gas += gas[next_i] - cost[next_i]
            next_i = (next_i + 1) % len(gas)
        # Back at the start with fuel to spare means i works.
        if res_gas >= 0 and next_i == i:
            return i
        else:
            continue
    return -1
def insertionSortList(head: ListNode) -> ListNode:
    """Sort a singly linked list ascending via insertion sort.

    ``sq`` heads the already-sorted sublist, ``usq`` the still-unsorted
    remainder; nodes are spliced from usq into sq one at a time.
    """
    if not head or not head.next:
        return head
    sq = head  # sorted sublist
    usq = sq.next  # unsorted sublist
    sq.next = None
    q = usq
    while q:  # walk the unsorted sublist
        pre = ListNode(-1)  # dummy predecessor, simplifies head insertion
        p = sq
        pre.next = p
        while p:  # find the insertion point in the sorted sublist
            if q.val <= p.val:
                # Insert q just before p (update sq if p was the head).
                usq = usq.next
                q.next = p
                pre.next = q
                if p == sq:
                    sq = q
                break
            if not p.next:
                # q exceeds everything sorted so far: append at the tail.
                usq = usq.next
                q.next = p.next
                p.next = q
                break
            pre = pre.next
            p = p.next
        q = usq
    return sq
if __name__ == "__main__":
# W = [4, 1, 2, 3, 4] # 体积
# V = [5, 2, 4, 4, 5] # 价值
# knapsack(W, V)
# a = [1, 2, 3, 4, 5, 6, 4]
# t, di = two_sum(a, 8)
# arr = [1, 2, 3]
# head = buildLinkedListByArray(arr)
# res = findOrder(2, [[1, 0]])
# print(res)
# res = canCompleteCircuit([3, 3, 4], [3, 4, 4])
# print(res)
# head = buildLinkedListByArray([-1, 5, 3, 4, 0])
# res = insertionSortList(head)
# while res:
# print(res.val)
# res = res.next
def mycmp(x: int) -> (int, int):
return rank[x] if x in rank else x
arr2 = [2, 1, 4, 3, 9, 6]
n = len(arr2)
rank = {x: i - n for i, x in enumerate(arr2)}
print(rank)
arr1 = [2, 3, 1, 3, 2, 4, 6, 7, 9, 2, 19]
arr1.sort(key=mycmp)
print(arr1)
| true |
5b2b8e34c41a318c08bc56c1da453ceaf9bda640 | Python | psioro/CoffeMachine_for_hyperskill | /CoffeeMachine.py | UTF-8 | 4,048 | 3.9375 | 4 | [] | no_license | class CoffeeMachine:
coffee_machine_existence = 0
water = 400
milk = 540
coffee_beans = 120
disposable_cups = 9
money = 550
    def __new__(cls):
        # Poor man's singleton: only the first instantiation creates an
        # object.  NOTE(review): once the limit is reached this falls
        # through and returns None -- a second CoffeeMachine() would then
        # fail in __init__; confirm a second instantiation never happens.
        if cls.coffee_machine_existence < 1:
            cls.coffee_machine_existence += 1
            return object.__new__(cls)
    def __init__(self):
        # Immediately enter the interactive command loop (blocks on stdin).
        self.start()
def start(self):
while True:
print("Write action (buy, fill, take, remaining, exit):")
action = input("> ")
if action == "buy":
self.buy()
elif action == "fill":
self.fill()
print()
elif action == "take":
self.take()
elif action == "remaining":
self.machine_status()
print()
continue
elif action == "exit":
break
    def machine_impact(self, water_x, milk_x, coffe_beans_x, disposable_cups_x, money_x):
        # Apply signed deltas to the machine's stock: negative values model
        # consumption while brewing, positive values model refills/earnings.
        self.water += water_x
        self.milk += milk_x
        self.coffee_beans += coffe_beans_x
        self.disposable_cups += disposable_cups_x
        self.money += money_x
def buy(self):
print("What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino, back - back to main menu:")
coffee_type = input("> ")
def coffee_check(water_check, milk_check, coffee_beans_check, disposable_cups_check):
if self.water >= water_check and self.milk >= milk_check and self.coffee_beans >= coffee_beans_check \
and self.disposable_cups >= disposable_cups_check:
return True
elif self.water < water_check:
print("Sorry, not enough water")
return False
elif self.milk < milk_check:
print("Sorry, not enough milk")
return False
elif self.coffee_beans < coffee_beans_check:
print("Sorry, not enough coffe beans")
return False
elif self.disposable_cups < disposable_cups_check:
print("Sorry, not enough disposable cups")
return False
if coffee_type == "back":
print()
elif int(coffee_type) == 1:
coffee_possibility = coffee_check(250, 0, 16, 1)
if coffee_possibility:
self.machine_impact(-250, -0, -16, -1, 4)
print("I have enough resources, making you a coffee!", end="\n\n")
elif int(coffee_type) == 2:
coffee_possibility = coffee_check(350, 75, 20, 1)
if coffee_possibility:
self.machine_impact(-350, -75, -20, -1, 7)
print("I have enough resources, making you a coffee!", end="\n\n")
elif int(coffee_type) == 3:
coffee_possibility = coffee_check(200, 100, 12, 1)
if coffee_possibility:
self.machine_impact(-200, -100, -12, -1, 6)
print("I have enough resources, making you a coffee!", end="\n\n")
def fill(self):
print("Write how many ml of water do you want to add:")
water_add = int(input("> "))
print("Write how many ml of milk do you want to add:")
milk_add = int(input("> "))
print("Write how many grams of coffee beans do you want to add:")
coffee_beans_add = int(input("> "))
print("Write how many disposable cups of coffee do you want to add:")
disposable_cups_add = int(input("> "))
self.machine_impact(water_add, milk_add, coffee_beans_add, disposable_cups_add, 0)
    def machine_status(self):
        # Print a report of the current supplies and collected cash.
        print("The coffee machine has:")
        print(self.water, "of water")
        print(self.milk, "of milk")
        print(self.coffee_beans, "of coffee beans")
        print(self.disposable_cups, "of disposable cups")
        print(self.money, "of money")
def take(self):
print("I gave you ${}".format(self.money))
self.money = 0
# ======================
use_coffee_machine = CoffeeMachine()
| true |
9c826b51daa700d6ad3799776693f0742cbec099 | Python | aismail/AmI-Platform | /core/echo_pdu.py | UTF-8 | 389 | 2.5625 | 3 | [] | no_license | from pdu import PDU
class EchoPDU(PDU):
    """PDU that forwards every message it receives to an output queue.

    Reads from ``input_queue`` (default "echo") and republishes to
    ``output_queue`` (default "echo_outputs"); useful as a smoke test
    for the messaging pipeline.
    """
    def __init__(self, **kwargs):
        # Pop the queue names so the remaining kwargs go to PDU.__init__.
        self.QUEUE = kwargs.pop('input_queue', 'echo')
        self.OUTPUT_QUEUE = kwargs.pop('output_queue', 'echo_outputs')
        super(EchoPDU, self).__init__(**kwargs)
    def process_message(self, message):
        # Called by the PDU framework per incoming message; log it and
        # pass it through unchanged.
        self.log("Echo-ing message %r" % message)
        self.send_to(self.OUTPUT_QUEUE, message)
d23521f16ff3e7b8908c7559b531d936d0f2d814 | Python | Belco90/smartninja-wd1 | /lesson_7/conversor.py | UTF-8 | 626 | 4.3125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
print "Hello! This is a unit converter that converts kilometers into miles."
choice = 'yes'
while choice.lower() == "y" or choice.lower() == "yes":
print "Please enter a number of kilometers that you'd like to convert into miles. Enter only a number!"
km = raw_input("Kilometers: ")
km = int(km)
miles = km * 0.621371
print "{} kilometers is {} miles.".format(km, miles)
print "%s kilometers is %s miles." % (km, miles)
print str(km) + " kilometers is " + str(miles) + " miles."
choice = raw_input("Would you like to do another conversion (y/n): ")
print "Bye!"
| true |
430bca74f48f1cb33f892cc2692fde47cc95e1af | Python | Dan4ik2504/2021-1-MAILRU-SDET-Python-D-Mashkovtsev | /Homework_7/utils/wait.py | UTF-8 | 511 | 2.78125 | 3 | [] | no_license | import time
import exceptions
def wait(method, error=Exception, timeout=10, interval=0.5, **kwargs):
    """Repeatedly call *method*(**kwargs) until it stops raising *error*.

    Returns whatever *method* returns.  Sleeps *interval* seconds between
    attempts; after *timeout* seconds raises WaitTimeoutException carrying
    the last captured exception.
    """
    deadline = time.perf_counter() + timeout
    last_exception = None
    while time.perf_counter() < deadline:
        try:
            return method(**kwargs)
        except error as e:
            last_exception = e
            time.sleep(interval)
    raise exceptions.WaitTimeoutException(
        f'Function {method.__name__} timeout in {timeout}sec with exception: "{last_exception}"')
| true |
9a456f8329a291c351c50f42934b2aacb0b6f135 | Python | nciefeiniu/python-test | /web/flask_test/flask_test.py | UTF-8 | 456 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | from flask import Flask
from flask import render_template
app = Flask(__name__)
# GET /hello/<name>: render the greeting template for <name>.
@app.route('/hello/<name>')
def hello_world(name):
    return render_template('hello.html', name=name)
# GET/POST /user/<username>: plain-text profile page.
@app.route('/user/<username>', methods=['POST','GET'])
def show_user_profile(username):
    # show the user profile for that user
    return 'User %s' % username
# GET /test/<num>: echo the path segment back (always a str).
@app.route('/test/<num>')
def print_number(num):
    return num
# Start the development server when executed directly.
if __name__ == '__main__':
    app.run()
| true |
75ce77882d8d26f58762fe65e70fb14ffb4bf5aa | Python | UKPLab/naacl2019-like-humans-visual-attacks | /code/VIPER/viper_dces.py | UTF-8 | 8,429 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | '''
Use an external lookup method to disturb some text. In this case, we take the textual descriptions of each character,
and find the nearest neighbours in the list of unicode characters by finding characters with the largest number of
matching tokens in the text description.
Example usage:
python3 viper_dces.py -p 0.4 -d ../G2P_data/train.1k --conll --odd
'''
import argparse
import random
import sys
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from sklearn.feature_extraction.text import CountVectorizer
from perturbations_store import PerturbationsStorage
def read_data_conll(fn):
    """Read a CoNLL-style file into "tokens<TAB>labels" documents.

    Each non-empty line must be "idx<TAB>token<TAB>label"; blank lines
    separate documents.  Returns one string per document: the tokens
    joined by spaces, a tab, then the labels joined by spaces.
    """
    def flush(doc):
        # Turn the accumulated lines of one document into its string form.
        tokens, labels = [], []
        for l in doc:
            _, x, y = l.split("\t")
            tokens.append(x)
            labels.append(y)
        return "{}\t{}".format(" ".join(tokens), " ".join(labels))

    docs = []
    doc = []
    for line in open(fn):
        line = line.strip()
        if line == "":
            if doc:
                docs.append(flush(doc))
                doc = []
        else:
            doc.append(line)
    if doc:
        # Trailing document without a final blank line; the original
        # duplicated the whole flush logic here, now shared via flush().
        docs.append(flush(doc))
    return docs
def read_data_standard(fn):
    """Return the lines of *fn* as a list (trailing newlines preserved).

    Uses a context manager so the file handle is closed promptly; the
    original left the handle open for the garbage collector.
    """
    with open(fn) as f:
        return list(f)
# load the unicode descriptions into a single dataframe with the chars as indices
descs = pd.read_csv('NamesList.txt', skiprows=np.arange(16), header=None, names=['code', 'description'], delimiter='\t')
descs = descs.dropna(0)
descs_arr = descs.values # remove the rows after the descriptions
vectorizer = CountVectorizer(max_features=1000)
desc_vecs = vectorizer.fit_transform(descs_arr[:, 0]).astype(float)
vecsize = desc_vecs.shape[1]
vec_colnames = np.arange(vecsize)
desc_vecs = pd.DataFrame(desc_vecs.todense(), index=descs.index, columns=vec_colnames)
descs = pd.concat([descs, desc_vecs], axis=1)
def char_to_hex_string(ch):
    """Uppercase, zero-padded-to-4 hex code point of *ch* (e.g. 'A' -> '0041')."""
    return format(ord(ch), '04X')
disallowed = ['TAG', 'MALAYALAM', 'BAMUM', 'HIRAGANA', 'RUNIC', 'TAI', 'SUNDANESE', 'BATAK', 'LEPCHA', 'CHAM',
'TELUGU', 'DEVANGARAI', 'BUGINESE', 'MYANMAR', 'LINEAR', 'SYLOTI', 'PHAGS-PA', 'CHEROKEE',
'CANADIAN', 'YI', 'LYCIAN', 'HANGUL', 'KATAKANA', 'JAVANESE', 'ARABIC', 'KANNADA', 'BUHID',
'TAGBANWA', 'DESERET', 'REJANG', 'BOPOMOFO', 'PERMIC', 'OSAGE', 'TAGALOG', 'MEETEI', 'CARIAN',
'UGARITIC', 'ORIYA', 'ELBASAN', 'CYPRIOT', 'HANUNOO', 'GUJARATI', 'LYDIAN', 'MONGOLIAN', 'AVESTAN',
'MEROITIC', 'KHAROSHTHI', 'HUNGARIAN', 'KHUDAWADI', 'ETHIOPIC', 'PERSIAN', 'OSMANYA', 'ELBASAN',
'TIBETAN', 'BENGALI', 'TURKIC', 'THROWING', 'HANIFI', 'BRAHMI', 'KAITHI', 'LIMBU', 'LAO', 'CHAKMA',
'DEVANAGARI', 'ITALIC', 'CJK', 'MEDEFAIDRIN', 'DIAMOND', 'SAURASHTRA', 'ADLAM', 'DUPLOYAN'
]
disallowed_codes = ['1F1A4', 'A7AF']
# function for retrieving the variations of a character
def get_all_variations(ch):
# get unicode number for c
c = char_to_hex_string(ch)
# problem: latin small characters seem to be missing?
if np.any(descs['code'] == c):
description = descs['description'][descs['code'] == c].values[0]
else:
print('Failed to disturb %s, with code %s' % (ch, c))
return c, np.array([])
# strip away everything that is generic wording, e.g. all words with > 1 character in
toks = description.split(' ')
case = 'unknown'
identifiers = []
for tok in toks:
if len(tok) == 1:
identifiers.append(tok)
# for debugging
if len(identifiers) > 1:
print('Found multiple ids: ')
print(identifiers)
elif tok == 'SMALL':
case = 'SMALL'
elif tok == 'CAPITAL':
case = 'CAPITAL'
# for debugging
#if case == 'unknown':
# sys.stderr.write('Unknown case:')
# sys.stderr.write("{}\n".format(toks))
# find matching chars
matches = []
for i in identifiers:
for idx in descs.index:
desc_toks = descs['description'][idx].split(' ')
if i in desc_toks and not np.any(np.in1d(desc_toks, disallowed)) and \
not np.any(np.in1d(descs['code'][idx], disallowed_codes)) and \
not int(descs['code'][idx], 16) > 30000:
# get the first case descriptor in the description
desc_toks = np.array(desc_toks)
case_descriptor = desc_toks[ (desc_toks == 'SMALL') | (desc_toks == 'CAPITAL') ]
if len(case_descriptor) > 1:
case_descriptor = case_descriptor[0]
elif len(case_descriptor) == 0:
case = 'unknown'
if case == 'unknown' or case == case_descriptor:
matches.append(idx)
# check the capitalisation of the chars
return c, np.array(matches)
# function for finding the nearest neighbours of a given word
def get_unicode_desc_nn(c, perturbations_file, topn=1):
# we need to consider only variations of the same letter -- get those first, then apply NN
c, matches = get_all_variations(c)
if not len(matches):
return [], [] # cannot disturb this one
# get their description vectors
match_vecs = descs[vec_colnames].loc[matches]
# find nearest neighbours
neigh = NearestNeighbors(metric='euclidean')
Y = match_vecs.values
neigh.fit(Y)
X = descs[vec_colnames].values[descs['code'] == c]
if Y.shape[0] > topn:
dists, idxs = neigh.kneighbors(X, topn, return_distance=True)
else:
dists, idxs = neigh.kneighbors(X, Y.shape[0], return_distance=True)
# turn distances to some heuristic probabilities
#print(dists.flatten())
probs = np.exp(-0.5 * dists.flatten())
probs = probs / np.sum(probs)
# turn idxs back to chars
#print(idxs.flatten())
charcodes = descs['code'][matches[idxs.flatten()]]
#print(charcodes.values.flatten())
chars = []
for charcode in charcodes:
chars.append(chr(int(charcode, 16)))
# filter chars to ensure OOV scenario (if perturbations file from prev. perturbation contains any data...)
c_orig = chr(int(c, 16))
chars = [char for char in chars if not perturbations_file.observed(c_orig, char)]
#print(chars)
return chars, probs
parser = argparse.ArgumentParser()
parser.add_argument("-p",action="store",dest="prob")
parser.add_argument("-d",action="store",dest="docs")
parser.add_argument('--conll', dest='conll', action='store_true')
parser.add_argument('--perturbations-file', action="store", dest='perturbations_file')
parser.set_defaults(conll=False, perturbations_file='./perturbations.txt')
parsed_args = parser.parse_args(sys.argv[1:])
prob = float(parsed_args.prob)
docs = parsed_args.docs
isConll = parsed_args.conll==True
perturbations_file = PerturbationsStorage(parsed_args.perturbations_file)
if isConll:
docs=read_data_conll(docs)
output_format="conll"
else:
docs=read_data_standard(docs)
output_format="standard"
# the main loop for disturbing the text
topn=20
mydict={}
# docs = ['a b c d e f g h i j k l m n o p q r s t u v w x y z A B C D E F G H I J K L M N O P Q R S T U V W X Y Z abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ']
print_all_alternatives = False
for line in docs:
if isConll:
a,b = line.rstrip("\n").split("\t")
b = b.split()
a = a.split()
else:
a = line.rstrip("\n")
out_x = []
for c in a:
#print(c)
if c not in mydict:
similar_chars, probs = get_unicode_desc_nn(c, perturbations_file, topn=topn)
probs = probs[:len(similar_chars)]
# normalise after selecting a subset of similar characters
probs = probs / np.sum(probs)
mydict[c] = (similar_chars, probs)
else:
similar_chars, probs = mydict[c]
r = random.random()
if r<prob and len(similar_chars):
s = np.random.choice(similar_chars, 1, replace=True, p=probs)[0]
else:
s = c
out_x.append(s)
if print_all_alternatives:
print("{}\t{}".format(c, similar_chars))
if isConll:
print('idx\toriginal\tdisturbed\thex')
for i in range(len(out_x)):
print("{}\t{}\t{}\t{}".format(i+1, a[i], out_x[i], char_to_hex_string(out_x[i])))
print()
else:
print("{}".format("".join(out_x)))
| true |
ff290be9dd7300a4d2824664b7b4d0a2c0510857 | Python | alexanderjardim/crawler-sample-1 | /app/extractor.py | UTF-8 | 280 | 2.578125 | 3 | [] | no_license | from lxml import html
from urllib.parse import urlparse
def extract(page_string):
    """Return the ``src`` of every PNG image in an HTML document.

    Matches <img> elements whose src ends in ".png" (optionally followed
    by a query string), case-insensitively, via the EXSLT regexp extension.
    """
    document = html.fromstring(page_string)
    items = document.xpath('//img[re:test(@src, "\\.png($|\\?)", "i")]/@src', namespaces={'re': 'http://exslt.org/regular-expressions'})
    return items
de8458fff84c7e32f1e0edb6ab6af9e6cb2ea577 | Python | kartik-devops/Myprogs_Public | /pythonAssignment_1.py | UTF-8 | 732 | 3.890625 | 4 | [] | no_license | """
a=int(input("Enter First num : "))
b=int(input("Enter Second num : "))
c=int(input("Enter Third num : "))
print("SUM IS :")
print(a+b+c)
"""
"""a=input("Enter First string : ")
b=input("Enter Second string : ")
c=input("Enter Third string : ")
print("CONCATENATION IS : ")
print(a+" "+b+" "+c)
"""
"""
a=7
b=9
for i in range (1,11):
print(a,"*",i,"=",a*i)
print("\n")
for i in range (1,11):
print(b,"*",i,"=",b*i)
"""
"""n=int(input("Enter num to Print its table : "))
for i in range(1,11):
print(n,"*",i,"=",n*i)
"""
"""
n=int(input("Enter num to evaluate sum till that num : "))
s=0
for i in range(1,n+1):
s+=i
print("SUM is : ",s)
"""
print("Hello"+"dear"+"World")
| true |
a8b4f78ff8983610337e2f901c43284e60f9ddee | Python | nischalshrestha/automatic_wat_discovery | /Notebooks/py/tino76/assignment-2-titanic/assignment-2-titanic.py | UTF-8 | 4,917 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import skew
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
get_ipython().magic(u"config InlineBackend.figure_format = 'retina' #set 'png' here when working on notebook")
get_ipython().magic(u'matplotlib inline')
# In[ ]:
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
train.head()
# In[ ]:
unused_col = ["Name", "Ticket", "Cabin"]
train = train.drop(unused_col, axis=1)
test = test.drop(unused_col, axis=1)
# In[ ]:
numeric_feats = train.dtypes[train.dtypes != "object"].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 4.0]
skewed_feats = skewed_feats.index
train[skewed_feats] = np.log1p(train[skewed_feats])
test[skewed_feats] = np.log1p(test[skewed_feats])
train = pd.get_dummies(train)
test = pd.get_dummies(test)
train.describe()
# In[ ]:
train_x = train.drop(["PassengerId", "Survived"], axis=1)
train_y = train["Survived"]
train = (train_x - train_x.mean()) / (train_x.max() - train_x.min())
train["Survived"] = train_y
test_id = test["PassengerId"]
test = test.drop("PassengerId", axis=1)
test = (test - train_x.mean()) / (train_x.max() - train_x.min())
test["PassengerId"] = test_id
#filling NA's with the mean of the column:
train = train.fillna(train.mean())
test = test.fillna(train.mean())
# In[ ]:
train_x = train.drop("Survived", axis=1)
train_y = train["Survived"]
X_tr, X_val, y_tr, y_val = train_test_split(train_x, train_y, test_size=0.25)
# **Linear Model -Training**
# In[ ]:
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import accuracy_score
c = list(np.power(10.0, np.arange(-3, 3)))
model_log = LogisticRegressionCV(cv=10, solver='lbfgs', scoring='accuracy', penalty='l2', Cs=c)
model_log.fit(X_tr, y_tr)
predict = model_log.predict(X_tr)
score = accuracy_score(y_tr, predict)
print("Training accuracy: %.2f%%" % (score * 100))
predict = model_log.predict(X_val)
score = accuracy_score(y_val, predict)
print("Validation accuracy: %.2f%%" % (score * 100))
# In[ ]:
coef = pd.Series(model_log.coef_[0,:], index = X_tr.columns)
print("LogisticRegression picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
imp_coef = pd.concat([coef.sort_values()])
imp_coef.plot(kind = "barh")
plt.title("Coefficients in the LogisticRegression")
# **Linear Model - Predict**
# In[ ]:
predict = model_log.predict(test.drop("PassengerId", axis=1))
solution = pd.DataFrame({"PassengerId":test["PassengerId"], "Survived":predict})
solution.to_csv("log_sol.csv", index = False)
# **Tree Based Model - Training**
# In[ ]:
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
xgb = XGBClassifier(booster='gbtree')
xgb.fit(X_tr, y_tr)
predict = xgb.predict(X_tr)
score = accuracy_score(y_tr, predict)
print("Training accuracy: %.2f%%" % (score * 100))
predict = xgb.predict(X_val)
score = accuracy_score(y_val, predict)
print("Validation accuracy: %.2f%%" % (score * 100))
# **Tree Model - Predict**
# In[ ]:
predict = xgb.predict(test.drop("PassengerId", axis=1))
solution = pd.DataFrame({"PassengerId":test["PassengerId"], "Survived":predict})
solution.to_csv("xgb_sol.csv", index = False)
# **Neural Network - Training**
# In[ ]:
from keras.layers import Dense
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
model = Sequential()
model.add(Dense(units = 9, kernel_initializer = 'uniform', activation = 'relu', input_dim = 10))
BatchNormalization
model.add(Dense(units = 9, kernel_initializer = 'uniform', activation = 'relu'))
BatchNormalization
model.add(Dense(units = 5, kernel_initializer = 'uniform', activation = 'relu'))
BatchNormalization
model.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
BatchNormalization
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
model.fit(X_tr, y_tr, batch_size = 32, epochs = 200)
# In[ ]:
predict = model.predict(X_tr)
predict = (predict > 0.5).astype(int).reshape(X_tr.shape[0])
score = accuracy_score(y_tr, predict)
print("Training accuracy: %.2f%%" % (score * 100))
predict = model.predict(X_val)
predict = (predict > 0.5).astype(int).reshape(X_val.shape[0])
score = accuracy_score(y_val, predict)
print("Validation accuracy: %.2f%%" % (score * 100))
# **Neural Network - Predict**
# In[ ]:
predict = model.predict(test.drop("PassengerId", axis=1))
predict = (predict > 0.5).astype(int).reshape(test.shape[0])
solution = pd.DataFrame({"PassengerId":test["PassengerId"], "Survived":predict})
solution.to_csv("nn_sol.csv", index = False)
| true |
# Four progressively shorter ways to write the same squaring function.

def square(n):
    # Verbose form: store the result in a temporary first.
    value = n ** 2
    return value

print(square(2))
# output: 4

def square(n):
    # Drop the temporary and return the expression directly.
    return n ** 2

print(square(2))
# output: 4

def square(n): return n ** 2  # whole definition on one line

print(square(2))
# output: 4

square = lambda num: num ** 2  # lambda form (what the name ends up bound to)

print(square(2))
# output: 4
0afcfc6adac53dc421a5d8a914edb47996cc9136 | Python | ZhengKeli/OpticalInterpretation | /model_chemical.py | UTF-8 | 3,800 | 2.875 | 3 | [] | no_license | import braandket as bnk
from components import Cavity, Band
from utils import eig_map
class Atom(bnk.KetSpace):
    """ Atom with 2 electron orbits, having 4 possible states.

    A 4-dimensional ket space: state index i carries on-site energy
    potential_i, and electrons(i) maps i to a pair of occupation numbers
    (presumably one per orbit -- see electrons()).
    """
    def __init__(self, potential_0=0.0, potential_1=-1.0, potential_2=-4.0, potential_3=-5.0, name=None):
        # 4 basis states; `name` is forwarded to the underlying KetSpace.
        super().__init__(4, name)
        # Diagonal on-site energy operator: sum_i potential_i * |i><i|.
        self.potential = bnk.sum(
            potential_0 * self.projector(0),
            potential_1 * self.projector(1),
            potential_2 * self.projector(2),
            potential_3 * self.projector(3),
        )
    def transition(self, g_01, g_12, g_23, c_01: Cavity, c_12: Cavity, c_23: Cavity, tp: Band):
        # Interaction terms coupling each atomic transition (i -> i+1) to its
        # cavity mode; the 0->1 and 2->3 terms also lower the band `tp`.
        # NOTE(review): bnk.sum_ct presumably adds the conjugate-transpose
        # ("+ h.c.") terms as well -- confirm against the braandket docs.
        return bnk.sum_ct(
            g_01 * c_01.increase @ self.operator(1, 0) @ tp.decrease,
            g_12 * c_12.increase @ self.operator(2, 1),
            g_23 * c_23.increase @ self.operator(3, 2) @ tp.decrease,
        )
    @staticmethod
    def electrons(i):
        # Map state index -> occupation pair; presumably (orbit0, orbit1)
        # occupation numbers -- TODO confirm against the model's convention.
        return [
            (0, 0),
            (0, 1),
            (1, 0),
            (1, 1),
        ][i]
class ChemicalModel:
    """ Chemical model.

    Assembles two Atom subsystems (at0, at1), a band `tp` and three cavities
    (c01, c12, c23, one per atomic transition) into a total Hamiltonian
    `hmt` = on-site/mode energies + atom-cavity interactions, plus decay
    channels `deco` (cavity lowering operators) with rates `gamma`.
    """
    def __init__(
            self,
            potential_0=0.0, potential_1=-1.0, potential_2=-4.0, potential_3=-5.0,
            g_01=0.02, g_12=0.02, g_23=0.02,
            gamma_01=0.002, gamma_12=0.002, gamma_23=0.002,
            hb=1.0):
        # Two identical atoms and the shared band / cavity modes.  Each cavity
        # is resonant with its transition: energy = potential difference.
        at0 = Atom(potential_0, potential_1, potential_2, potential_3, name='at0')
        at1 = Atom(potential_0, potential_1, potential_2, potential_3, name='at1')
        tp = Band(3, potential=potential_0, name='tp')
        c01 = Cavity(3, energy=(potential_0 - potential_1), name='c01')
        c12 = Cavity(3, energy=(potential_1 - potential_2), name='c12')
        c23 = Cavity(2, energy=(potential_2 - potential_3), name='c23')
        self.at0 = at0
        self.at1 = at1
        self.tp = tp
        self.c01 = c01
        self.c12 = c12
        self.c23 = c23
        # Free (diagonal) part of the Hamiltonian.
        hmt_energy = bnk.sum([
            at0.potential,
            at1.potential,
            tp.potential,
            c01.energy,
            c12.energy,
            c23.energy,
        ])
        # Interaction part: both atoms couple to the same cavities and band.
        hmt_interact = bnk.sum(
            at0.transition(g_01, g_12, g_23, c01, c12, c23, tp),
            at1.transition(g_01, g_12, g_23, c01, c12, c23, tp),
        )
        hmt = hmt_energy + hmt_interact
        # Paired decay rates and decoherence (cavity photon loss) operators.
        gamma, deco = zip(
            (gamma_01, c01.decrease),
            (gamma_12, c12.decrease),
            (gamma_23, c23.decrease),
        )
        self.hb = hb        # reduced Planck constant used by the evolution
        self.hmt = hmt
        self.gamma = gamma
        self.deco = deco
    def eigenstate(self, at0, at1, tp, c01, c12, c23):
        # Product basis state |at0> |at1> |tp> |c01> |c12> |c23| from the
        # per-subsystem eigenstate indices.
        return bnk.prod(
            self.at0.eigenstate(at0),
            self.at1.eigenstate(at1),
            self.tp.eigenstate(tp),
            self.c01.eigenstate(c01),
            self.c12.eigenstate(c12),
            self.c23.eigenstate(c23),
        )
class PrunedChemicalModel:
    """Wrapper that restricts a ChemicalModel to the subspace reachable from a
    given initial state (via bnk.PrunedKetSpace), pruning the Hamiltonian and
    decoherence operators accordingly."""
    def __init__(self, org: ChemicalModel, initial):
        # Reachable subspace generated by the Hamiltonian and decay operators.
        space = bnk.PrunedKetSpace.from_initial(initial, [org.hmt, org.deco])
        self.org = org
        self.space = space
        self.hb = org.hb
        self.hmt = space.prune(org.hmt)
        self.gamma = org.gamma          # rates are unchanged by pruning
        self.deco = space.prune(org.deco)
    def labels(self):
        """LaTeX label for each retained basis state, in the pruned space's
        eigenstate order: |at0> |at1> |tp> and the three cavity occupations."""
        model = self.org
        labels = []
        for i, psi in enumerate(self.space.org_eigenstates):
            em = eig_map(psi)
            label = f"$" \
                    f"|{em[model.at0]}\\rangle_{{at0}} " \
                    f"|{em[model.at1]}\\rangle_{{at1}} " \
                    f"|{em[model.tp]}\\rangle_{{tp}} " \
                    f"|{em[model.c01]}{em[model.c12]}{em[model.c23]}\\rangle_{{\\omega_{{01}}\\omega_{{12}}\\omega_{{23}}}} " \
                    f"$"
            labels.append(label)
        return tuple(labels)
| true |
aa2d044d06d2e3be6f263d069c1502c72c56c758 | Python | 10to8/django-tastypie-extendedmodelresource | /example/api/tests.py | UTF-8 | 3,469 | 2.625 | 3 | [] | no_license | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.contrib.auth.models import User
from django.test import LiveServerTestCase
import requests
import simplejson
from api.models import Entry
class SimpleTest(LiveServerTestCase):
    """End-to-end tests of the nested /api/v1/user/<id>/entries/ resource,
    exercised over real HTTP against Django's live test server.

    The fixture is assumed to give user 2 exactly one Entry with id 1.
    """
    # NOTE(review): Django documents `fixtures` as a list of fixture names;
    # a bare string may be splatted character-by-character when loaddata is
    # called -- confirm, otherwise this should be ['initial_data'].
    fixtures = 'initial_data'
    def test_get_list(self):
        # User 2 starts with exactly one entry.
        result = requests.get(self.live_server_url + '/api/v1/user/2/entries/')
        self.assertEqual(result.status_code, 200)
        self.assertEqual(len(simplejson.loads(result.text)['objects']), 1)
    def test_get_list_filter(self):
        # Add a second entry, then check that title__startswith filtering
        # returns only it while the unfiltered list returns both.
        user = User.objects.get(id=2)
        e = Entry(user=user, title='filter_me')
        e.save()
        result = requests.get(self.live_server_url + '/api/v1/user/2/entries/?title__startswith=filter')
        self.assertEqual(result.status_code, 200)
        self.assertEqual(len(simplejson.loads(result.text)['objects']), 1)
        result = requests.get(self.live_server_url + '/api/v1/user/2/entries/')
        self.assertEqual(result.status_code, 200)
        self.assertEqual(len(simplejson.loads(result.text)['objects']), 2)
    def test_get_detail(self):
        result = requests.get(self.live_server_url + '/api/v1/user/2/entries/1/')
        self.assertEqual(result.status_code, 200)
    def test_post_detail_does_not_exist(self):
        # PUT to a non-existent entry id must be rejected.
        headers = {'Content-Type': 'application/json',}
        request_url = self.live_server_url + '/api/v1/user/2/entries/9999/'
        data = {'body': "hello, this is the body"}
        result = requests.put(request_url, data=simplejson.dumps(data), headers=headers)
        self.assertEqual(result.status_code, 400)
    def test_post_detail(self):
        # Partial update: only `body` changes, every other field is preserved.
        headers = {'Content-Type': 'application/json',}
        old_entry = Entry.objects.get(id=1)
        request_url = self.live_server_url + '/api/v1/user/2/entries/1/'
        data = {'body': "hello, this is the body"}
        result = requests.put(request_url, data=simplejson.dumps(data), headers=headers)
        self.assertEqual(result.status_code, 204) # Updated
        entry = Entry.objects.get(id=1)
        self.assertEqual(entry.body, data['body'])
        for key in entry.__dict__.keys():
            if not key == 'body' and not key.startswith('_'):
                self.assertEqual(entry.__dict__[key], old_entry.__dict__[key])
    def test_post_detail_non_relation(self):
        # An entry owned by user 1 must not be updatable via user 2's URL.
        new_entry = Entry(body='test', title='test', user=User.objects.get(id=1))
        new_entry.save()
        headers = {'Content-Type': 'application/json',}
        request_url = self.live_server_url + '/api/v1/user/2/entries/%s/' % new_entry.id
        data = {'body': "hello, this is the body"}
        result = requests.put(request_url, data=simplejson.dumps(data), headers=headers)
        self.assertEqual(result.status_code, 400)
    def test_post_list(self):
        # Creating via the nested list URL assigns ownership to user 2.
        headers = {'Content-Type': 'application/json',}
        request_url = self.live_server_url + '/api/v1/user/2/entries/'
        data = {'body': "hello, this is the body"}
        result = requests.post(request_url, data=simplejson.dumps(data), headers=headers)
        self.assertEqual(result.status_code, 201) # Created
        entry = Entry.objects.all()
        entry = entry[len(entry)-1]
        self.assertEqual(entry.body, data['body'])
        self.assertEqual(entry.user, User.objects.get(id=2))
7053a62ef4f15977b4a6b945c292937307b6ee9b | Python | Greek-and-Roman-God/Athena | /codingtest/week08/level11_brute_force/black_jack.py | UTF-8 | 338 | 2.828125 | 3 | [] | no_license | # 블랙잭
# Blackjack: choose 3 of the n cards so their sum is as large as possible
# without exceeding m; print that best sum.
from itertools import combinations

n, m = map(int, input().split())          # n cards, limit m
cards = list(map(int, input().split()))

# Largest 3-card sum not exceeding m.  default=0 matches the original,
# whose per-index best-sum table was initialized to 0, so it printed 0
# when no combination fits.  (Also avoids the original's shadowing of the
# builtin `sum` and its duplicated pair/triple bookkeeping.)
triple_sums = (a + b + c for a, b, c in combinations(cards, 3))
print(max((s for s in triple_sums if s <= m), default=0))
| true |
8c23b56fcbfdfd4201f8bde48c712264997b19bf | Python | Askhat-Bukeyev/my_robotiq | /robotiq_USB.py | UTF-8 | 6,344 | 2.84375 | 3 | [] | no_license | # Python class to connect and control Robotiq 3-f gripper
# Writing with python 3.5
class Robotiq_USB:
    """Driver for a Robotiq 3-finger adaptive gripper on a USB serial port.

    The gripper speaks Modbus RTU: every request is a fixed byte sequence
    terminated by a CRC-16 (polynomial 0xA001, low byte transmitted first).
    Constructing an instance opens the serial port and activates the gripper.
    """

    def __init__(self, LINUX_PORT='/dev/ttyUSB1', WIN_PORT='COM6', my_baudrate=115200):
        """Open the serial link and activate the gripper.

        :param LINUX_PORT: device path used when running on Linux
        :param WIN_PORT: COM port used when running on Windows
        :param my_baudrate: serial speed (the gripper default is 115200)
        :raises ConnectionError: if the serial port cannot be opened
        :raises TimeoutError: if activation never completes
        """
        import sys
        import serial

        # Last values decoded from the gripper's status registers.
        self.gripper_status = []        # decoded status bit fields (see update_status)
        self.current = [0, 0, 0]        # last motor current of fingers A, B, C
        self.position = [0, 0, 0]       # last reported position of fingers A, B, C
        self.echo_position = [0, 0, 0]  # echo of the requested position (command ack)

        # Open the serial connection; the device name depends on the OS.
        try:
            if sys.platform.startswith('linux'):
                port_name = LINUX_PORT
            elif sys.platform.startswith('win'):
                port_name = WIN_PORT
            else:
                # The original silently skipped port creation on other
                # platforms and crashed later with AttributeError; fail fast.
                raise ValueError('unsupported platform: ' + sys.platform)
            self.stream = serial.Serial(
                port=port_name, baudrate=my_baudrate, timeout=1,
                parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS)
        except (ValueError, serial.SerialException):
            # BUG FIX: the original caught the undefined name `SerialException`
            # and closed `self.stream`, which does not exist when Serial() raised.
            raise ConnectionError

        self.update_status()

        # Activate the gripper if the activation bit (status field 4,
        # presumably gACT -- confirm against the Robotiq register map) is unset.
        if self.gripper_status[4] != 1:
            status_counter = 0
            self.send_command([b"\x09\x10\x03\xE8\x00\x03\x06\x01\x00\x00\x00\x00\x00\x72\xE1"])
            # Poll until status field 1 reports 3 ("activation completed").
            while self.gripper_status[1] != 3:
                self.update_status()
                # BUG FIX: the counter was never incremented in the original,
                # so the timeout below could never trigger.
                status_counter += 1
                if status_counter >= 10000:
                    self.stream.close()
                    raise TimeoutError

    def str_to_bin(self, my_str):
        """Convert a hex string such as '08f2' into raw bytes b'\\x08\\xf2'."""
        import binascii
        return binascii.unhexlify(my_str)

    def update_status(self):
        """Read the gripper's 8 status registers and refresh the cached state.

        Refreshes self.gripper_status (9 decoded bit-field ints), self.current,
        self.position and self.echo_position, and returns the raw list of
        binary-string fields.
        """
        import binascii

        # "read 8 registers from 0x07D0" request (CRC 45 C9 pre-computed).
        self.stream.write(b"\x09\x03\x07\xD0\x00\x08\x45\xC9")
        data = binascii.hexlify(self.stream.readline())
        # Reply bytes 3-4 pack the status bits; split into 1- and 2-bit fields.
        data_status = format(int(data[6:10], 16), '0>16b')
        data_status = [data_status[0:2], data_status[2:4], data_status[4], data_status[5:7], data_status[7],
                       data_status[8:10], data_status[10:12], data_status[12:14], data_status[14:16]]
        self.gripper_status = [int(i, 2) for i in data_status]
        # Per-finger (A, B, C) fields are interleaved in the reply.
        self.current = [int(i, 16) for i in (data[16:18], data[22:24], data[28:30])]
        self.echo_position = [int(i, 16) for i in (data[12:14], data[18:20], data[24:26])]
        self.position = [int(i, 16) for i in (data[14:16], data[20:22], data[26:28])]
        return data_status

    def move(self, mode, pos, speed, force):
        """Send a 'go to' request for all fingers at once.

        mode: grasp mode 0-3; pos: 0 (open) .. 255 (closed);
        speed and force: 0 .. 255.
        """
        mode_bits = format(mode, '0>2b')
        # Action-request byte: activation + mode bits + go-to flag.
        msg_mode = "{0:0>2X}".format(int('00001' + mode_bits + '1', 2))
        msg_pos = format(hex(pos)[2:], '0>2')
        msg_speed = format(hex(speed)[2:], '0>2')
        msg_force = format(hex(force)[2:], '0>2')
        check = self.crc16('091003E8000306' + msg_mode + '0000' + msg_pos + msg_speed + msg_force)
        self.send_command([b"\x09\x10\x03\xE8\x00\x03\x06", self.str_to_bin(msg_mode),
                           b"\x00\x00", self.str_to_bin(msg_pos), self.str_to_bin(msg_speed),
                           self.str_to_bin(msg_force), check])

    def close(self, mode=None, pos=255, speed=255, force=255):
        """Close the gripper (full close by default), keeping the currently
        reported grasp mode unless one is given."""
        if mode is None:
            mode = self.gripper_status[3]
        self.move(mode, pos, speed, force)
        self.update_status()

    def open(self, mode=None, pos=0, speed=255, force=255):
        """Open the gripper (full open by default)."""
        if mode is None:
            mode = self.gripper_status[3]
        self.move(mode, pos, speed, force)
        self.update_status()

    def check_object(self):
        """Block until the current motion settles.

        Returns False when the fingers reached the requested position
        (status field 0 == 3, i.e. no object), True for any other final
        state (object detected); keeps polling while the field reads 0.
        """
        while True:
            self.update_status()
            if self.gripper_status[0] == 3:
                return False
            elif self.gripper_status[0] == 0:
                continue
            else:
                return True

    def set_mode(self, mode):
        """Switch grasp mode (0-3) and wait until the gripper reports ready."""
        mode_bits = format(mode, '0>2b')
        full_mode = "{0:0>2X}".format(int('00001' + mode_bits + '1', 2)) + '00'
        check = self.crc16('090603E8' + full_mode)
        self.send_command([b'\x09\x06\x03\xE8', self.str_to_bin(full_mode), check])
        self.update_status()
        while self.gripper_status[1] != 3:
            self.update_status()

    def deactivate(self):
        """Clear the activation bit (write 0x0000 to the action register)."""
        check = self.crc16('090603E80000')
        self.send_command([b'\x09\x06\x03\xE8\x00\x00', check])

    def send_command(self, array):
        """Write each chunk of a request to the serial port, then consume the
        gripper's reply line."""
        for item in array:
            self.stream.write(item)
        self.stream.readline()

    def exit(self):
        """Release the serial port."""
        self.stream.close()

    def crc16(self, data, bits=8):
        """Modbus CRC-16 of a request given as a hex string ('091003E8...').

        Returns the two CRC bytes in wire order (low byte first), ready to be
        appended to the binary request.
        """
        crc = 0xFFFF
        for op, code in zip(data[0::2], data[1::2]):  # walk the string byte by byte
            crc = crc ^ int(op + code, 16)
            for bit in range(0, bits):
                if (crc & 0x0001) == 0x0001:
                    crc = ((crc >> 1) ^ 0xA001)  # Modbus polynomial, reflected
                else:
                    crc = crc >> 1
        return self.typecasting(crc)

    def typecasting(self, crc):
        """Convert the 16-bit CRC value to bytes in wire order (LSB first)."""
        msb = hex(crc >> 8)
        lsb = hex(crc & 0x00FF)
        msg = (lsb + msb).split('0x')
        res = "{0:0>2}".format(msg[1]) + "{0:0>2}".format(msg[2])
        return self.str_to_bin(res)

    def move_finger(self, positions, speeds, forces, scissors=False):
        """Individual finger control.

        positions/speeds/forces: 4-element sequences (0-255 each), presumably
        fingers A, B, C plus the scissor axis -- confirm against the register
        map.  `scissors` selects the register count that also enables
        individual scissor control (0x0908 vs 0x0904).
        """
        msg_pos = [format(hex(a)[2:], '0>2') for a in positions]
        msg_speed = [format(hex(a)[2:], '0>2') for a in speeds]
        msg_force = [format(hex(a)[2:], '0>2') for a in forces]
        if scissors:
            msg_mode = '0908'
        else:
            msg_mode = '0904'
        msg = '00' + msg_pos[0] + msg_speed[0] + msg_force[0] + \
              msg_pos[1] + msg_speed[1] + msg_force[1] + \
              msg_pos[2] + msg_speed[2] + msg_force[2] + \
              msg_pos[3] + msg_speed[3] + msg_force[3] + '00'
        check = self.crc16('091003E8000810' + msg_mode + msg)
        self.send_command([b"\x09\x10\x03\xE8\x00\x08\x10", self.str_to_bin(msg_mode),
                           self.str_to_bin(msg), check])
b68c0ce4167007947ba148041d606344926fa180 | Python | luyajie/stockmining | /stock charting/basicgraph.py | UTF-8 | 6,534 | 2.890625 | 3 | [] | no_license | import time
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.dates as mdates
# change font of the labels
import matplotlib
matplotlib.rcParams.update({'font.size': 9})
# candle stick
from matplotlib.finance import candlestick_ochl
stocks = 'AAPL',
def movingaverage(values, window):
    """Simple moving average of `values` over `window` samples.

    Returns a numpy array of length len(values) - window + 1 ('valid' mode:
    only positions where the window fully overlaps the data).
    """
    kernel = np.ones(window) / window
    return np.convolve(values, kernel, 'valid')
def graphData(stock):
    """Plot an open/high/low/close line chart with a volume subplot for
    `stock`, then show it and save it as example.png.

    Reads 'dataDaily/<stock>.txt' (CSV: date,close,high,low,open,volume with
    dates formatted YYYYMMDD).  Written for Python 2 (print statements,
    `except Exception, e`); any failure is caught and printed.
    """
    try:
        stockFile = 'dataDaily/'+stock+'.txt'
        date,closep,highp,lowp,openp,volume = np.loadtxt(stockFile,delimiter=',',unpack=True,
                                                         converters={0:mdates.strpdate2num('%Y%m%d')})
        fig = plt.figure()
        # subplot 1: price, occupying the top 4/5 of a 5x4 grid
        ax1 = plt.subplot2grid((5,4),(0,0),rowspan=4,colspan=4)
        ax1.plot(date,openp)
        ax1.plot(date,highp)
        ax1.plot(date,lowp)
        ax1.plot(date,closep)
        # axis labels / title
        plt.ylabel('Price ($)')
        plt.title(stock)
        # hide the price subplot's x tick labels (the volume subplot shows dates)
        plt.setp(ax1.get_xticklabels(),visible=False)
        # grid and at most 10 major x ticks
        ax1.grid(True)
        ax1.xaxis.set_major_locator(mticker.MaxNLocator(10))
        for label in ax1.xaxis.get_ticklabels():
            label.set_rotation(45)
        # subplot 2: volume, sharing the x axis with the price subplot
        ax2 = plt.subplot2grid((5,4),(4,0),sharex=ax1,rowspan=1,colspan=4)
        # remove y axis tick labels for subplot 2
        ax2.axes.yaxis.set_ticklabels([])
        ax2.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        ax2.bar(date,volume)
        ax2.grid(True)
        plt.xlabel('Date')
        plt.ylabel('Volume')
        for label in ax2.xaxis.get_ticklabels():
            label.set_rotation(45)
        # adjust plot spacing (hspace=0 glues the two subplots together)
        plt.subplots_adjust(left=.09,bottom=.17,right=.93,top=.93,wspace=.20,hspace=.00)
        plt.show()
        fig.savefig('example.png')
    except Exception, e:
        print 'failed main loop',str(e)
def candlestickData(stock,MA1,MA2):
    """Plot a dark-themed candlestick chart for `stock` with two simple moving
    averages (window sizes MA1 and MA2) and a volume subplot; shows the
    figure and saves it as example.png.

    Reads 'dataDaily/<stock>.txt' (CSV: date,close,high,low,open,volume with
    dates formatted YYYYMMDD).  Written for Python 2; failures are caught
    and printed.
    """
    try:
        stockFile = 'dataDaily/'+stock+'.txt'
        date,closep,highp,lowp,openp,volume = np.loadtxt(stockFile,delimiter=',',unpack=True,
                                                         converters={0:mdates.strpdate2num('%Y%m%d')})
        # build the candlestick data set: one (date, o, c, h, l, v) tuple per day
        x = 0
        y = len(date)
        candleAr = []
        while x < y:
            appendLine = date[x],openp[x],closep[x],highp[x],lowp[x],volume[x]
            candleAr.append(appendLine)
            x+=1
        # generate the moving averages of the close price
        Av1 = movingaverage(closep, MA1)
        Av2 = movingaverage(closep, MA2)
        # SP: number of points where both (shorter) MA series are defined,
        # so the plots below are aligned to the most recent SP dates
        SP = len(date[MA2-1:])
        label1 = str(MA1)+' SMA'
        label2 = str(MA2)+' SMA'
        fig = plt.figure(facecolor='#07000d') # dark background color
        # subplot 1: candlesticks + moving averages (top 4/5 of a 5x4 grid)
        ax1 = plt.subplot2grid((5,4),(0,0),rowspan=4,colspan=4,axisbg='#07000d')
        # candlestick plot (candlestick_ochl: open-close-high-low ordering)
        candlestick_ochl(ax1,candleAr,width=0.8,colorup='#9eff15',colordown='#ff1717')
        # plot the moving averages over the last SP dates
        ax1.plot(date[-SP:],Av1[-SP:],'#6998ff',label=label1,linewidth=1.5)
        ax1.plot(date[-SP:],Av2[-SP:],'#e1edf9',label=label2,linewidth=1.5)
        # white labels and blue spines to match the dark theme
        ax1.yaxis.label.set_color('w')
        ax1.spines['bottom'].set_edgecolor('#5998ff')
        ax1.spines['top'].set_edgecolor('#5998ff')
        ax1.spines['left'].set_edgecolor('#5998ff')
        ax1.spines['right'].set_edgecolor('#5998ff')
        ax1.tick_params(axis='y',colors='w')
        plt.ylabel('Price ($)',color='w')
        plt.suptitle(stock,color='w')
        # legend in the upper-left corner
        plt.legend(loc=2,prop={'size':6})
        # baseline for the volume fill below
        volumeMin = 0
        # hide the price subplot's x tick labels (dates shown on the volume subplot)
        plt.setp(ax1.get_xticklabels(),visible=False)
        ax1.grid(True,color='w')
        ax1.xaxis.set_major_locator(mticker.MaxNLocator(10))
        for label in ax1.xaxis.get_ticklabels():
            label.set_rotation(45)
        # subplot 2: volume, sharing the x axis with the price subplot
        ax2 = plt.subplot2grid((5,4),(4,0),sharex=ax1,rowspan=1,colspan=4,axisbg='#07000d')
        # remove y axis tick labels for subplot 2
        ax2.axes.yaxis.set_ticklabels([])
        ax2.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        ax2.yaxis.label.set_color('w')
        ax2.spines['bottom'].set_edgecolor('#5998ff')
        ax2.spines['top'].set_edgecolor('#5998ff')
        ax2.spines['left'].set_edgecolor('#5998ff')
        ax2.spines['right'].set_edgecolor('#5998ff')
        ax2.tick_params(axis='x',colors='w')
        ax2.tick_params(axis='y',colors='w')
        # volume as a filled line rather than bars
        ax2.plot(date,volume,'#00ffe8',linewidth=.8)
        ax2.fill_between(date,volumeMin,volume,facecolor='#00ffe8',alpha=.5)
        ax2.grid(False)
        plt.xlabel('Date')
        plt.ylabel('Volume')
        for label in ax2.xaxis.get_ticklabels():
            label.set_rotation(45)
        # adjust plot spacing (hspace=0 glues the two subplots together)
        plt.subplots_adjust(left=.09,bottom=.17,right=.93,top=.93,wspace=.20,hspace=.00)
        plt.show()
        fig.savefig('example.png',facecolor=fig.get_facecolor())
    except Exception, e:
        print 'failed main loop',str(e)
# Chart every configured ticker with 12- and 26-period moving averages.
for eachStock in stocks:
    #graphData(eachStock)
    candlestickData(eachStock,12,26)
    #time.sleep(5)
| true |
bafcff98f24fc6e229b92d24bfbf7d737a260cb9 | Python | uenewsar/nlp100fungos | /chap1/02.py | UTF-8 | 350 | 3.6875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# 02. 「パトカー」+「タクシー」=「パタトクカシーー」
# 「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.
a = "パトカー"
b = "タクシー"
c = ''
for i in range(len(a)):
c += a[i]
c += b[i]
print(c)
| true |
f20935e0cd5d6c9ccbadff570790fac797a28205 | Python | ushiko/AOJ | /ALDS1/ALDS1_4_A.py | UTF-8 | 485 | 2.546875 | 3 | [] | no_license | # http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ALDS1_4_A&lang=jp
# Linear Search : python3
# 2018.12.08 yonezawa
#from collections import deque
import sys
input = sys.stdin.readline
#import cProfile
def main():
    """Print how many values appear in both input sequences.

    stdin format: n1, then n1 integers on one line; n2, then n2 integers.
    The declared lengths are read but not otherwise used -- the set
    intersection gives the answer directly.
    """
    n1 = int(input())
    s1 = set(map(int,input().split()))
    n2 = int(input())
    s2 = set(map(int,input().split()))
    print (len(s1 & s2))
if __name__ == '__main__':
main()
# pr = cProfile.Profile()
# pr.runcall(main)
# pr.print_stats() | true |
def two_sum (tom, n):
    """Return the first pair of distinct indices [i, j] with tom[i] + tom[j] == n.

    Same search order as the original brute force, but returns as soon as a
    match is found instead of first materializing the whole O(len(tom)**2)
    list of matching pairs and taking element 0.  A valid pair is assumed to
    exist (as the kata guarantees).
    """
    for i, x in enumerate(tom):
        for j, y in enumerate(tom):
            if i != j and x + y == n:
                return [i, j]
4e34965373351d238b063c5ba410b265b65a28a3 | Python | rajash/Machine-learning | /regression/boston.py | UTF-8 | 671 | 2.78125 | 3 | [] | no_license | import numpy as np
from featureScaling import featureScale
from regression import Regression
from sklearn.datasets import load_boston
boston = load_boston()
X = boston['data']
y = boston['target']
feature_names = boston['feature_names']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
reg = Regression()
reg.gradientDescent(X_train, y_train, None, 0.05)
print('test data: ',y_test[10:15])
print('predicted data: ',reg.predict(X_test[10:15]))
reg.normalEquation(X_train, y_train)
print('test data: ',y_test[10:15])
print('predicted data: ',reg.predict(X_test[10:15]))
| true |
d5a28e3e18cca4272dbb9707a49c1b68f2b2e187 | Python | andreassjoberg/advent-of-code-2017 | /day22.py | UTF-8 | 2,762 | 3.765625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""Day 22 of advent of code"""
def build_grid(input_lines, size):
    """Return a size x size grid of '.' with `input_lines` copied into the centre.

    `input_lines` is the puzzle input: a square block of '#'/'.' characters.
    Integer division (//) keeps the start indices integers on both Python 2
    and Python 3 -- the original used `/`, which yields floats (and so a
    TypeError on indexing) under Python 3.
    """
    grid = [['.'] * size for _ in range(size)]
    block = len(input_lines[0])
    start = size // 2 - block // 2  # top-left corner that centres the block
    for y in range(block):
        for x in range(len(input_lines[y])):
            grid[start + y][start + x] = input_lines[y][x]
    return grid
def print_grid(grid, current_x, current_y):
    """Dump the grid to stdout (Python 2 print statements), marking the
    carrier's current cell with [brackets].  Debug helper; not called
    anywhere in this file's visible code."""
    for y in range(len(grid)):
        for x in range(len(grid[y])):
            if y == current_y and x == current_x:
                print '[' + grid[y][x] + ']',
            else:
                print ' ' + grid[y][x] + ' ',
        print
def part_one(data, bursts=10000):
    """Count bursts that cause an infection for the basic virus carrier.

    `data` is the puzzle input (lines of '.'/'#'); `bursts` generalizes the
    original hard-coded 10000 iterations (default unchanged, so existing
    callers behave identically).  Uses // so the start coordinates stay
    integers under Python 3 (the original `/` produced float indices there).
    """
    size = 1000
    grid = build_grid(data.splitlines(), size)
    current_x = current_y = size // 2
    num_infections = 0

    # Direction encoding:   0 = up
    #                     1   3   (1 = left, 3 = right)
    #                       2 = down
    direction = 0
    for _ in range(bursts):
        if grid[current_y][current_x] == '#':
            # Infected node: turn right, then clean it.
            direction = (direction - 1) % 4
            grid[current_y][current_x] = '.'
        else:
            # Clean node: turn left, then infect it.
            direction = (direction + 1) % 4
            grid[current_y][current_x] = '#'
            num_infections += 1

        # Step one cell in the current direction.
        if direction == 0:
            current_y -= 1
        elif direction == 1:
            current_x -= 1
        elif direction == 2:
            current_y += 1
        elif direction == 3:
            current_x += 1

    return num_infections
def part_two(data, bursts=10000000):
    """Count infecting bursts for the evolved carrier (weakened/flagged states).

    Node states: '.' clean, 'W' weakened, '#' infected, 'F' flagged.
    `bursts` generalizes the original hard-coded 10000000 iterations
    (default unchanged).  Uses // so the start coordinates stay integers
    under Python 3.
    """
    size = 1000
    grid = build_grid(data.splitlines(), size)
    current_x = current_y = size // 2
    num_infections = 0

    # Direction encoding:   0 = up
    #                     1   3   (1 = left, 3 = right)
    #                       2 = down
    direction = 0
    for _ in range(bursts):
        node = grid[current_y][current_x]
        if node == '#':
            # Infected: turn right, becomes flagged.
            direction = (direction - 1) % 4
            grid[current_y][current_x] = 'F'
        elif node == '.':
            # Clean: turn left, becomes weakened.
            direction = (direction + 1) % 4
            grid[current_y][current_x] = 'W'
        elif node == 'W':
            # Weakened: keep going straight, becomes infected.
            grid[current_y][current_x] = '#'
            num_infections += 1
        elif node == 'F':
            # Flagged: reverse direction, becomes clean.
            direction = (direction + 2) % 4
            grid[current_y][current_x] = '.'

        # Step one cell in the current direction.
        if direction == 0:
            current_y -= 1
        elif direction == 1:
            current_x -= 1
        elif direction == 2:
            current_y += 1
        elif direction == 3:
            current_x += 1

    return num_infections
# Entry point: read the puzzle input and print both answers
# (Python 2 print statements, matching the rest of the file).
if __name__ == '__main__':
    with open('day22.input', 'r') as f:
        INPUT_DATA = f.read()
    print part_one(INPUT_DATA)
    print part_two(INPUT_DATA)
| true |
efc5fc0d7e206532d71d4f22c665b5588869128e | Python | bzhulex/CS3251_PA2 | /p2pclient.py | UTF-8 | 27,396 | 2.84375 | 3 | [] | no_license | """
Follow the instructions in each method and complete the tasks. We have given most of the house-keeping variables
that you might require, feel free to add more if needed. Hints are provided in some places about what data types
can be used, others are left to students' discretion, make sure that what you are returning from one method gets correctly
interpreted on the other end. Most functions ask you to create a log, this is important
as this is what the auto-grader will be looking for.
Follow the logging instructions carefully.
"""
"""
Appending to log: every time you have to add a log entry, create a new dictionary and append it to self.log. The dictionary formats for diff. cases are given below
Registration: (R)
{
"time": <time>,
"text": "Client ID <client_id> registered"
}
Unregister: (U)
{
"time": <time>,
"text": "Unregistered"
}
Fetch content: (Q)
{
"time": <time>,
"text": "Obtained <content_id> from <IP>#<Port>
}
Purge: (P)
{
"time": <time>,
"text": "Removed <content_id>"
}
Obtain list of clients known to a client: (O)
{
"time": <time>,
"text": "Client <client_id>: <<client_id>, <IP>, <Port>>, <<client_id>, <IP>, <Port>>, ..., <<client_id>, <IP>, <Port>>"
}
Obtain list of content with a client: (M)
{
"time": <time>,
"text": "Client <client_id>: <content_id>, <content_id>, ..., <content_id>"
}
Obtain list of clients from Bootstrapper: (L)
{
"time": <time>,
"text": "Bootstrapper: <<client_id>, <IP>, <Port>>, <<client_id>, <IP>, <Port>>, ..., <<client_id>, <IP>, <Port>>"
}
"""
import socket
import time
import json
from enum import Enum
import random
import pickle
import threading
import struct
class Status(Enum):
    # Lifecycle of this peer relative to the bootstrapper.
    INITIAL = 0       # constructed, not yet registered with the bootstrapper
    REGISTERED = 1    # registered and reachable by other peers
    UNREGISTERED = 2  # deregistered from the bootstrapper but still running
class p2pclient:
    def __init__(self, client_id, content, actions):
        """Set up this peer: store its identity, content and action schedule,
        bind a server socket on a port derived from the client id, and
        register with the bootstrapper (assumed at 127.0.0.1:8888).
        """
        self.client_id = client_id
        self.content = content
        self.actions = actions # this list of actions that the client needs to execute
        # Maps content to its originating client; starts empty and is filled
        # in as content is fetched.  (The assignment to None is immediately
        # overwritten by the original code -- kept as-is.)
        self.content_originator_list = None # This needs to be kept None here, it will be built eventually
        self.content_originator_list = {}
        # 'log' records the series of events on this client, one dict per
        # event; see the module docstring for the exact entry formats.
        self.log = []
        # Server half of the peer.  The listening port is chosen
        # deterministically from the client id (random is seeded with it),
        # so repeated runs bind the same port per client.
        time.sleep(.1)
        self.p2pclientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        random.seed(client_id)
        self.port = random.randint(9000, 9999)
        self.p2pclientsocket.bind(('127.0.0.1', self.port))
        # Implicit initial registration: curr_time=0 tells register() not to
        # write a log entry for it.
        self.status = Status.INITIAL
        self.register(0)
        self.status = Status.REGISTERED
def start_listening(self):
##############################################################################
# TODO: This function will make the client start listening on the randomly #
# chosen server port. Refer to #
# https://docs.python.org/3/howto/sockets.html on how to do this. #
# You will need to link each connecting client to a new thread (using #
# client_thread function below) to handle the requested action. #
##############################################################################
#code added by Anna Gardner
self.p2pclientsocket.listen()
while True:
# accept connections from outside
#print('listening')
(clientsocket, (ip, port)) = self.p2pclientsocket.accept()
#print('accepted')
#print(" client ip and port "+ip + " " + str(port))
clientThread = threading.Thread(target = self.client_thread, args = (clientsocket, ip, port))
clientThread.start()
#end of code added by Anna
def client_thread(self, clientsocket, ip, port):
##############################################################################
# TODO: This function should handle the incoming connection requests from #
# other clients.You are free to add more arguments to this function #
# based your need #
# HINT: After reading the input from the buffer, you can decide what #
# action needs to be done. For example, if the client is requesting #
# list of known clients, you can return the output of self.return_list_of_known_clients #
##############################################################################
while True:
data = clientsocket.recv(1024).decode('utf-8')
data = data.replace('"', '')
if data:
print("~~~~~p2pclient_thread data: " +data)
data_arr = data.split(" ")
#client_id = data_arr[0]
data = data_arr[1]
ip = data_arr[2]
port = data_arr[3]
if data == 'START':
#print('start was called')
self.start()
if data == 'knownClientsPlease' :
client_list = self.return_list_of_known_clients()
toSend = json.dumps(client_list)
clientsocket.send(toSend.encode('utf-8'))
elif data == 'contentList' :
content_list = self.return_content_list()
#sorted_list = sorted(content_list, key=lambda x: x[0])
toSend = json.dumps(content_list)
print("~~~~~got send content list flag sending: "+ str(self.client_id) +" " +toSend)
clientsocket.send(toSend.encode('utf-8'))
def register(self, curr_time, ip='127.0.0.1', port=8888):
##############################################################################
# TODO: Register with the bootstrapper. Make sure you communicate the server#
# port that this client is running on to the bootstrapper. #
# Append an entry to self.log that registration is successful #
##############################################################################
#if self.status == Status.INITIAL:
bootstrapperSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
bootstrapperSocket.connect((ip, port))
#data = pickle.loads(clientsocket.recv())
#self.client_id = data
toSend = str(str(self.client_id) + ' register '+ str(ip) +' '+str(self.port))
bootstrapperSocket.send(toSend.encode('utf-8'))
bootstrapperSocket.close()
if curr_time != 0:
register_dict = {}
register_dict["time"] = curr_time
register_dict["text"] = str("Client ID " +str(self.client_id)+" registered")
self.log.append(register_dict)
def deregister(self, curr_time, ip='127.0.0.1', port=8888):
##############################################################################
# TODO: Deregister with the bootstrapper #
# Append an entry to self.log that deregistration is successful #
##############################################################################
bootstrapperSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
bootstrapperSocket.connect((ip, port))
toSend = str(str(self.client_id) + ' deregister '+ str(ip) +' '+str(self.port))
# var = struct.pack('i', len(toSend))
# self.bootstrapperSocket.send(var)
bootstrapperSocket.send(toSend.encode('utf-8'))
bootstrapperSocket.close()
dereg_dict = {}
dereg_dict["time"] = curr_time
dereg_dict["text"] = "Unregistered"
self.log.append(dereg_dict)
#print("DEREGISTER FINISHED")
def start(self):
##############################################################################
# TODO: The Bootstrapper will call this method of the client to indicate #
# that it needs to start its actions. Once this is called, you have to#
# start reading the items in self.actions and start performing them #
# sequentially, at the time they have been scheduled for. #
# HINT: You can use time library to schedule these. #
##############################################################################
##############################################################################
# TODO: ***IMPORTANT*** #
# At the end of your actions, “export” self.log to a file: client_x.json, #
# this is what the autograder is looking for. Python’s json package should #
# come handy. #
##############################################################################
start = time.time()
action_num = 0
while action_num < len(self.actions):
while_start = time.time()
# while self.actions[action_num]["time"] < time_diff:
# pass
curr_time = self.actions[action_num]["time"]
print("CLIENT "+ str(self.client_id)+" ACTION NUM: "+str(action_num) + " CODE: " + self.actions[action_num]["code"]+ " CURR TIME " + str(curr_time))
#print("ACTION NUM: "+str(action_num) + "CURR TIME " + str(curr_time))
code = self.actions[action_num]["code"]
if code == "R":
self.register(curr_time)
elif code == "U":
self.deregister(curr_time)
elif code == "Q":
self.request_content(self.actions[action_num]["content_id"], curr_time)
elif code == "P":
self.purge_content(self.actions[action_num]["content_id"], curr_time)
elif code == "O":
#print("time: "+str(curr_time)+" client "+ str(self.client_id) + " O: query_client_for_known_client "+ str(self.actions[action_num]["client_id"]))
self.query_client_for_known_client(self.actions[action_num]["client_id"], curr_time)
elif code == "M":
#print("time: "+str(curr_time)+" client "+ str(self.client_id) + " M: query_client_for_content_list "+ str(self.actions[action_num]["client_id"]))
self.query_client_for_content_list(self.actions[action_num]["client_id"], curr_time)
elif code == "L":
self.query_bootstrapper_all_clients(curr_time)
action_num += 1
while_end = time.time()
time.sleep(1.5 - (while_end-while_start))
string = "client_" + str(self.client_id) + ".json"
outfile = open(string, "w")
json.dump(self.log, outfile)
outfile.close()
# def clean_client_list(self, toReturn):
# count = 0
# var = '<'
# for client in toReturn:
# var += '<'
# for i in range(len(client)):
# piece = client[i]
# if i < len(client) - 1:
# var += str(piece)+', '
# else:
# var += str(piece)
# i += 1
# if count < len(toReturn) - 1:
# var += ">, "
# else:
# var += ">"
# count += 1
# var += '>'
# return var
def query_bootstrapper_all_clients(self, curr_time, log = True, ip='127.0.0.1', port=8888):
##############################################################################
# TODO: Use the connection to ask the bootstrapper for the list of clients #
# registered clients. #
# Append an entry to self.log #
##############################################################################
#print(" start query_bootstrapper_all_clients")
while self.status == Status.INITIAL:
pass
bootstrapperSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
bootstrapperSocket.connect((ip, port))
toSend = str(str(self.client_id) + ' sendList '+ '127.0.0.1' +' '+str(self.port))
# var = struct.pack('i', len(toSend))
# self.bootstrapperSocket.send(var)
bootstrapperSocket.send(toSend.encode('utf-8'))
#print(" sendList flag sent")
# length = self.bootstrapperSocket.recv(4)
# length_hdr = struct.unpack('i', length)[0]
data = bootstrapperSocket.recv(1048).decode('utf-8')
bootstrapperSocket.close()
clientDataToList = json.loads(data)
newData = data.replace('[','<').replace(']','>').replace('\"','')
newestData = newData[1:len(newData)-1]
if log:
q_dict = {}
q_dict['time'] = curr_time
q_dict['text'] = "Bootstrapper: "+newestData
#print(" query all clients id: "+str(self.client_id)+ " "+var)
self.log.append(q_dict)
return clientDataToList
    def query_client_for_known_client(self, client_id, curr_time, log = True, ip='127.0.0.1', port=8888):
        """Ask peer `client_id` for the list of clients it knows about.

        The peer's address is first resolved through the bootstrapper, then a
        'knownClientsPlease' request is sent directly to that peer. Returns
        the decoded JSON list of clients on success; implicitly returns None
        when `client_id` is not registered with the bootstrapper. When `log`
        is True, a "Client <id>: <...>" entry is appended to self.log.
        """
        correctClient = []
        # Resolve client_id -> [id, ip, port] via the bootstrapper (unlogged).
        bootstrapperClients = self.query_bootstrapper_all_clients(curr_time, log=False)
        count = 0
        for client in bootstrapperClients:
            if client[0] == client_id:
                correctClient = client
                break
            count += 1
        # Busy-spin until self.status leaves INITIAL (presumably set once this
        # client's own server thread is up -- not visible in this chunk).
        while self.status == Status.INITIAL:
            pass
        if len(correctClient) > 0:
            otherClientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            otherClientSocket.connect((correctClient[1], int(correctClient[2])))
            toSend = str(str(self.client_id) + ' knownClientsPlease '+ '127.0.0.1' +' '+str(self.port))
            otherClientSocket.send(toSend.encode('utf-8'))
            data = otherClientSocket.recv(1048).decode('utf-8')
            otherClientSocket.close()
            clientDataToList = json.loads(data)
            # Reformat the raw JSON list-of-lists as <<id, ip, port>, ...>
            # for the human-readable log entry.
            newData = data.replace('[','<').replace(']','>').replace('\"','')
            newestData = newData[1:len(newData)-1]
            if log:
                q_dict = {}
                q_dict['time'] = curr_time
                q_dict['text'] = "Client " + str(client_id) + ": " + newestData
                print("~~~~~query_client_for_known_client log: "+ "time " + str(curr_time) + " Client " + str(client_id) + ": " + data)
                self.log.append(q_dict)
            return clientDataToList
    def return_list_of_known_clients(self):
        """Return the clients this node knows about, as [id, ip, port] lists.

        Known clients are derived from self.content_originator_list: the set
        of distinct originators recorded while fetching content, excluding
        this client itself. The result is sorted in descending order.

        NOTE(review): when self.client_id == 1 a single flat [id, ip, port]
        triple is returned instead of a list of triples -- confirm callers
        expect this asymmetry.
        """
        returnClients = set()
        # Deduplicate originators via a set of (id, ip, port) tuples.
        for content in self.content_originator_list:
            if self.client_id != int(self.content_originator_list[content][0]):
                returnClients.add((self.content_originator_list[content][0], self.content_originator_list[content][1], self.content_originator_list[content][2]))
        returnClients = [list(i) for i in returnClients]
        returnClients = sorted(returnClients, reverse=True)
        if self.client_id == 1:
            return [self.client_id, '127.0.0.1', self.port]
        # Entries are already lists at this point; this comprehension simply
        # returns a fresh copy.
        return [list(i) for i in returnClients]
    def query_client_for_content_list(self, client_id, curr_time, log = True):
        """Ask peer `client_id` for the list of content it currently holds.

        The peer's address is resolved through the bootstrapper, then a
        'contentList' request is sent directly to the peer. Returns the
        decoded JSON content list on success; implicitly returns None when
        `client_id` is not registered. When `log` is True, a
        "Client <id>: <...>" entry is appended to self.log.
        """
        correctClient = []
        if log:
            print("~~~~~~~~~~~query_client_for_content_list called from client_thread")
        # Resolve client_id -> [id, ip, port] via the bootstrapper (unlogged).
        bootstrapperClients = self.query_bootstrapper_all_clients(curr_time, log=False)
        count = 0
        for client in bootstrapperClients:
            if client[0] == client_id:
                print("~~~~~client: "+str(self.client_id)+ " found correct client at: "+ str(count) + " " +str(client[0]) + " " + client[1] + " " + str(client[2]))
                correctClient = client
                break
            count += 1
        # Busy-spin until self.status leaves INITIAL (presumably set once this
        # client's own server thread is up -- not visible in this chunk).
        while self.status == Status.INITIAL:
            pass
        if len(correctClient) > 0:
            otherClientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            otherClientSocket.connect((correctClient[1], int(correctClient[2])))
            toSend = str(str(self.client_id) + ' contentList '+ '127.0.0.1' +' '+str(self.port))
            otherClientSocket.send(toSend.encode('utf-8'))
            data = otherClientSocket.recv(1048).decode('utf-8')
            otherClientSocket.close()
            print("~~~~~~~~~~~content list from client: "+str(client_id)+ " "+data)
            clientDataToList = json.loads(data)
            # Reformat the raw JSON list as <...> for the human-readable log.
            newData = data.replace('[','<').replace(']','>').replace('\"','')
            newestData = newData[1:len(newData)-1]
            if log:
                q_dict = {}
                q_dict['time'] = curr_time
                q_dict['text'] = "Client " + str(client_id) + ": " + newestData
                self.log.append(q_dict)
                print("~~~~~~~~~~~done logging")
            return clientDataToList
def return_content_list(self):
##############################################################################
# TODO: Return the content list that you have (self.content) #
##############################################################################
return self.content.copy()
    def request_content(self, content_id, curr_time):
        """Locate `content_id` on the network and add it to self.content.

        Strategy: fetch the bootstrapper's client list, then poll each peer's
        content list in order. Every content id seen is recorded in
        self.content_originator_list (the "COL"), so the search can use it as
        a hint: if the wanted id is already in the COL, the scan jumps
        straight to that originator's index. Iterations are capped at 10 to
        bound hint-chasing loops.

        NOTE(review): if the content is never found, the final log entry is
        still written using the placeholder ["1","IP","Port"] originator --
        confirm this is acceptable to the grader.
        """
        # Placeholder originator; overwritten when the content is found.
        correctContentClient = ["1","IP","Port"]
        bootstrapperClients = self.query_bootstrapper_all_clients(curr_time, log=False)
        client_index = 0
        hint = 0
        found = False
        loop_count = 0
        # Parallel list of ids so a hint (a client id) maps back to an index.
        list_of_ids = [i[0] for i in bootstrapperClients]
        print("~~~~~list of ids "+json.dumps(list_of_ids))
        while not found and client_index < len(bootstrapperClients) and loop_count < 10:
            print("~~~~~Client index "+str(client_index) + " " + str(len(bootstrapperClients)))
            client = bootstrapperClients[client_index]
            print("~~~~~Client: "+str(self.client_id)+" Looking at client: "+str(client[0]))
            if client[0] != self.client_id:
                contentList = self.query_client_for_content_list(client[0],curr_time, log=False)
                if not contentList:
                    print("~~~~~content list is empty "+str(client[0]))
                    break
                in_content_list = False
                in_col = None
                # Record every advertised content id in the COL while scanning.
                for content in contentList:
                    self.content_originator_list[content] = [client[0], client[1], client[2]]
                    if content == content_id:
                        print("~~~~~found content in "+str(client[0]))
                        correctContentClient = [client[0], client[1], client[2]]
                        found = True
                        in_content_list = True
                        break
                if not in_content_list:
                    if content_id in self.content_originator_list:
                        # Hint available: jump to the hinted originator's index.
                        # NOTE(review): list_of_ids.index(hint) raises
                        # ValueError if the hinted client is no longer
                        # registered -- confirm this cannot happen.
                        in_col = self.content_originator_list[content_id]
                        hint = in_col[0]
                        print("~~~~~found hint at client_id "+str(hint))
                        client_index = list_of_ids.index(hint)
                    else:
                        client_index += 1
                else:
                    client_index += 1
            # NOTE(review): when the current entry is this client itself,
            # client_index is never advanced; the loop then only terminates
            # via the loop_count < 10 cap -- confirm this is intended.
            loop_count += 1
        # Record the obtained content. Copy-then-replace presumably keeps the
        # swap atomic for a concurrently reading server thread -- TODO confirm.
        content_copy = self.content.copy()
        content_copy.append(content_id)
        self.content = content_copy
        print("~~~~~just added new content to client_id: "+str(self.client_id)+ " " + json.dumps(self.content))
        q_dict = {}
        q_dict["time"] = curr_time
        q_dict["text"] = str("Obtained "+str(content_id)+" from " + correctContentClient[1] + "#" + correctContentClient[2])
        self.log.append(q_dict)
def purge_content(self, content_id, curr_time):
#####################################################################################################
# TODO: Delete the content from your content list #
# Append an entry to self.log that content is purged #
#####################################################################################################
#still gotta do this
self.content.remove(content_id)
purge_dict = {}
purge_dict["time"] = curr_time
purge_dict["text"] = str("Removed "+str(content_id))
self.log.append(purge_dict)
| true |
bd5a7aafc2a007e300086f58552a3d6174b04195 | Python | AlexBlazee/Learningcurve-opencv | /timestamp.py | UTF-8 | 987 | 2.984375 | 3 | [] | no_license | import picamera
import cv2
from subprocess import call
from datetime import datetime
from time import sleep

# Folder where timestamped pictures are stored.
filePath = "C:/Users/Royale121/Desktop/lane dec/"

# Grab the current time once so the filename and the stamp text agree.
currentTime = datetime.now()

# Build the picture's filename from the timestamp.
# Fix: the original defined `picTIme`/`filepath` but then read
# `picTime`/`filePath`, raising NameError before anything ran.
picTime = currentTime.strftime("%Y.%m.%d-%H%M%S")
picName = picTime + '.jpg'
completeFilePath = filePath + picName

# Take the picture. Fix: the camera class is picamera.PiCamera,
# not picamera.Picamera.
with picamera.PiCamera() as camera:
    camera.resolution = (1280, 720)
    camera.capture(completeFilePath)
print("We have taken the picture ")

# Human-readable stamp to draw onto the image.
timestampMessage = currentTime.strftime("%Y.%m.%d - %H:%M:%S")

# ImageMagick command that annotates the picture in red, bottom-right.
timestampCommand = "/usr/bin/convert " + completeFilePath + " -pointsize 36 -fill red \
-annotate +700+650 '" + timestampMessage + "' " + completeFilePath

# Execute the command. Fix: with shell=True the command should be passed
# as a single string, not a list.
call(timestampCommand, shell=True)
print("We have timestamped our picture!")
| true |
8e63217eea47bd86f375f646534c1de91aec0e0d | Python | aspose-email/Aspose.Email-Python-Dotnet | /Examples/POP3/GettingMailboxInfo.py | UTF-8 | 755 | 2.65625 | 3 | [
"MIT"
] | permissive | from aspose.email.clients.pop3 import Pop3Client
from aspose.email.clients import SecurityOptions
def run():
    """Connect to Gmail over POP3 and print mailbox statistics."""
    client = Pop3Client("pop.gmail.com", 995, "username", "password")
    client.security_options = SecurityOptions.AUTO

    #ExStart:GettingMailboxInfo
    # Total size of the mailbox, in bytes.
    mailbox_size = client.get_mailbox_size()
    print("Mailbox size is " + str(mailbox_size) + " bytes.")

    # Summary info: message count and occupied size.
    mailbox_info = client.get_mailbox_info()
    print("Number of messages in mailbox are " + str(mailbox_info.message_count))
    print("Occupied size is " + str(mailbox_info.occupied_size))
    #ExEnd: GettingMailboxInfo


if __name__ == '__main__':
    run()
| true |
f0c2a3870ac1dcb68067875025265ce8a9f8c8ff | Python | csaddison/Physics-129L | /Homework_5/exercise3.py | UTF-8 | 568 | 3.046875 | 3 | [] | no_license | #------------------------------------------------------------
# Conner Addison 8984874
# Physics 129L
#------------------------------------------------------------
# Homework 5, Exercise 3

import math
import numpy as np
import matplotlib.pyplot as plt
from ccHistStuff import statBox

# Read the mass measurements from disk.
data_file = 'mass.txt'
n_bins = 20
masses = np.loadtxt(data_file)

# Histogram the masses and overlay a statistics box.
fig, ax = plt.subplots()
ax.hist(masses, n_bins)
ax.set(title='Mass frequency distribution', xlabel='Mass', ylabel='Frequency of value')
statBox(ax, masses, masses)
plt.show()
e35b24507213967fbf4938e57799d61684108985 | Python | a-khomitskyi/SJ-Practice | /Task 2/app.py | UTF-8 | 593 | 2.53125 | 3 | [] | no_license | from flask import Flask, request, render_template
from aggregator import parse
app = Flask(__name__)


@app.route('/convert_currency/', methods=['GET'])
def convert_func():
    """Convert `amount` of currency `currency` into UAH via aggregator rates.

    Query parameters:
        currency -- letter currency code (e.g. 'USD'); matched
                    case-insensitively against the 'Код літерний' field.
        amount   -- numeric amount to convert.
    """
    user_currency = request.args.get('currency', '')
    user_amount = request.args.get('amount', '')
    # Robustness fix: the original crashed with TypeError/ValueError when
    # the amount parameter was missing or non-numeric.
    try:
        amount = float(user_amount)
    except (TypeError, ValueError):
        return "Missing or invalid 'amount' parameter", 400
    for rate in parse():
        if user_currency.upper() == rate['Код літерний']:
            result = rate['Офіційний курс'] / rate['Кількість одиниць валюти'] * amount
            return render_template('index.html', result=result)
    # Bug fix: the original fell through and returned None (an HTTP 500)
    # when the currency code was not found.
    return "Unknown currency code", 404


if __name__ == '__main__':
    app.run('0.0.0.0')