blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
143cd8185199f0ec326c01030553e6c06d149f17 | 918a575dbf2f5065341c045434afd7cdd69c5f3e | /insta.py | c0fa9f3269a911cdfbb9857cb06708f70226a9ae | [] | no_license | arkarthick/instagramscambot | 8a083ac94797d77928292fb12fc5567334f2756a | 21f335ab6acfd0e1f1120588299eb7ad1a8ed454 | refs/heads/master | 2021-05-20T00:18:11.659104 | 2020-04-06T15:03:28 | 2020-04-06T15:03:28 | 252,103,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,792 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from time import sleep
from random import randint
from details import dob
import asset.password as password
ASSET = 'asset/'
def login(username):
    """Log in to Instagram as `username`, dismiss the notification prompt,
    open the profile page and cache the displayed handle in the module-level
    `username_cap` global (used later by goto_profile() and run()).

    Relies on the module-level selenium `driver` already showing the
    Instagram login page and on `password.password` for the credential.
    NOTE(review): the hard-coded XPaths are brittle and will break whenever
    Instagram changes its page markup.
    """
    #enter the username or mail and password
    try:
        sleep(3)  # wait for the login form to render
        inElement = driver.find_element_by_name('username')
        inElement.send_keys(username)
        inElement = driver.find_element_by_name('password')
        inElement.send_keys(password.password)
        inElement.send_keys(Keys.RETURN)  # submit the login form
        # login_btn = driver.find_element_by_xpath('/html/body/div[1]/section/main/div/article/div/div[1]/div/form/div[4]/button').send_keys(Keys.RETURN)
        # print('logged in')
        sleep(7)  # give the post-login page time to load
        print('logged in as '+ username)
        # Dismiss the "Turn on Notifications" dialog if it appears.
        try:
            not_now = driver.find_element_by_xpath('/html/body/div[4]/div/div/div[3]/button[2]')
            not_now.click()
            print('notification turned off')
            sleep(2)
        except NoSuchElementException:
            pass  # dialog not shown; nothing to dismiss
    except NoSuchElementException:
        print('login failed')
    #goes to the profile area
    # The profile icon's position differs between page layouts, so fall back
    # to an alternative XPath when the first lookup fails.
    try:
        path_profile = '/html/body/div[1]/section/nav/div[2]/div/div/div[3]/div/div[5]/span'
        profile = driver.find_element_by_xpath(path_profile)
    except NoSuchElementException:
        path_profile = '/html/body/div[1]/section/nav/div[2]/div/div/div[3]/div/div[4]/span'
        profile = driver.find_element_by_xpath(path_profile)
    try:
        profile.click()
        sleep(2)
    except NoSuchElementException:
        print('profile icon not found')
    #get the username
    global username_cap
    try:
        username_cap = driver.find_element_by_xpath('/html/body/div[1]/section/main/div/header/section/div[1]/h2').text
        # return username_cap
    except:
        # NOTE(review): this bare-except retry repeats the exact same lookup,
        # so it will fail identically if the first attempt raised.
        username_cap = driver.find_element_by_xpath('/html/body/div[1]/section/main/div/header/section/div[1]/h2').text
    print(username_cap)
# def get_username():
# # global username_cap
# goto_profile_init()
def goto_profile():
    """Navigate the driver to the cached user's profile page."""
    profile_url = 'https://instagram.com/' + username_cap + '/'
    driver.get(profile_url)
def goto_editprofile():
    """Open the profile page, then click the "Edit Profile" button."""
    goto_profile()
    edit_btn_xpath = '/html/body/div[1]/section/main/div/header/section/div[1]/a/button'
    try:
        driver.find_element_by_xpath(edit_btn_xpath).click()
        sleep(1)
    except NoSuchElementException:
        print('edit profile btn not found')
def profilesetting():
    """Open the profile page, then click the settings (gear) icon."""
    goto_profile()
    settings_xpath = '/html/body/div[1]/section/main/div/header/section/div[1]/div/button'
    try:
        driver.find_element_by_xpath(settings_xpath).click()
        sleep(2)
    except NoSuchElementException:
        print('profile setting not found')
def privacy_setting():
    """Toggle the account's private/public setting via the settings menu.

    Opens the settings pop-up from the profile page, clicks the "Privacy"
    entry and then the privacy checkbox.  Failures at either step are
    reported rather than raised, matching the other helpers in this module.
    """
    profilesetting()
    path_privacy = '/html/body/div[4]/div/div/div/button[5]'
    path_privacy_chkbx = '/html/body/div[1]/section/main/div/article/main/section[1]/div/div/div/label/div'
    try:
        privacy = driver.find_element_by_xpath(path_privacy)
        privacy.click()
        sleep(5)  # wait for the privacy settings page to load
        try:
            privacy_chkbx = driver.find_element_by_xpath(path_privacy_chkbx)
            privacy_chkbx.click()
            print('Privacy of the account is changed')
            sleep(5)
            goto_profile()
        # Was a bare `except:` (which would even swallow KeyboardInterrupt);
        # narrowed so only real lookup/click failures are reported here.
        except Exception:
            print('checkbox not found')
    except NoSuchElementException:
        print('privacy_setting failed')
def generate_bio(bio_data):
    """Combine `bio_data` with the birth date from details.dob().

    Returns `bio_data` unchanged when no date of birth is available;
    otherwise returns the bio text and the date joined by a newline, in a
    randomly chosen order.
    """
    date_of_birth = dob()
    # `is not None` instead of `!= None`: identity comparison is the
    # idiomatic (and safe) way to test for None.
    if date_of_birth is not None:
        bio = [bio_data + '\n' + date_of_birth, date_of_birth + '\n' + bio_data]
        return bio[randint(0, 1)]
    return bio_data
def set_bio():
    """Fill in the profile bio on the edit-profile page and submit it."""
    base_text = 'I’m not smart; I just wear glasses.'
    goto_editprofile()
    new_bio = generate_bio(base_text)
    bio_xpath = '//*[@id="pepBio"]'
    submit_xpath = '/html/body/div[1]/section/main/div/article/form/div[11]/div/div/button[1]'
    try:
        driver.find_element_by_xpath(bio_xpath).send_keys(new_bio)
        driver.find_element_by_xpath(submit_xpath).click()
        sleep(4)
        print('bio updated')
        goto_profile()
    except NoSuchElementException:
        print('set_bio operation failed')
def logout():
    """Log out of the current account via the settings menu.

    Opens the settings pop-up, clicks "Log Out", then confirms the
    follow-up dialog if one appears.
    """
    profilesetting()
    path_logout = '/html/body/div[4]/div/div/div/button[9]'
    # The old code bound click()'s None return value to a local named
    # `logout`, shadowing this function; just click the button.
    driver.find_element_by_xpath(path_logout).click()
    sleep(2)
    print('logged out')
    print('\n\n')
    try:
        driver.find_element_by_xpath('/html/body/div[4]/div/div/div[2]/button[1]').click()
    except NoSuchElementException:
        pass  # confirmation dialog not shown
    sleep(2)
def like(link_id):
    """Open the post with the given shortcode and click its like button."""
    post_url = 'https://www.instagram.com/p/' + link_id + '/'
    driver.get(post_url)
    sleep(5)
    like_xpath = '/html/body/div[1]/section/main/div/div[1]/article/div[2]/section[1]/span[1]/button'
    try:
        driver.find_element_by_xpath(like_xpath).click()
        sleep(6)
        print('liked')
        sleep(5)
        goto_profile()
    except Exception:
        print('like btn not found')
def change_profile():
    # Unimplemented stub — presumably meant to update the profile picture
    # (judging by the name); TODO confirm intended behavior before filling in.
    pass
def follow(username):
    """Open `username`'s profile page and click the Follow button.

    Wrapped in the same NoSuchElementException guard as the other helpers
    in this module, so a missing/renamed button is reported instead of
    crashing the whole run loop.
    """
    url = 'https://www.instagram.com/' + username + '/'
    driver.get(url)
    sleep(2)
    follow_xpath = '/html/body/div[1]/section/main/div/header/section/div[1]/div[1]/span/span[1]/button'
    try:
        follow_btn = driver.find_element_by_xpath(follow_xpath)
        follow_btn.click()
    except NoSuchElementException:
        print('follow btn not found')
    sleep(10)
def run(user_id, link_id_list, usernames):
    """Main driver loop: for every account in `user_id`, log in, append the
    resolved handle to asset/username.txt, and log out.

    `link_id_list` and `usernames` are currently unused — the like/follow
    steps below are commented out.
    """
    driver.get('https://www.instagram.com')
    for user in user_id:
        login(user)  # also populates the module-level `username_cap`
        # username = get_username()
        # get_username()
        with open(ASSET+'username.txt', 'a') as f:
            f.write(username_cap+'\n')
        # privacy_setting()
        # # set_bio()
        # if link_id_list != None:
        #     for link in link_id_list:
        #         if link != ' ':
        #             like(link)
        # if usernames !=None:
        #     for username in usernames:
        #         follow(username)
        logout()
# Module-level script: start the browser, then process every account listed
# in asset/mail_id.txt via run().
driver = webdriver.Firefox()
# 'B-ZvdXZha17', 'B-R_W0IBEYO','B-coY20KYZV'
link_id_list = ['B-ZvdXZha17', 'B-R_W0IBEYO','B-coY20KYZV']  # post shortcodes (unused while like() is commented out in run)
usernames = None  # no follow targets
# user_id = ['paviparvathi123@hotmail.com']
with open(ASSET+'mail_id.txt', 'r') as f:
    user_id = f.readlines()  # NOTE(review): entries keep their trailing '\n'
run(user_id, link_id_list, usernames) | [
"arkarthick21@gmail.com"
] | arkarthick21@gmail.com |
d2d9be4351b49f74dac271402be606547ab75990 | a3e274be3c07335875d0b2e9cee509193abc733f | /tests/test_frontend.py | fb53bb89cafc04d83599d93a31d940e97d3016e2 | [] | no_license | faisalburhanudin/dss | 4282ff4cfff246c26f3eb5ac9f1a5cbecbcd6882 | 86fb43538f769a5419191e14d49444cde18274ad | refs/heads/master | 2021-01-01T04:12:43.700138 | 2016-06-09T13:44:31 | 2016-06-09T13:44:31 | 58,213,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | from tests.mock import ServerTest
class FrontendTestCase(ServerTest):
def test_home(self):
response = self.app.get("/")
self.assertIn("Sistem pendukung keputusan", response.data) | [
"gogildrive@gmail.com"
] | gogildrive@gmail.com |
d40cd31abf11f56654ac4527dae256fda859f677 | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/monitoring/metricsscope/v1/monitoring-metricsscope-v1-py/google/monitoring/metricsscope_v1/types/metrics_scope.py | a93ce94034c247585e1062da91012ee0aa0ff16c | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,454 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# Register this module's message types with the proto-plus runtime.
# Generated file — changes belong in the .proto source.
__protobuf__ = proto.module(
    package='google.monitoring.metricsscope.v1',
    manifest={
        'MetricsScope',
        'MonitoredProject',
    },
)
class MetricsScope(proto.Message):
    r"""Represents a `Metrics
    Scope <https://cloud.google.com/monitoring/settings#concept-scope>`__
    in Cloud Monitoring, which specifies one or more Google projects and
    zero or more AWS accounts to monitor together.

    Attributes:
        name (str):
            Immutable. The resource name of the Monitoring Metrics
            Scope. On input, the resource name can be specified with the
            scoping project ID or number. On output, the resource name
            is specified with the scoping project number. Example:
            ``locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}``
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time when this ``Metrics Scope`` was
            created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time when this ``Metrics Scope`` record was
            last updated.
        monitored_projects (Sequence[google.monitoring.metricsscope_v1.types.MonitoredProject]):
            Output only. The list of projects monitored by this
            ``Metrics Scope``.
    """

    # Field numbers below mirror the MetricsScope proto message; this file
    # is code-generated, so edit the .proto source instead of this file.
    name = proto.Field(
        proto.STRING,
        number=1,
    )
    create_time = proto.Field(
        proto.MESSAGE,
        number=2,
        message=timestamp_pb2.Timestamp,
    )
    update_time = proto.Field(
        proto.MESSAGE,
        number=3,
        message=timestamp_pb2.Timestamp,
    )
    monitored_projects = proto.RepeatedField(
        proto.MESSAGE,
        number=4,
        message='MonitoredProject',
    )
class MonitoredProject(proto.Message):
    r"""A `project being
    monitored <https://cloud.google.com/monitoring/settings/multiple-projects#create-multi>`__
    by a ``Metrics Scope``.

    Attributes:
        name (str):
            Immutable. The resource name of the ``MonitoredProject``. On
            input, the resource name includes the scoping project ID and
            monitored project ID. On output, it contains the equivalent
            project numbers. Example:
            ``locations/global/metricsScopes/{SCOPING_PROJECT_ID_OR_NUMBER}/projects/{MONITORED_PROJECT_ID_OR_NUMBER}``
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The time when this ``MonitoredProject`` was
            created.
    """

    # Generated field declarations — edit the .proto source, not this file.
    name = proto.Field(
        proto.STRING,
        number=1,
    )
    create_time = proto.Field(
        proto.MESSAGE,
        number=6,
        message=timestamp_pb2.Timestamp,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
0599ec6cb89e171675f5adefb7e3706a332115f2 | 779b311600ff5e027cda8f654b5901f6d1611f0b | /devel/lib/python2.7/dist-packages/ublox_msgs/msg/_RxmSVSI_SV.py | cc068ed56664ccc787375fc3e58f08e3d98cbe51 | [] | no_license | KeenRunner/UnmannedSurfaceVessel-USV-PIDcontrol | 2ccd987d1bc18745e173a2594d7bc4fd9519d25c | 7f670bf8a57f3a48f964ce3239db8f25827ee512 | refs/heads/master | 2023-02-05T04:05:21.263512 | 2020-12-24T04:17:16 | 2020-12-24T04:17:16 | 323,601,637 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,275 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from ublox_msgs/RxmSVSI_SV.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class RxmSVSI_SV(genpy.Message):
  """Autogenerated genpy message class for ublox_msgs/RxmSVSI_SV.

  Generated from RxmSVSI_SV.msg — do not edit by hand; regenerate from the
  .msg definition instead.
  """
  _md5sum = "055e3ca33052c1635aff80c3f8ab6197"
  _type = "ublox_msgs/RxmSVSI_SV"
  _has_header = False # flag to mark the presence of a Header object
  _full_text = """# see message RxmSVSI
#
uint8 svid # Satellite ID
uint8 svFlag # Information Flags
uint8 FLAG_URA_MASK = 15 # Figure of Merit (URA) range 0..15
uint8 FLAG_HEALTHY = 16 # SV healthy flag
uint8 FLAG_EPH_VAL = 32 # Ephemeris valid
uint8 FLAG_ALM_VAL = 64 # Almanac valid
uint8 FLAG_NOT_AVAIL = 128 # SV not available
int16 azim # Azimuth
int8 elev # Elevation
uint8 age # Age of Almanac and Ephemeris
uint8 AGE_ALM_MASK = 15 # Age of ALM in days offset by 4
# i.e. the reference time may be in the future:
# ageOfAlm = (age & 0x0f) - 4
uint8 AGE_EPH_MASK = 240 # Age of EPH in hours offset by 4.
# i.e. the reference time may be in the future:
# ageOfEph = ((age & 0xf0) >> 4) - 4
"""
  # Pseudo-constants (mirroring the constant declarations in the .msg file)
  FLAG_URA_MASK = 15
  FLAG_HEALTHY = 16
  FLAG_EPH_VAL = 32
  FLAG_ALM_VAL = 64
  FLAG_NOT_AVAIL = 128
  AGE_ALM_MASK = 15
  AGE_EPH_MASK = 240

  # Message fields, in .msg declaration order, and their ROS types.
  __slots__ = ['svid','svFlag','azim','elev','age']
  _slot_types = ['uint8','uint8','int16','int8','uint8']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       svid,svFlag,azim,elev,age
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(RxmSVSI_SV, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.svid is None:
        self.svid = 0
      if self.svFlag is None:
        self.svFlag = 0
      if self.azim is None:
        self.azim = 0
      if self.elev is None:
        self.elev = 0
      if self.age is None:
        self.age = 0
    else:
      self.svid = 0
      self.svFlag = 0
      self.azim = 0
      self.elev = 0
      self.age = 0

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      # All five fields are packed in one '<2BhbB' struct write.
      buff.write(_get_struct_2BhbB().pack(_x.svid, _x.svFlag, _x.azim, _x.elev, _x.age))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      _x = self
      start = end
      end += 6  # '<2BhbB' is 6 bytes
      (_x.svid, _x.svFlag, _x.azim, _x.elev, _x.age,) = _get_struct_2BhbB().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_2BhbB().pack(_x.svid, _x.svFlag, _x.azim, _x.elev, _x.age))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      _x = self
      start = end
      end += 6
      (_x.svid, _x.svFlag, _x.azim, _x.elev, _x.age,) = _get_struct_2BhbB().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
# Cached struct.Struct helpers used by the (de)serialization code above.
_struct_I = genpy.struct_I
def _get_struct_I():
  # genpy provides a shared Struct instance; return the cached reference.
  global _struct_I
  return _struct_I
_struct_2BhbB = None
def _get_struct_2BhbB():
  # Lazily compile and cache the '<2BhbB' Struct used to pack/unpack the
  # (svid, svFlag, azim, elev, age) fields.
  global _struct_2BhbB
  if _struct_2BhbB is None:
    _struct_2BhbB = struct.Struct("<2BhbB")
  return _struct_2BhbB
| [
"530673741@qq.com"
] | 530673741@qq.com |
5fab933958093bc018267f192e75a6935b2aa85e | b3edf510803fdd6a75c70b865f96e4dd93feee20 | /pictures/Grey/GrAttack2L.py | 94a775cc94e54204326b909033bb503e58b1437f | [] | no_license | chenster17/Clash-of-The-Fairy-Tail | 354001dcef76b4c18ca86ed528c5b5f99807f7ab | 024126db047aa49f8d161efb5ec1f23f6e722784 | refs/heads/master | 2021-01-10T13:37:07.456169 | 2016-01-20T07:12:45 | 2016-01-20T07:12:45 | 50,006,115 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | # GrAttack2L.py
# A Sprite is an old term that used to refer to specialized hardware that
# was used to draw characters is a video game sperate from the background
# Now a sprite just refers to a 2D object in a game, usually a character
# A spritesheet is a sheet of all of the frames of animation that a character
# uses in a game to walk or preform actions.
#
# Pygame has a Sprite module that can help with sprites but it is a bit too
# advances for our needs. To achieve simple animation in python we simply
# display the frames of the picture in order.
from pygame import *
init()
# Window setup.
size = width, height = 800, 600
screen = display.set_mode(size)
running = True
myClock = time.Clock()
x=150  # current x position of the arrow sprite
arrow=image.load("GrArrowL.png")
frame = 0   # index into the attack animation frames
frame2=0    # NOTE(review): unused
# Load the three frames of the attack animation.
pics = []
for i in range(3):
    pics.append(image.load("GrAttack2L" + str(i) + ".png"))
while running:
    for evnt in event.get(): # checks all events that happen
        if evnt.type == QUIT:
            running = False
    screen.fill((150,220,150))
    x-=50  # move the arrow 50px left each tick
    arrowrect=Rect(x,100,30,30) #the damage rect
    screen.blit(arrow,arrowrect)
    if x<2 :
        x=700  # wrap the arrow back to the right edge
    screen.blit(pics[frame],(700,100))
    frame += 1
    if frame == 3:
        # Restart the 3-frame animation; the while/break construct is
        # effectively just `frame = 0`.
        while True:
            #screen.fill((150,220,150))
            frame=0
            break
    display.flip()
    myClock.tick(10)  # cap the loop at 10 frames per second
quit()
| [
"jchen1117@gmail.com"
] | jchen1117@gmail.com |
76af0a371f3d1e84a4dd1b3e5ce44a42071c751f | a9ecbb6324e6ff6ecdc3ef954c83aae7a6cfd7c1 | /rwsegment/boundary_utils.py | 5d36015de0462f330ff0169a33d797b994642f2f | [] | no_license | dgboy2000/segmentation-svm | de394def73bd163905b0a6b481f5a8709e5bacc4 | eaa6dd6c903de3489c0dbbc7bce56142e2865bea | refs/heads/master | 2020-04-10T14:53:48.981663 | 2012-12-17T19:53:15 | 2012-12-17T19:53:15 | 5,313,995 | 10 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,975 | py | import numpy as np
from scipy import ndimage
def sample_points(im, step, mask=None, maxiter=20):
    """Sample a regular grid of points over `im`, then displace each point
    for a few iterations according to the gradient of a smoothed edge map.

    Args:
        im: 3-D image volume.
        step: grid spacing (in voxels), applied along every axis.
        mask: optional binary volume; grid points outside it are dropped.
        maxiter: cap on the number of displacement iterations.

    Returns:
        (n, 3) integer array of point coordinates, strictly inside the
        volume bounds.
    """
    shape = np.asarray(im.shape, dtype=int)
    steps = np.ones(im.ndim, dtype=int)*step
    ## make regular grid
    grid = np.zeros(shape)
    grid[::steps[0], ::steps[1], ::steps[2]] = 1
    if mask is not None:
        grid = grid * mask
    points0 = np.argwhere(grid)
    points = points0
    ## move points
    fim = ndimage.gaussian_filter(im.astype(float), sigma=1.0)
    emap = np.sqrt(np.sum([ndimage.sobel(fim, axis=d)**2 for d in range(im.ndim)], axis=0))
    gradient = np.asarray(np.gradient(emap)).astype(float)
    # Floor division: under Python 3, `np.max(steps)/2` is a float and
    # range() raises TypeError; `//` restores the Python-2 behavior.
    for i in range(np.max(steps)//2):
        # only displace along axes whose spacing still allows movement
        axes = i < (steps/2)
        if i >= maxiter: break
        # gradient at each point, normalized to unit max-component, then
        # rounded to the nearest +/-1/0 voxel step per axis
        dp = gradient[(slice(None),) + tuple(points.T)].T
        dp = dp/np.c_[np.maximum(np.max(np.abs(dp), axis=1),1e-10)]
        dp = (dp + 0.5*(-1)**(dp<0)).astype(int)
        for axe in np.where(axes)[0]:
            points[:,axe] = (points - dp)[:,axe]
        # drop points pushed onto/outside the volume border
        points = points[np.all(points>0, axis=1)&np.all(points<shape,axis=1)]
    return points
from fast_marching import fastm
def get_edges(im, pts, mask=None):
    """Run fast marching over an inverse-gradient speed map seeded at `pts`.

    Returns (edges, edge_values, labels) from the fast-marching solver.
    """
    # Speed is high in flat regions and low across strong gradients.
    grad_sq = [ndimage.sobel(im, axis=d)**2 for d in range(im.ndim)]
    speed = 1/np.sqrt(np.sum(grad_sq, axis=0) + 1e-5)
    labels, dist, edges, edgev = fastm.fast_marching_3d(
        speed,
        pts,
        heap_size=1e6,
        offset=1e-2,
        mask=mask,
        output_arguments=('labels', 'distances', 'edges', 'edge_values'),
        )
    return edges, edgev, labels
def get_profiles(im, points, edges, rad=0):
    """Sample a normalized edge-strength map of `im` along each graph edge.

    For every edge (i, j), a straight line is traced between points[i] and
    points[j] and the edge map is averaged over disk offsets of radius
    `rad` taken in the plane perpendicular to the edge direction.

    Args:
        im: 3-D intensity volume.
        points: (n, 3) integer point coordinates.
        edges: (m, 2) index pairs into `points`.
        rad: radius of the perpendicular averaging disk (0 = single ray).

    Returns:
        (profiles, emap, dists): the per-edge 1-D profiles, the normalized
        gradient-magnitude map, and the per-edge Euclidean lengths.
    """
    # Gradient magnitude, shifted to zero minimum and scaled to unit std.
    emap = np.sqrt(np.sum(
        [ndimage.sobel(im,axis=d)**2 for d in range(im.ndim)],axis=0) + 1e-5)
    emap = emap - np.min(emap)
    emap /= np.std(emap)
    ## extract intensity
    dists = np.sqrt(np.sum((points[edges[:,0]] - points[edges[:,1]])**2,axis=1))
    profiles = []
    for i,e in enumerate(edges):
        pt0 = points[e[0]]
        pt1 = points[e[1]]
        # edge direction, scaled so its largest component is 1
        vec = (pt1 - pt0).astype(float)
        vec /= np.max(np.abs(vec))
        # Build two unit vectors (par1, par2) perpendicular to vec.
        if np.abs(vec[0])+np.abs(vec[1]) < 1e-5:
            # vec is (almost) aligned with the last axis; any xy pair works
            par1 = np.array([1.,0.,0.])
            par2 = np.array([0.,1.,0.])
        else:
            par1 = np.array([vec[1], -vec[0], 0])
            par1 /= np.sqrt(np.sum(par1**2))
            par2 = np.array([vec[0]*vec[2], vec[1]*vec[2], -(vec[0]**2 + vec[1]**2)])
            par2 /= np.sqrt(np.sum(par2**2))
        # Sample the segment with roughly one sample per voxel of length.
        dist = int(dists[i] + 1)
        line = np.asarray([(1-t)*pt0 + t*pt1 for t in np.linspace(0,1,dist)])
        # Unit-disk offsets in the (par1, par2) plane; the denominator keeps
        # rad=0 from dividing by zero (single centre sample).
        disk = (np.argwhere(np.ones((2*rad+1, 2*rad+1))) - rad)/float(rad + 1*(rad==0))
        disk = disk[np.sum(disk**2,axis=1) < 1.00000001]
        profile = 0
        for d in disk:
            transl = par1*d[0] + par2*d[1]
            tline = (line + transl + 0.5).astype(int)
            # skip translated rays that leave the volume
            if not np.all(np.all(tline>=[0,0,0],axis=1)&np.all(tline<im.shape,axis=1)):
                continue
            profile = profile + emap[tuple(tline.T)]
        profile /= len(disk)
        profiles.append(profile)
        # profiles[e[1]].append([e[0],profile])
    return profiles, emap, dists
def make_features(profiles, size=None, additional=None):
    """Resample each profile to fixed lengths and append summary statistics.

    Args:
        profiles: list of 1-D intensity profiles (possibly varying length).
        size: target resampling length; defaults to the mean profile
            length + 1.
        additional: optional per-profile extra features; each entry is a
            sequence indexed like `profiles`.

    Returns:
        One feature vector per profile: the profile resampled to size/4,
        size/2 and size samples, followed by [mean, std, max, min, original
        length] of the full-size resampling, plus any `additional` values.
    """
    from scipy import interpolate
    if size is None:
        size = 0
        for profile in profiles:
            size += len(profile)/float(len(profiles))
        size = int(size + 1)
    # Floor division: `size/4` was Python-2 integer division; under Python 3
    # it yields floats, which np.linspace rejects as a sample count.
    sizes = [size//4, size//2, size]
    x = []
    for i, profile in enumerate(profiles):
        n = len(profile)
        feature = []
        # cubic interpolation needs at least 4 sample points
        if n < 4:
            interpolator = interpolate.interp1d(np.linspace(0,1,n), profile, kind='linear')
        else:
            interpolator = interpolate.interp1d(np.linspace(0,1,n), profile, kind='cubic')
        for size in sizes:
            d = interpolator(np.linspace(0,1,size))
            feature.extend(d.tolist())
        ## add features, including average, std, min, max, length
        # (statistics come from `d`, i.e. the last — full-size — resampling)
        x.append(feature + [np.mean(d), np.std(d), np.max(d), np.min(d), n])
        if additional is not None:
            x[-1].extend([ad[i] for ad in additional])
    return x
def is_boundary(points, edges, seg):
    """Return a boolean array, True where an edge joins two points whose
    segmentation labels in `seg` differ (the edge crosses a boundary)."""
    end_labels = []
    for col in (0, 1):
        coords = points[edges[:, col]]
        end_labels.append(seg[tuple(coords.T)])
    return end_labels[0] != end_labels[1]
class Classifier(object):
    """Binary linear classifier over edge features, trained as a structured
    SVM.  The weight vector `w` holds one half for label 0 and one half for
    label 1; the predicted label is the argmin of the two scores.
    """

    def __init__(self, w=None):
        # Only store the weights when provided; train() sets them otherwise.
        if w is not None:
            self.w = w

    def loss(self, z, y, **kwargs):
        # 0/1 loss, returned as a float.
        return 1.0 * (y != z)

    def psi(self, x, y, **kwargs):
        # Joint feature map: the features occupy the half matching label y,
        # the other half is zero.
        padding = [0.0] * len(x)
        if y == 0:
            return np.r_[x, padding]
        return np.r_[padding, x]

    def mvc(self, w, x, z, **kwargs):
        # Most-violated constraint: the label minimizing score minus loss.
        scores = [np.dot(w, self.psi(x, y)) - self.loss(z, y) for y in [0, 1]]
        #print z, scores
        return np.argmin(scores)

    def train(self, x, z, balanced=True, **kwargs):
        """Fit the weight vector with a structured SVM.  When `balanced`,
        subsample so both classes contribute equally many examples."""
        import struct_svm
        struct_svm.logger = struct_svm.utils_logging.get_logger('svm', struct_svm.utils_logging.INFO)
        from struct_svm import StructSVM
        C = kwargs.pop('C', 1.)
        if balanced:
            idx_neg = np.where(np.asarray(z) == 0)[0]
            idx_pos = np.where(np.asarray(z) == 1)[0]
            count = np.minimum(len(idx_neg), len(idx_pos))
            pick_neg = np.random.permutation(idx_neg)[:count]
            pick_pos = np.random.permutation(idx_pos)[:count]
            S = [(x[i], z[i]) for i in pick_neg]
            S += [(x[i], z[i]) for i in pick_pos]
        else:
            S = [(x[i], z[i]) for i in range(len(z))]
        svm = StructSVM(S, self.loss, self.psi, self.mvc, C=C,)
        ## train svm
        w, xi, info = svm.train()
        self.w = w
        self.xi = xi

    def classify(self, x):
        """Return (labels, per-label score pairs) for each feature vector."""
        w = self.w
        sol = []
        scores = []
        for feat in x:
            pair = [np.dot(w, self.psi(feat, y)) for y in [0, 1]]
            sol.append(np.argmin(pair))
            scores.append(pair)
        return sol, scores
| [
"pybaudin@gmail.com"
] | pybaudin@gmail.com |
db507a51eafac74ecb3bac782299bb61e9c5a2d6 | f98418686ebdc68d12b6c8bc657e93b8df3a6755 | /untitled1/max.py | 2a9dc031b1fc9487b607b9c8a56e6c9113433858 | [] | no_license | sunnyyong2/algorithm | a666924a37ce20b74488040d401bb0745f3da8bf | 75ab06d0d2b1ae595742fe14d9d81e240bcc28d0 | refs/heads/master | 2020-07-04T16:01:10.526471 | 2019-10-02T00:12:30 | 2019-10-02T00:12:30 | 202,331,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | T = int(input())
for i in range(1,T+1):
tc = list(map(int,input().split()))
M = 0
for num in tc :
if num > M :
M = num
print(M)
| [
"studying_yong@naver.com"
] | studying_yong@naver.com |
852299ad0b043f21d23b3e7b3f6804d56a0c199e | 5f99e281f3cb50845643336dbe6e2442ce6827d3 | /main.py | f98ab1e3e0b60e9089d4d89a4fd4d817028b7d9e | [] | no_license | MekhiRafiki/StockPredictor | 9e13fc539918ffe81d00cb857f1c6c4b693e29c9 | 2598a7df494dca8dcfbe5f93fc35f91049ac66fb | refs/heads/master | 2020-05-19T02:30:26.329899 | 2019-06-04T22:10:49 | 2019-06-04T22:10:49 | 184,782,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,208 | py | import numpy as np
import regression
import sentimentAnalysis as SA
import datetime
SENTIMENT_BOUNDARY = 0
class NeuralNetwork():
    """Single sigmoid neuron with 2 inputs and 1 output."""

    def __init__(self):
        # Fixed seed so the weight initialisation is reproducible.
        np.random.seed(1)
        # 2x1 weight matrix drawn uniformly from [-1, 1), zero mean.
        self.synaptic_weights = 2 * np.random.random((2, 1)) - 1

    def sigmoid(self, x):
        """Logistic activation function."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        """Sigmoid derivative, given x that is already sigmoid-activated."""
        return x * (1 - x)

    def train(self, training_inputs, training_outputs, training_iterations):
        """Repeatedly forward-propagate, back-propagate the error, and
        adjust the weights for the given number of iterations."""
        for _ in range(training_iterations):
            predicted = self.think(training_inputs)
            residual = training_outputs - predicted
            delta = np.dot(training_inputs.T, residual * self.sigmoid_derivative(predicted) * 10)
            self.synaptic_weights += delta

    def think(self, inputs):
        """Forward pass: weighted sum of inputs squashed by the sigmoid."""
        inputs = inputs.astype(float)
        return self.sigmoid(np.dot(inputs, self.synaptic_weights))
def main(companyName, companySymbol, pred_date, weights, companyDict = {}):
    """Train (or reuse) the single-neuron model and evaluate one prediction.

    When `weights` is empty, trains on monthly sentiment/regression features
    for 2018; otherwise reuses the given weight matrix.  Then builds the
    feature vector for `pred_date` and compares the model's prediction with
    the actual change reported by regression.main.

    Returns:
        (hit, weights, companyDict) where hit is 1 when the prediction is
        within +/-0.1 of the actual change, else 0.

    NOTE(review): `companyDict={}` is a mutable default argument shared
    across calls; callers here always pass it explicitly, but prefer
    `companyDict=None` with an in-body default.
    """
    #initializing the neuron class
    neural_network = NeuralNetwork()
    #print("Beginning Randomly Generated Weights: ")
    #print(neural_network.synaptic_weights)
    if len(weights) == 0:
        # Build one training example per month of 2018:
        # [sentiment score, regression feature] -> actual change (label).
        #training data consisting of 4 examples--3 input values and 1 output
        training_examples = []
        training_examples_outputs = []
        companyDict = {} # Avoid Slowdown from Yahoo and Nasdaq during training. Cache the results
        for i in range(1, 13):
            example = []
            # set up dates
            latestDate = datetime.datetime(2018, i, 2)
            if i == 12:
                endDate = datetime.datetime(2019, 1, 1)
            else:
                endDate = datetime.datetime(2018, i+1, 1)
            sentiment_analysis_tuple = SA.getSentiment(companySymbol, companyName, latestDate, endDate, companyDict)
            example.append(sentiment_analysis_tuple[0])
            # example.append(float(sentiment_analysis_tuple[1]/100))
            companyDict = sentiment_analysis_tuple[2]
            # Walk endDate forward one day at a time until regression data
            # exists (markets closed / missing data otherwise).
            while True:
                try:
                    regression_tuple = regression.main(companySymbol, endDate)
                    if regression_tuple[1] == None:
                        #print('No data for {}. Trying the next market day'.format(endDate.isoformat()))
                        endDate = endDate.replace(year = endDate.year, month=endDate.month, day = endDate.day+1)
                    else:
                        break
                except:
                    #print('Markets were closed on {}. Trying the next day this month'.format(endDate.isoformat()))
                    endDate = endDate.replace(year = endDate.year, month=endDate.month, day = endDate.day+1)
            #print(regression_tuple)
            example.append(regression_tuple[0])
            training_examples_outputs.append(regression_tuple[1]) #truth value
            training_examples.append(example)
        training_inputs = np.array(training_examples)
        #print(training_inputs)
        #training_inputs = np.array([[0,0,1],
        #                            [1,1,1],
        #                            [1,0,1],
        #                            [0,1,1]])
        #print(training_examples_outputs)
        training_outputs = np.array([training_examples_outputs]).T
        #training_outputs = np.array([[0,1,1,0]]).T
        #training taking place
        neural_network.train(training_inputs, training_outputs, 15000)
    else:
        # Reuse previously trained weights from an earlier call.
        neural_network.synaptic_weights = weights
    #print("Ending Weights After Training: ")
    #print(neural_network.synaptic_weights)
    # Build the prediction features over the window [pred_date - 2 days, pred_date].
    pred_back = pred_date.replace(year = pred_date.year, month=pred_date.month, day = pred_date.day-2)
    input_features = []
    sentiment_analysis_tuple = SA.getSentiment(companySymbol, companyName, pred_back, pred_date, companyDict)
    #print(sentiment_analysis_tuple)
    input_features.append(sentiment_analysis_tuple[0])
    #input_features.append(float(sentiment_analysis_tuple[1]/100))
    #print(sentiment_analysis_tuple[1])
    regression_tuple = regression.main(companySymbol, pred_date)
    input_features.append(regression_tuple[0])
    actual = regression_tuple[1]
    #print(pred_date.isoformat())
    #print("\t Sentiment", sentiment_analysis_tuple[0])
    #if sentiment_analysis_tuple[0] > SENTIMENT_BOUNDARY:
    #    return (1, neural_network.synaptic_weights, companyDict)
    #else:
    #    return (0, neural_network.synaptic_weights, companyDict)
    print("\n Considering New Situation: " + str(input_features))
    prediction = neural_network.think(np.array(input_features))
    print("Trial for day ", pred_date.isoformat())
    print("\t Prediction: ", prediction)
    print("\t Actual Change: ", actual)
    # Count a hit when the prediction lands within +/-0.1 of the actual change.
    if prediction < actual + .1 and prediction > actual - .1:
        return (1, neural_network.synaptic_weights, companyDict)
    else:
        return (0, neural_network.synaptic_weights, companyDict)
if __name__ == "__main__":
    # Interactive entry point: ask for a company, then evaluate the model on
    # a fixed list of 2019 prediction dates and report the hit rate.
    companyName = str(input("Input Company Name: "))
    companySymbol = str(input("Input Company Symbol: "))
    #training_prediction_dates = [datetime.datetime(2018, 1, 3),
    #                             datetime.datetime(2018, 2, 3),
    #                             datetime.datetime(2018, 3, 3),
    #                             datetime.datetime(2018, 4, 3),
    #                             datetime.datetime(2018, 5, 3),
    #                             datetime.datetime(2018, 6, 3),
    #                             datetime.datetime(2018, 7, 3),
    #                             datetime.datetime(2018, 8, 3),
    #                             datetime.datetime(2018, 9, 5),
    #                             datetime.datetime(2018, 10, 3)]
    prediction_dates = [datetime.datetime(2019, 4, 25),
                        datetime.datetime(2019, 4, 30),
                        datetime.datetime(2019, 5, 7),
                        datetime.datetime(2019, 5, 9),
                        datetime.datetime(2019, 5, 14),
                        datetime.datetime(2019, 5, 16),
                        datetime.datetime(2019, 5, 21),
                        datetime.datetime(2019, 5, 23),
                        datetime.datetime(2019, 5, 28),
                        datetime.datetime(2019, 5, 30)]
    correct = 0
    weights = []      # empty -> the first main() call trains the model
    companyDict = {}  # shared cache carried between trials
    for date in prediction_dates:
        trial = main(companyName, companySymbol, date, weights, companyDict)
        # carry the trained weights and cached company data into later trials
        weights = trial[1]
        companyDict = trial[2]
        if trial[0] == 1:
            correct += 1
    print("Overall Accuracy: ", correct/len(prediction_dates))
| [
"mekhi.rafiki@gmail.com"
] | mekhi.rafiki@gmail.com |
20db424bef33ad06093993ca3a270079e758c48c | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /fair_survival_analysis/fair_survival_analysis/plots.py | 200d6bbca2d5bf701befac7a2203e7a7be1b5047 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 10,705 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilitites to plot the ROC and Calibration for survival models.
This module has utility functions to generate ROC and Calibration plots for
survival models at given horizons of time. Note that ideally both the ROC and
Calibration curves require to be adjusted for censoring using IPCW estimates.
Not designed to be called directly, would be called when running a function from
fair_survival_analysis.fair_survival_analysis
"""
from fair_survival_analysis import baseline_models
from fair_survival_analysis import models
from fair_survival_analysis.utils import calibration_curve
import matplotlib as mpl
from matplotlib import pyplot as plt
from metrics import cumulative_dynamic_auc
import numpy as np
from sklearn.metrics import auc
def plot_calibration_curve(plot,
                           scores,
                           e,
                           t,
                           a,
                           folds,
                           group,
                           quant,
                           strat='quantile',
                           adj='IPCWpop'):
  """Plots an IPCW-adjusted calibration curve at a specified time horizon.

  Draws a reliability diagram on the supplied matplotlib axes: one blue bar
  per score bin showing the observed (IPCW-adjusted) event probability, with
  a hatched red bar marking the gap to perfect calibration, plus the ECE.

  Args:
    plot:
      a matplotlib axes instance to draw the calibration curve on.
    scores:
      dict mapping each cross-validation fold to the predicted risk scores
      for the examples in that fold.
    e:
      a numpy vector of event indicators.
    t:
      a numpy vector of event/censoring times.
    a:
      a numpy vector of protected attributes.
    folds:
      a numpy vector assigning each example to a cv fold.
    group:
      the demographic group to compute the curve for.
    quant:
      the event-time quantile (horizon) at which the model is evaluated.
    strat:
      Specifies how the bins are computed. One of:
      "quantile": Equal sized bins.
      "uniform": Uniformly stratified.
    adj (str):
      Determines if IPCW adjustment is carried out on a population or
      subgroup level. One of "IPCWpop", "IPCWcon" (not implemented).

  Returns:
    None. Draws onto `plot` as a side effect.
  """
  # Flatten the per-fold score dict into one vector aligned with t/e/a.
  allscores = np.ones_like(t).astype('float')
  for fold in set(folds):
    allscores[folds == fold] = scores[fold]
  scores = allscores

  # Bar face/edge colors: translucent blue for scores, hatched red for gaps.
  b_fc = (0, 0, 1, .4)
  r_fc = (1, 0, 0, .2)
  b_ec = (0, 0, 1, .8)
  r_ec = (1, 0, 0, .8)
  n_bins = 20
  hatch = '//'
  fs = 16

  prob_true_n, _, outbins, ece = calibration_curve(
      scores,
      e,
      t,
      a,
      group,
      quant,
      typ=adj,
      ret_bins=True,
      strat=strat,
      n_bins=n_bins)

  for d in range(len(prob_true_n)):
    binsize = outbins[d + 1] - outbins[d]
    binloc = (outbins[d + 1] + outbins[d]) / 2
    gap = (prob_true_n[d] - binloc)
    # The gap bar spans from min(observed, ideal) upward, so it always sits
    # between the observed probability and the diagonal.
    bottom = min(prob_true_n[d], binloc)
    # Only label the last pair of bars so the legend has one entry each.
    if d == len(prob_true_n) - 1:
      lbl1 = 'Score'
      lbl2 = 'Gap'
    else:
      lbl1 = None
      lbl2 = None
    plot.bar(
        binloc,
        prob_true_n[d],
        width=binsize,
        facecolor=b_fc,
        edgecolor=b_ec,
        linewidth=2.5,
        label=lbl1)
    plot.bar(
        binloc,
        abs(gap),
        bottom=bottom,
        width=binsize,
        facecolor=r_fc,
        edgecolor=r_ec,
        linewidth=2.5,
        hatch=hatch,
        label=lbl2)
    # NOTE: the original code incremented `d` here; that was dead code since
    # the for-loop reassigns `d` each iteration, so it has been removed.

  # Diagonal = perfect calibration reference.
  plot.plot([0, 1], [0, 1], c='k', ls='--', lw=2, zorder=100)
  plot.set_xlabel('Predicted Score', fontsize=fs)
  plot.set_ylabel('True Score', fontsize=fs)
  plot.legend(fontsize=fs)
  plot.set_title(str(group), fontsize=fs)
  plot.set_xlim(0, 1)
  plot.set_ylim(0, 1)
  plot.grid(ls=':', lw=2, zorder=-100, color='grey')
  plot.set_axisbelow(True)
  plot.text(
      x=0.030,
      y=.7,
      s='ECE=' + str(round(ece, 3)),
      size=fs,
      bbox=dict(boxstyle='round', fc='white', ec='grey', pad=0.2))
def plot_roc_curve(plot,
                   scores,
                   e,
                   t,
                   a,
                   folds,
                   groups,
                   quant):
  """Plots an IPCW-adjusted ROC curve at a specified time horizon.

  For each demographic group (and for 'all'), computes per-fold
  cumulative/dynamic AUC, interpolates the fold curves onto a common FPR
  grid, and draws the mean TPR with a 95% confidence band on `plot`.

  Args:
    plot:
      a matplotlib axes instance to draw the ROC curves on.
    scores:
      dict mapping each cross-validation fold to the predicted risk scores
      for the examples in that fold.
    e:
      a numpy vector of event indicators.
    t:
      a numpy vector of event/censoring times.
    a:
      a numpy vector of protected attributes.
    folds:
      a numpy vector assigning each example to a cv fold.
    groups:
      List of the demographics to adjust for.
    quant:
      the event-time quantile (horizon) at which the models are evaluated.

  Returns:
    None. Draws onto `plot` as a side effect.
  """
  fs = 16
  # fprs[group][fold] / tprs[group][fold] hold the per-fold ROC points;
  # 'all' aggregates over every group.
  fprs, tprs, tprs_std = {}, {}, {}
  fprs['all'] = {}
  tprs['all'] = {}
  for group in groups:
    fprs[group] = {}
    tprs[group] = {}
  for fold in set(folds):
    # Structured (event, time) arrays for the train/test split of this fold.
    str_train = baseline_models.structureForEval_(t[folds != fold],
                                                  e[folds != fold])
    str_test = baseline_models.structureForEval_(t[folds == fold],
                                                 e[folds == fold])
    atr = a[folds != fold]
    ate = a[folds == fold]
    for group in groups:
      te_protg = (ate == group)
      tr_protg = (atr == group)
      # Scores are negated: higher risk should mean earlier event.
      roc_m = cumulative_dynamic_auc(str_train[tr_protg], str_test[te_protg],
                                     -scores[fold][te_protg], [quant])
      fprs[group][fold], tprs[group][fold] = roc_m[0][0][1], roc_m[0][0][0]
    roc_m = cumulative_dynamic_auc(str_train, str_test, -scores[fold], [quant])
    fprs['all'][fold], tprs['all'][fold] = roc_m[0][0][1], roc_m[0][0][0]
  cols = ['b', 'r', 'g']
  roc_auc = {}
  j = 0
  for group in groups + ['all']:
    all_fpr = np.unique(np.concatenate([fprs[group][i] for i in set(folds)]))
    # The ROC curves are interpolated at these points.
    mean_tprs = []
    for i in set(folds):
      mean_tprs.append(np.interp(all_fpr, fprs[group][i], tprs[group][i]))
    # Finally the interpolated curves are averaged over to compute AUC.
    mean_tpr = np.mean(mean_tprs, axis=0)
    # 95% CI half-width; NOTE(review): divisor assumes 10 folds regardless of
    # len(set(folds)) — confirm this matches the experimental setup.
    std_tpr = 1.96 * np.std(mean_tprs, axis=0) / np.sqrt(10)
    fprs[group]['macro'] = all_fpr
    tprs[group]['macro'] = mean_tpr
    tprs_std[group] = std_tpr
    roc_auc[group] = auc(fprs[group]['macro'], tprs[group]['macro'])
    plot.plot(
        all_fpr,
        mean_tpr,
        c=cols[j],
        label=group + ' AUC:' + str(round(roc_auc[group], 3)))
    plot.fill_between(
        all_fpr,
        mean_tpr - std_tpr,
        mean_tpr + std_tpr,
        color=cols[j],
        alpha=0.25)
    j += 1
  plot.set_xlabel('False Positive Rate', fontsize=fs)
  plot.set_ylabel('True Positive Rate', fontsize=fs)
  plot.legend(fontsize=fs)
  plot.set_xscale('log')
def plot_results(trained_model, model, fair_strategy, x, e, t, a,\
                 folds, groups, quantiles, strat='quantile', adj='IPCWcon'):
  """Plots the ROC and Calibration curves from a survival model.

  Builds a grid of subplots with one row per time horizon: the first column
  is the IPCW-adjusted ROC curve over all groups, and the remaining columns
  are per-group calibration curves.

  Args:
    trained_model:
      a trained survival analysis model
      (output of fair_survival_analysis.models.train_model).
    model:
      choice of model. One of "coupled_deep_cph", "coupled_deep_cph_vae",
      or a baseline model name handled by baseline_models.
    fair_strategy:
      fairness strategy passed through to the baseline predictor. Must be
      same as what was used to originally train the model.
    x:
      a numpy array of input features.
    e:
      a numpy vector of event indicators.
    t:
      a numpy vector of event/censoring times.
    a:
      a numpy vector of protected attributes.
    folds:
      a numpy vector assigning each example to a cv fold.
    groups:
      List of the demographics to adjust for.
    quantiles:
      a list of event time quantiles at which the models are to be evaluated.
    strat:
      Specifies how the calibration bins are computed. One of:
      "quantile": Equal sized bins.
      "uniform": Uniformly stratified.
    adj:
      IPCW adjustment level forwarded to plot_calibration_curve.

  Returns:
    None. Displays the assembled matplotlib figure via plt.show().
  """
  # Thicker hatch lines for the calibration "gap" bars.
  mpl.rcParams['hatch.linewidth'] = 2.0
  fig, big_axes = plt.subplots(
      figsize=(8 * (len(groups) + 1), 6 * len(quantiles)),
      nrows=len(quantiles),
      ncols=1)
  plt.subplots_adjust(hspace=0.4)
  # The "big" axes only carry the per-row titles; their ticks are hidden.
  i = 0
  for _, big_ax in enumerate(big_axes, start=1):
    big_ax.set_title(
        'Receiver Operator Characteristic and Calibration at t=' +
        str(quantiles[i]) + '\n',
        fontsize=16)
    big_ax.tick_params(
        labelcolor=(1., 1., 1., 0.0),
        top='off',
        bottom='off',
        left='off',
        right='off')
    i += 1
  for i in range(len(quantiles)):
    # Risk scores at this horizon, from either the coupled model or baseline.
    if model in ['coupled_deep_cph', 'coupled_deep_cph_vae']:
      scores = models.predict_scores(trained_model, groups, x, a, folds,
                                     quantiles[i])
    else:
      scores = baseline_models.predict_scores(trained_model, model,
                                              fair_strategy, x, a, folds,
                                              quantiles[i])
    for j in range(len(groups) + 1):
      # Subplot index: column 0 is the ROC; columns 1.. are calibration.
      pt = (i * (len(groups) + 1) + j + 1)
      ax = fig.add_subplot(len(quantiles), len(groups) + 1, pt)
      if j:
        plot_calibration_curve(
            ax,
            scores,
            e,
            t,
            a,
            folds,
            groups[j - 1],
            quantiles[i],
            strat=strat,
            adj=adj)
      else:
        plot_roc_curve(
            ax,
            scores,
            e,
            t,
            a,
            folds,
            groups,
            quantiles[i])
  plt.show()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
7d243272cbf1128a99ce2edb7405e3f9f6ba18fa | 0807fd06d8d50cd51228877368d3af65cc1e4c8b | /course-3/module-9-precision-recall-assignment.py | 8a8bef1eb8a2788c19e32cb34ee6e09c7e104d22 | [] | no_license | gigix/machine-learning-specialization | f2bf815e8242c0ca8fe431c57a29bb84ad873ede | 7dbef3eb807fc557596bf2187dd05744503f42e8 | refs/heads/master | 2021-07-21T15:11:44.022004 | 2017-11-02T01:25:45 | 2017-11-02T01:25:45 | 109,201,661 | 0 | 0 | null | 2017-11-02T01:11:45 | 2017-11-02T01:11:45 | null | UTF-8 | Python | false | false | 4,151 | py | import string
import graphlab
import numpy as np
# Python 2 / GraphLab Create assignment script: trains a logistic sentiment
# classifier on Amazon baby-product reviews and explores precision/recall
# trade-offs across decision thresholds.
products = graphlab.SFrame('amazon_baby.gl/')
def remove_punctuation(text):
    # Py2 str.translate: deleting all punctuation characters.
    return text.translate(None, string.punctuation)
# Remove punctuation.
review_clean = products['review'].apply(remove_punctuation)
# Count words
products['word_count'] = graphlab.text_analytics.count_words(review_clean)
# Drop neutral sentiment reviews.
products = products[products['rating'] != 3]
# Positive sentiment to +1 and negative sentiment to -1
target = 'sentiment'
products[target] = products['rating'].apply(lambda rating: +1 if rating > 3 else -1)
train_data, test_data = products.random_split(.8, seed=1)
# Model is trained once, saved, and reloaded on subsequent runs.
# model = graphlab.logistic_classifier.create(train_data, target=target,
#                                             features=['word_count'], validation_set=None)
# model.save('module-9-model-1')
model = graphlab.load_model('module-9-model-1')
print('===== QUIZ 1 =====')
accuracy = model.evaluate(test_data, metric='accuracy')['accuracy']
print "Test Accuracy: %s" % accuracy
# Majority-class baseline: fraction of positive reviews in the test set.
baseline = len(test_data[test_data[target] == 1]) / float(len(test_data))
print "Baseline accuracy (majority class classifier): %s" % baseline
print('===== QUIZ 2 =====')
confusion_matrix = model.evaluate(test_data, metric='confusion_matrix')['confusion_matrix']
print confusion_matrix
def cost(model, dataset):
    # Asymmetric misclassification cost: false positives are 100x worse
    # than false negatives.
    # NOTE(review): false_positive/false_negative are 'count' columns of the
    # filtered confusion matrix; presumably single-row SArrays here —
    # a .sum() would be more defensive. Verify against the SFrame schema.
    confusion_matrix = model.evaluate(dataset, metric='confusion_matrix')['confusion_matrix']
    errors = confusion_matrix[
        confusion_matrix['target_label'] != confusion_matrix['predicted_label']]
    false_positive = errors[errors['predicted_label'] == 1]['count']
    false_negative = errors[errors['predicted_label'] == -1]['count']
    return false_positive * 100 + false_negative
print('===== QUIZ 3 =====')
print('Cost of the model on test data: %s' % cost(model, test_data))
print('===== QUIZ 4~5 =====')
precision = model.evaluate(test_data, metric='precision')['precision']
print "Precision on test data: %s; false positive rate: %s" % (precision, 1 - precision)
print('===== QUIZ 6~7 =====')
recall = model.evaluate(test_data, metric='recall')['recall']
print "Recall on test data: %s" % recall
def apply_threshold(probabilities, threshold):
    # Map class probabilities to hard labels at the given cutoff.
    return probabilities.apply(lambda p: +1 if p >= threshold else -1)
probabilities = model.predict(test_data, output_type='probability')
predictions_with_default_threshold = apply_threshold(probabilities, 0.5)
predictions_with_high_threshold = apply_threshold(probabilities, 0.9)
print('===== QUIZ 8~9 =====')
print "Number of positive predicted reviews (threshold = 0.5): %s" % \
    len(predictions_with_default_threshold[predictions_with_default_threshold == 1])
print "Number of positive predicted reviews (threshold = 0.9): %s" % \
    len(predictions_with_high_threshold[predictions_with_high_threshold == 1])
print('===== QUIZ 10 =====')
# Sweep thresholds from 0.5 to 1.0 and trace the precision/recall trade-off.
threshold_values = np.linspace(0.5, 1, num=100)
probabilities = model.predict(test_data, output_type='probability')
for threshold in threshold_values:
    predictions = apply_threshold(probabilities, threshold)
    precision = graphlab.evaluation.precision(test_data[target], predictions)
    recall = graphlab.evaluation.recall(test_data[target], predictions)
    print('Threshold: %s - precision: %s; recall: %s' % (threshold, precision, recall))
print('===== QUIZ 11 =====')
predictions = apply_threshold(probabilities, 0.98)
print('----- confusion matrix when threshold is 0.98 -----')
confusion_matrix = graphlab.evaluation.confusion_matrix(test_data[target], predictions)
print(confusion_matrix)
print('===== QUIZ 12~13 =====')
# Repeat the threshold sweep restricted to reviews mentioning "baby".
baby_reviews = test_data[test_data['name'].apply(lambda x: 'baby' in x.lower())]
probabilities = model.predict(baby_reviews, output_type='probability')
threshold_values = np.linspace(0.5, 1, num=100)
for threshold in threshold_values:
    predictions = apply_threshold(probabilities, threshold)
    precision = graphlab.evaluation.precision(baby_reviews[target], predictions)
    recall = graphlab.evaluation.recall(baby_reviews[target], predictions)
    print('Threshold: %s - precision: %s; recall: %s' % (threshold, precision, recall))
| [
"gigix1980@gmail.com"
] | gigix1980@gmail.com |
5b5d3fa4af939811e1b58790f393c15d386d3e86 | afed7dce9028d62c85753af0e3619c77ccfeb333 | /wetwilly | eab7c618198eb252b5c8d65498302ae49db2e00f | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | arikat/SecretBox | 809dccef2ebb6d112a1112411ade001f0d043c18 | 474ba4dabcb34ff1a62334715927d30325d27f3c | refs/heads/master | 2021-07-04T09:23:57.359550 | 2020-10-26T20:01:53 | 2020-10-26T20:01:53 | 192,941,399 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | #!/usr/bin/env python
from __future__ import division
from Bio import PDB
import argparse
import sys
# Finds residues near water oxygen atoms in a PDB structure.
# Usage: wetwilly [-c CUTOFF] structure.pdb
parser = argparse.ArgumentParser()
parser.add_argument("-c", type=float, default=4.5, help="Interaction cutoff (Angstrom)")
args, unk = parser.parse_known_args()
CUTOFF = args.c
PDB_IN = unk[0]  # first positional argument: input PDB file path
p = PDB.PDBParser(QUIET=True)
structure = p.get_structure('whatever', PDB_IN)
# Emit a '>' header for every water (HETATM flag 'W') oxygen atom.
for resi in structure.get_residues():
    if resi.id[0] == 'W':
        for atom in resi:
            if atom.id == 'O':
                sys.stdout.write('> %s\n' % (atom.get_parent().id[1]))
# Report residues with any atom within CUTOFF of `atom`.
# NOTE(review): `atom` here is the loop variable leaked from the scan above,
# i.e. only the LAST water oxygen is tested against the structure. For a
# single-water PDB this is fine; for multiple waters this looks unintended —
# confirm whether the contact loop should be nested per water.
for model in structure:
    for chain in model:
        for residue in chain:
            if any([(atom-a <= CUTOFF) for a in residue]):
                sys.stdout.write('%s\t%s\t%s\n' % (chain.id, residue.id[1], residue.get_resname()))
| [
"noreply@github.com"
] | arikat.noreply@github.com | |
8a7d80e6f5f06bd496743c7d5473e482fa893c14 | 636b31e9bef7c82183f288d441f08d448af49f9c | /parlai/agents/drqa/drqa.py | 6df22714225e1ad0f750375803aaa74add2eaf54 | [
"MIT"
] | permissive | ShaojieJiang/tldr | a553051c6b615237212082bbc21b09e9069929af | c878ed10addbae27fa86cc0560f168b14b94cf42 | refs/heads/master | 2023-08-21T23:36:45.148685 | 2020-04-08T15:09:35 | 2020-04-08T15:09:35 | 247,427,860 | 12 | 0 | MIT | 2023-08-11T19:52:22 | 2020-03-15T08:25:21 | Python | UTF-8 | Python | false | false | 14,162 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
(A partial) implementation of the DrQa Document Reader from:
Danqi Chen, Adam Fisch, Jason Weston, Antoine Bordes. 2017.
Reading Wikipedia to Answer Open-Domain Questions.
In Association for Computational Linguistics (ACL).
Link: https://arxiv.org/abs/1704.00051
Note:
To use pretrained word embeddings, set the --embedding_file path argument.
GloVe is recommended, see http://nlp.stanford.edu/data/glove.840B.300d.zip.
To automatically download glove, use:
--embedding_file zoo:glove_vectors/glove.840B.300d.txt
"""
try:
import torch
except ImportError:
raise ImportError('Need to install pytorch: go to pytorch.org')
import bisect
import os
import numpy as np
import json
import random
from parlai.core.agents import Agent
from parlai.core.dict import DictionaryAgent
from parlai.core.build_data import modelzoo_path
from . import config
from .utils import build_feature_dict, vectorize, batchify, normalize_text
from .model import DocReaderModel
# ------------------------------------------------------------------------------
# Dictionary.
# ------------------------------------------------------------------------------
class SimpleDictionaryAgent(DictionaryAgent):
    """
    Override DictionaryAgent to use spaCy tokenizer.

    Optionally restricts the vocabulary to words present in the configured
    pretrained embedding file.
    """

    @staticmethod
    def add_cmdline_args(argparser):
        # Adds --pretrained_words on top of the base dictionary args and
        # forces the spaCy tokenizer.
        group = DictionaryAgent.add_cmdline_args(argparser)
        group.add_argument(
            '--pretrained_words',
            type='bool',
            default=True,
            help='Use only words found in provided embedding_file',
        )
        group.set_defaults(dict_tokenizer='spacy')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Index words in embedding file. Skipped when no embedding file is
        # configured or when loading an already-trained dictionary.
        if (
            self.opt['pretrained_words']
            and self.opt.get('embedding_file')
            and not self.opt.get('trained', False)
        ):
            print('[ Indexing words with embeddings... ]')
            self.embedding_words = set()
            self.opt['embedding_file'] = modelzoo_path(
                self.opt.get('datapath'), self.opt['embedding_file']
            )
            # Embedding files are whitespace-separated; the first token of
            # each line is the word itself.
            with open(self.opt['embedding_file']) as f:
                for line in f:
                    w = normalize_text(line.rstrip().split(' ')[0])
                    self.embedding_words.add(w)
            print('[ Num words in set = %d ]' % len(self.embedding_words))
        else:
            # None means "no restriction": every token may enter the dict.
            self.embedding_words = None

    def add_to_dict(self, tokens):
        """
        Builds dictionary from the list of provided tokens.

        Only adds words contained in self.embedding_words, if not None.
        """
        for token in tokens:
            if self.embedding_words is not None and token not in self.embedding_words:
                continue
            self.freq[token] += 1
            if token not in self.tok2ind:
                index = len(self.tok2ind)
                self.tok2ind[token] = index
                self.ind2tok[index] = token
# ------------------------------------------------------------------------------
# Document Reader.
# ------------------------------------------------------------------------------
class DrqaAgent(Agent):
    """
    ParlAI agent wrapping the DrQA DocReaderModel for extractive QA.

    Observations contain a document and question separated by newlines;
    replies are spans extracted from the document.
    """

    @staticmethod
    def add_cmdline_args(argparser):
        # Model hyperparameters plus the dictionary's own args.
        config.add_cmdline_args(argparser)
        DrqaAgent.dictionary_class().add_cmdline_args(argparser)

    @staticmethod
    def dictionary_class():
        return SimpleDictionaryAgent

    def __init__(self, opt, shared=None):
        if opt.get('numthreads', 1) > 1:
            raise RuntimeError("numthreads > 1 not supported for this model.")
        super().__init__(opt, shared)
        # All agents keep track of the episode (for multiple questions)
        self.episode_done = True
        self.opt['cuda'] = not self.opt['no_cuda'] and torch.cuda.is_available()
        if shared is not None:
            # model has already been set up: reuse the shared components.
            self.word_dict = shared['word_dict']
            self.model = shared['model']
            self.feature_dict = shared['feature_dict']
        else:
            # set up model: prefer model_file, then init_model, else scratch.
            self.word_dict = DrqaAgent.dictionary_class()(opt)
            if self.opt.get('model_file') and os.path.isfile(opt['model_file']):
                self._init_from_saved(opt['model_file'])
            else:
                if self.opt.get('init_model'):
                    self._init_from_saved(opt['init_model'])
                else:
                    self._init_from_scratch()
            if self.opt['cuda']:
                print('[ Using CUDA (GPU %d) ]' % opt['gpu'])
                torch.cuda.set_device(opt['gpu'])
                self.model.cuda()
        # Set up params/logging/dicts
        self.id = self.__class__.__name__
        config.set_defaults(self.opt)
        self.n_examples = 0

    def _init_from_scratch(self):
        """Builds feature dict and a freshly initialized DocReaderModel."""
        self.feature_dict = build_feature_dict(self.opt)
        self.opt['num_features'] = len(self.feature_dict)
        self.opt['vocab_size'] = len(self.word_dict)
        print('[ Initializing model from scratch ]')
        self.model = DocReaderModel(self.opt, self.word_dict, self.feature_dict)
        self.model.set_embeddings()

    def _init_from_saved(self, fname):
        """Restores model weights, feature dict and config from a checkpoint."""
        print('[ Loading model %s ]' % fname)
        saved_params = torch.load(fname, map_location=lambda storage, loc: storage)
        if 'word_dict' in saved_params:
            # for compatibility with old saves
            self.word_dict.copy_dict(saved_params['word_dict'])
        self.feature_dict = saved_params['feature_dict']
        self.state_dict = saved_params['state_dict']
        # Saved config overrides command-line options.
        config.override_args(self.opt, saved_params['config'])
        self.model = DocReaderModel(
            self.opt, self.word_dict, self.feature_dict, self.state_dict
        )

    def share(self):
        """Shares dictionary, model and feature dict with clone agents."""
        shared = super().share()
        shared['word_dict'] = self.word_dict
        shared['model'] = self.model
        shared['feature_dict'] = self.feature_dict
        return shared

    def observe(self, observation):
        """Accumulates dialogue context across turns within an episode."""
        # shallow copy observation (deep copy can be expensive)
        observation = observation.copy()
        if not self.episode_done and not observation.get('preprocessed', False):
            # Prepend prior turns (minus the previous question) to the text.
            dialogue = self.observation['text'].split('\n')[:-1]
            dialogue.extend(observation['text'].split('\n'))
            observation['text'] = '\n'.join(dialogue)
        self.observation = observation
        self.episode_done = observation['episode_done']
        return observation

    def act(self):
        """Update or predict on a single example (batchsize = 1)."""
        reply = {'id': self.getID()}
        ex = self._build_ex(self.observation)
        if ex is None:
            # No answer span found (or empty input): reply with id only.
            return reply
        batch = batchify(
            [ex], null=self.word_dict[self.word_dict.null_token], cuda=self.opt['cuda']
        )
        # Either train or predict
        if 'labels' in self.observation:
            self.n_examples += 1
            self.model.update(batch)
        else:
            prediction, score = self.model.predict(batch)
            reply['text'] = prediction[0]
            reply['text_candidates'] = [prediction[0]]
            reply['candidate_scores'] = [score[0]]
        reply['metrics'] = {'train_loss': self.model.train_loss.avg}
        return reply

    def batch_act(self, observations):
        """
        Update or predict on a batch of examples.

        More efficient than act().
        """
        batchsize = len(observations)
        batch_reply = [{'id': self.getID()} for _ in range(batchsize)]
        # Some examples will be None (no answer found). Filter them.
        examples = [self._build_ex(obs) for obs in observations]
        valid_inds = [i for i in range(batchsize) if examples[i] is not None]
        examples = [ex for ex in examples if ex is not None]
        # If all examples are invalid, return an empty batch.
        if len(examples) == 0:
            return batch_reply
        # Else, use what we have (hopefully everything).
        batch = batchify(
            examples,
            null=self.word_dict[self.word_dict.null_token],
            cuda=self.opt['cuda'],
        )
        # Either train or predict
        if 'labels' in observations[0]:
            try:
                self.n_examples += len(examples)
                self.model.update(batch)
            except RuntimeError as e:
                # catch out of memory exceptions during fwd/bck (skip batch)
                if 'out of memory' in str(e):
                    print(
                        '| WARNING: ran out of memory, skipping batch. '
                        'if this happens frequently, decrease batchsize or '
                        'truncate the inputs to the model.'
                    )
                    batch_reply[0]['metrics'] = {'skipped_batches': 1}
                    return batch_reply
                else:
                    raise e
        else:
            predictions, scores = self.model.predict(batch)
            # Map predictions back to their original batch positions.
            for i in range(len(predictions)):
                batch_reply[valid_inds[i]]['text'] = predictions[i]
                batch_reply[valid_inds[i]]['text_candidates'] = [predictions[i]]
                batch_reply[valid_inds[i]]['candidate_scores'] = [scores[i]]
        batch_reply[0]['metrics'] = {
            'train_loss': self.model.train_loss.avg * batchsize
        }
        return batch_reply

    def save(self, fname=None):
        """
        Save the parameters of the agent to a file.
        """
        fname = self.opt.get('model_file', None) if fname is None else fname
        if fname:
            print("[ saving model: " + fname + " ]")
            self.opt['trained'] = True
            self.model.save(fname)
            # save opt file
            with open(fname + '.opt', 'w') as handle:
                json.dump(self.opt, handle)

    # --------------------------------------------------------------------------
    # Helper functions.
    # --------------------------------------------------------------------------

    def _build_ex(self, ex):
        """
        Find the token span of the answer in the context for this example.

        If a token span cannot be found, return None. Otherwise, torchify.
        """
        # Check if empty input (end of epoch)
        if 'text' not in ex:
            return
        # Split out document + question
        inputs = {}
        fields = ex['text'].strip().split('\n')
        # Data is expected to be text + '\n' + question
        if len(fields) < 2:
            raise RuntimeError('Invalid input. Is task a QA task?')
        paragraphs, question = fields[:-1], fields[-1]
        # Optionally subsample document paragraphs during training for speed.
        if len(fields) > 2 and self.opt.get('subsample_docs', 0) > 0 and 'labels' in ex:
            paragraphs = self._subsample_doc(
                paragraphs, ex['labels'], self.opt.get('subsample_docs', 0)
            )
        document = ' '.join(paragraphs)
        inputs['document'], doc_spans = self.word_dict.span_tokenize(document)
        inputs['question'] = self.word_dict.tokenize(question)
        inputs['target'] = None
        # Find targets (if labels provided).
        # Return if we were unable to find an answer.
        if 'labels' in ex:
            if 'answer_starts' in ex:
                # randomly sort labels and keep the first match
                labels_with_inds = list(zip(ex['labels'], ex['answer_starts']))
                random.shuffle(labels_with_inds)
                for ans, ch_idx in labels_with_inds:
                    # try to find an answer_start matching a tokenized answer
                    start_idx = bisect.bisect_left(
                        list(x[0] for x in doc_spans), ch_idx
                    )
                    end_idx = start_idx + len(self.word_dict.tokenize(ans)) - 1
                    if end_idx < len(doc_spans):
                        inputs['target'] = (start_idx, end_idx)
                        break
            else:
                inputs['target'] = self._find_target(inputs['document'], ex['labels'])
            if inputs['target'] is None:
                return
        # Vectorize.
        inputs = vectorize(self.opt, inputs, self.word_dict, self.feature_dict)
        # Return inputs with original text + spans (keep for prediction)
        return inputs + (document, doc_spans)

    def _find_target(self, document, labels):
        """
        Find the start/end token span for all labels in document.

        Return a random one for training.
        """
        def _positions(d, l):
            # Yield every (start, end) token index pair where l occurs in d.
            for i in range(len(d)):
                for j in range(i, min(len(d) - 1, i + len(l))):
                    if l == d[i : j + 1]:
                        yield (i, j)
        targets = []
        for label in labels:
            targets.extend(_positions(document, self.word_dict.tokenize(label)))
        if len(targets) == 0:
            return
        return targets[np.random.choice(len(targets))]

    def _subsample_doc(self, paras, labels, subsample):
        """
        Subsample paragraphs from the document (mostly for training speed).

        Keeps one paragraph containing a label plus up to `subsample` random
        paragraphs from each side of it.
        """
        # first find a valid paragraph (with a label)
        pi = -1
        for ind, p in enumerate(paras):
            for l in labels:
                if p.find(l):
                    pi = ind
                    break
        if pi == -1:
            # failed
            return paras[0:1]
        new_paras = []
        if pi > 0:
            # Random paragraphs before the labeled one (may repeat).
            for _i in range(min(subsample, pi - 1)):
                ind = random.randint(0, pi - 1)
                new_paras.append(paras[ind])
        new_paras.append(paras[pi])
        if pi < len(paras) - 1:
            # Random paragraphs after the labeled one (may repeat).
            for _i in range(min(subsample, len(paras) - 1 - pi)):
                ind = random.randint(pi + 1, len(paras) - 1)
                new_paras.append(paras[ind])
        return new_paras
| [
"s.jiang@uva.nl"
] | s.jiang@uva.nl |
5085ca2798782462b8657b441de13de0fdb34be1 | 7a12db378b4b1947eaa07924200404597b0d2cc3 | /contrib/seeds/makeseeds.py | c37c7b505c487c8e9af3f53d35a572c5289ddb9a | [
"MIT"
] | permissive | ProfProfcompile/zarhexcash | fa8471188a1a7e17b83fe6c7cb01152e0e2da0dd | 735e51403f267b4f30474508cb6297c622dccaa9 | refs/heads/master | 2020-06-19T20:35:03.561192 | 2020-01-02T09:24:22 | 2020-01-02T09:24:22 | 196,862,120 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,520 | py | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512  # maximum number of seeds emitted
MAX_SEEDS_PER_ASN=2  # cap per autonomous system to keep the list diverse
MIN_BLOCKS = 615801  # nodes below this block height are considered stale
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
    ""  # placeholder entry; add offending IPs here
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/ZarhexcashCore:1.0.(0|1|99)/)$")

def parseline(line):
    """Parse one line of DNS-seeder dump output into a node dict.

    Expected whitespace-separated fields: address, good flag, last-success
    timestamp, uptime percentages (field 7 = 30-day uptime, '%'-suffixed),
    block height, service flags (hex), protocol version, quoted user agent.

    Returns a dict describing the node, or None if the line is malformed,
    the address is invalid, or the seeder flagged the node as not good.
    """
    sline = line.split()
    if len(sline) < 11:
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']:  # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr  # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check: each octet in [0, 255], address nonzero.
        ip = 0
        for i in range(0, 4):
            if int(m.group(i + 2)) < 0 or int(m.group(i + 2)) > 255:
                return None
            ip = ip + (int(m.group(i + 2)) << (8 * (3 - i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip bad results.
    # FIX: fields from split() are strings; the original `sline[1] == 0`
    # compared str to int and never matched, so not-good nodes slipped through.
    if sline[1] == '0':
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent (strip surrounding quotes).
    # NOTE(review): the two-token branch assumes the quoted agent contains
    # exactly one space; other token counts would raise IndexError — confirm
    # against the seeder's dump format.
    if len(sline) > 11:
        agent = sline[11][1:] + sline[12][:-1]
    else:
        agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def filtermultiport(ips):
    '''Filter out hosts with more nodes per IP'''
    # Bucket every entry by its canonical address key; an address appearing
    # more than once is listening on multiple ports and is dropped entirely.
    buckets = collections.defaultdict(list)
    for entry in ips:
        buckets[entry['sortkey']].append(entry)
    singles = []
    for bucket in buckets.values():
        if len(bucket) == 1:
            singles.append(bucket[0])
    return singles
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Limit IPv4 seeds per ASN and in total; pass IPv6/onion through.

    ASNs are looked up via Team Cymru's DNS-based IP-to-ASN service, so this
    requires network access. Entries whose ASN cannot be resolved are dropped
    with a message on stderr.
    """
    # Sift out ips by type
    ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
    ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
    ips_onion = [ip for ip in ips if ip['net'] == 'onion']
    # Filter IPv4 by ASN
    result = []
    asn_count = {}
    for ip in ips_ipv4:
        if len(result) == max_total:
            break
        try:
            # Reverse the octets to form the origin.asn.cymru.com query name;
            # the TXT answer starts with the ASN number.
            asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        # FIX: was a bare `except:`, which also swallows KeyboardInterrupt
        # and SystemExit; keep the best-effort skip but only for real errors.
        except Exception:
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    # TODO: filter IPv6 by ASN
    # Add back non-IPv4
    result.extend(ips_ipv6)
    result.extend(ips_onion)
    return result
def main():
    """Read seeder dump lines from stdin, filter them, print seed addresses."""
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]
    # Skip entries with valid address.
    ips = [ip for ip in ips if ip is not None]
    # Skip entries from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    ips = [ip for ip in ips if ip['uptime'] > 50]
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Filter out hosts with multiple bitcoin ports, these are likely abusive
    ips = filtermultiport(ips)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['net'], x['sortkey']))
    for ip in ips:
        if ip['net'] == 'ipv6':
            # IPv6 addresses are bracketed per the standard host:port syntax.
            print('[%s]:%i' % (ip['ip'], ip['port']))
        else:
            print('%s:%i' % (ip['ip'], ip['port']))

if __name__ == '__main__':
    main()
| [
"44237606+ProfProfcompile@users.noreply.github.com"
] | 44237606+ProfProfcompile@users.noreply.github.com |
c6b02f8d1c64284325e44582082dfdc3cc6e8f81 | cd576030dcd36c2f516af49fe530fd2dac4ff0df | /slrealizer/tests/test_om10realizer.py | 025251626d41a24790f51e22de2604634adc20d0 | [
"BSD-3-Clause"
] | permissive | LSSTDESC/SLRealizer | 601af6e944875be64d3f57156f3b9dd2e04b299a | ff630e5fc951f85e05dca5c135b42a7b563ea1ac | refs/heads/master | 2020-04-06T04:46:43.583437 | 2018-09-10T13:19:06 | 2018-09-10T13:19:06 | 82,958,782 | 1 | 3 | BSD-3-Clause | 2018-10-12T18:12:29 | 2017-02-23T18:34:40 | Python | UTF-8 | Python | false | false | 4,777 | py | from __future__ import absolute_import, division, print_function
import unittest
import os, sys
import shutil
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from om10 import DB
from slrealizer import OM10Realizer
class OM10RealizerTest(unittest.TestCase):
"""
Tests the OM10Realizer subclass.
NOTE
Execute these tests with:
nosetests
from anywhere in the module, provided you have run
pip install nose
"""
    @classmethod
    def setUpClass(cls):
        """Creates a clean output directory and a shared OM10Realizer fixture."""
        from slrealizer import Dataloader
        # Output catalogs: recreate the test output directory from scratch.
        tests_dir = os.path.dirname(os.path.realpath(__file__))
        output_dir = os.path.join(tests_dir, 'test_output', 'test_om10realizer')
        if os.path.exists(output_dir):
            shutil.rmtree(output_dir)
        os.makedirs(output_dir)
        output_paths = {
            'rowbyrow_analytical_path': os.path.join(output_dir, 'rowbyrow_ana_source.csv'),
            'rowbyrow_hsm_numerical_path': os.path.join(output_dir, 'rowbyrow_hsm_num_source.csv'),
            'rowbyrow_raw_numerical_path': os.path.join(output_dir, 'rowbyrow_raw_num_source.csv'),
            'vectorized_path': os.path.join(output_dir, 'vectorized_source.csv'),
            'object_path': os.path.join(output_dir, 'object.csv'),
        }
        # Expose each output path as a class attribute (cls.<name>).
        for k, v in output_paths.items():
            setattr(cls, k, v)
        # Instantiate Dataloader
        dataloader = Dataloader()
        # Read in input data files (test-sized catalogs).
        test_om10_db = dataloader.read(filename='om10', is_test=True)
        test_obs_df = dataloader.read(filename='observation', is_test=True)
        # Instantiate OM10Realizer with noise disabled for determinism.
        cls.realizer = OM10Realizer(observation=test_obs_df, catalog=test_om10_db, debug=True, add_moment_noise=False, add_flux_noise=False)
        cls.lens_info = test_om10_db.sample[0]
        cls.obs_info = test_obs_df.loc[0]
def test_om10_to_galsim(self):
""" Tests whether _om10_to_galsim method runs """
self.realizer._om10_to_galsim(lens_info=self.lens_info, band=self.obs_info['filter'])
def test_draw_system(self):
""" Tests whether draw_system method runs """
self.realizer.draw_system(lens_info=self.lens_info, obs_info=self.obs_info)
def test_estimate_hsm(self):
""" Tests whether estimate_parameters method runs """
self.realizer.estimate_parameters(lens_info=self.lens_info, obs_info=self.obs_info)
def test_om10_to_lsst(self):
""" Tests whether _om10_to_lsst runs """
self.realizer._om10_to_lsst(lens_info=self.lens_info, obs_info=self.obs_info)
def test_create_source_row(self):
"""
Tests whether create_source_row runs with each of the options
for moment calculation method
"""
for m in ["analytical", "hsm", "raw_numerical"]:
self.realizer.create_source_row(lens_info=self.lens_info, obs_info=self.obs_info, method=m)
def test_make_source_table_rowbyrow_numerical(self):
"""
Tests whether make_source_table_rowbyrow runs with two numerical options
for moment calculation method, 'hsm' and 'raw_numerical'
"""
self.realizer.make_source_table_rowbyrow(save_file=self.rowbyrow_raw_numerical_path, method="raw_numerical")
self.realizer.make_source_table_rowbyrow(save_file=self.rowbyrow_hsm_numerical_path, method="hsm")
def test_make_source_table_analytical(self):
"""
Tests whether make_source_table_vectorized run and
whether make_source_table_rowbyrow runs with method = 'analytical',
and checks whether the two output source tables have the same values
"""
rowbyrow = self.realizer.make_source_table_rowbyrow(save_file=self.rowbyrow_analytical_path, method="analytical")
vectorized = self.realizer.make_source_table_vectorized(output_source_path=self.vectorized_path, include_time_variability=False)
#print(rowbyrow.dtypes, vectorized.dtypes)
# Select only the columns with numeric values, and turn into Numpy array
rowbyrow_float = rowbyrow.select_dtypes(include=[np.number]).values
vectorized_float = vectorized.select_dtypes(include=[np.number]).values
self.assertTrue(np.allclose(rowbyrow_float, vectorized_float, rtol=1e-05, atol=1e-05))
def test_make_object_table(self):
""" Tests whether make_object_table runs """
self.realizer.make_source_table_vectorized(output_source_path=self.vectorized_path, include_time_variability=False)
self.realizer.make_object_table(include_std=False, source_table_path=self.vectorized_path, object_table_path=self.object_path)
if __name__ == '__main__':
unittest.main()
| [
"jiwon.christine.park@gmail.com"
] | jiwon.christine.park@gmail.com |
65279548e8a88e4c82e473615bfa725dcd128099 | 5a0dfe1326bb166d6dfaf72ce0f89ab06e963e2c | /leetcode/lc169.py | 1d4cf7d118d92554f11f87503e95b0dee3af9a32 | [] | no_license | JasonXJ/algorithms | 7bf6a03c3e26f917a9f91c53fc7b2c65669f7692 | 488d93280d45ea686d30b0928e96aa5ed5498e6b | refs/heads/master | 2020-12-25T15:17:44.345596 | 2018-08-18T07:20:27 | 2018-08-18T07:20:27 | 67,798,458 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | from collections import Counter
class Solution(object):
    def majorityElement(self, nums):
        """Return the element that appears more than ``len(nums) // 2`` times.

        :type nums: List[int]
        :rtype: int

        O(n) time, O(k) space for k distinct values. Like the original
        implementation, returns None (implicitly absent result) when the
        list is empty or has no strict majority element.
        """
        if not nums:
            return None
        # most_common(1) yields the single highest-frequency (value, count)
        # pair; only that candidate can possibly be the majority element.
        candidate, count = Counter(nums).most_common(1)[0]
        return candidate if count > len(nums) // 2 else None
| [
"lxj2048@gmail.com"
] | lxj2048@gmail.com |
7662a622d333a4cf401952e87276e5c0c227d916 | d03fb4f30b2984a48b0df0448b245c5979e5f412 | /5_Exam_if_operator/5_1_2_chess_board.py | 55ff5804cc02ba7fa0f3aaa7f207b86f5b227d53 | [] | no_license | dudrill/Python_generation_basics | 6e63a06d0220fb30599fd273d587dc036551c3b0 | f4efcc669b20ef9155fbc9ba50ae6afb12bc6f30 | refs/heads/main | 2023-01-07T16:26:34.125380 | 2020-11-11T18:59:59 | 2020-11-11T18:59:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | x1, y1, x2, y2 = [int(input()) for i in 'abcd']
if ((x1+y1)%2==0 and (x2+y2)%2==0) or ((x1+y1)%2==1 and (x2+y2)%2==1):
print('YES')
else:
print('NO') | [
"drewnovok@gmail.com"
] | drewnovok@gmail.com |
883ea3e81dc401577ccc5f44a5df3b420557a2a6 | 1eb3c8c5d9b20802458a04ee7a4795b421452c3f | /Tutorial_6_draw/polygon.py | 5ed9900ac6793dd6c36fccf1bbae5b00dd11818c | [] | no_license | YTGhost/learnOpenCV | 8f77deb3fe6ea8b26f17c538933e48773278ed8d | 8c55c7fd88c22e38cabaaf84a3f1ae3bcfc3ed9e | refs/heads/master | 2020-06-22T02:52:15.743240 | 2019-08-04T13:56:34 | 2019-08-04T13:56:34 | 197,615,398 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | # 画一个黄色的具有四个顶点的多边形,并添加‘OpenCV’文字
import cv2 as cv
import numpy as np
# Create a black image
img = np.zeros((512,512,3), np.uint8)
# 512x512 canvas, 3 channels (OpenCV uses BGR order), unsigned 8-bit dtype.
font = cv.FONT_HERSHEY_SIMPLEX
pts = np.array([[10,5],[20,30],[70,20],[50,10]],np.int32)
pts = pts.reshape((-1,1,2))
# The first reshape argument is -1: that dimension's length is inferred from
# the remaining dimensions (here: 4 points, each of shape (1, 2)).
# If polylines' third argument were False, the polygon would be drawn open
# (first and last vertices not joined).
cv.polylines(img,[pts],True,(0,0,255),1) # image, point set, closed?, colour, line thickness
# NOTE(review): (0, 0, 255) is red in BGR, not the yellow announced in the
# file's header comment — yellow would be (0, 255, 255).
cv.putText(img,'OpenCV',(10,500),font,4,(255,255,255),2,cv.LINE_AA) # image, text, origin, font face, scale, colour, thickness, line type
# cv.LINE_AA anti-aliases the glyph edges, so the text renders more smoothly.
cv.imshow('polygon',img)
cv.waitKey()
| [
"283304489@qq.com"
] | 283304489@qq.com |
67e1dc92e4c3101dcb85e0b56769ec96f9f6361b | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/pytype/pytype/tests/test_stdlib.py | e3b1ef911e8d6b21610d3a1d5f66a5fe1e1dc42b | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:28c5b4753341b4eedd875c32605f5d620461fdba9a3c92cb03f952ef8bdae4f8
size 7724
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
75e3d7f3e652742a680604c29c05b266514cd6cf | 2c046543dd86a138ef419d2f72492ca7920d3725 | /taiwanese/back/utils/drive.py | 5491ecbbd6d598c714222b7c0d7a96289e48f5f0 | [] | no_license | LukeLinEx/pkg_tw | 7e75e7a93d7b68ec969557a2224fec771e06153b | 01f2d9a2b0112cc24dc072da84eb6b008dc922c8 | refs/heads/master | 2023-03-04T23:36:26.181935 | 2021-02-17T03:33:53 | 2021-02-17T03:33:53 | 326,266,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | import yaml
from copy import deepcopy
from taiwanese.config import *
from taiwanese.back.utils.gapi_connection import get_drive_service
class GDrive(object):
    """Thin wrapper around a Google Drive API service built from YAML config."""

    def __init__(self):
        # Load the configuration once, then build the Drive service from it.
        with open(config_path, 'r') as stream:
            self.__config = yaml.safe_load(stream)
        self.__service = self.get_service()

    def get_service(self):
        """Build a Drive service from the configured Google credential path."""
        return get_drive_service(self.config["credentials"]["google"])

    @property
    def config(self):
        """A defensive deep copy of the loaded configuration mapping."""
        return deepcopy(self.__config)

    @property
    def service(self):
        """The underlying Drive API service object."""
        return self.__service

    def list_files(self, folder_id):
        """Return (id, name) records for every file directly under *folder_id*,
        following nextPageToken pagination until the listing is exhausted."""
        collected = []
        token = None
        while True:
            page = self.service.files().list(
                q="parents in '{}'".format(folder_id), spaces='drive',
                fields='nextPageToken, files(id, name)',
                pageToken=token).execute()
            collected.extend(page.get('files'))
            token = page.get('nextPageToken', None)
            if token is None:
                break
        return collected
| [
"lukelin@tsung-yins-mbp.home"
] | lukelin@tsung-yins-mbp.home |
254e4997c633057e7b30989b811822b0d8d9e406 | d39124f15ba4dabf0b2a079a8faf36e841cfaf2e | /unsync/unsync/commands/utils/import_export.py | 795ba4b093dd7d89aab3a308bc772b203b10911a | [
"Apache-2.0"
] | permissive | PGower/Unsync | b6737e7aa117c53223e07870f07def7620d56ded | f5b6efc3289c4db6900c52d964707632fac406e1 | refs/heads/master | 2021-01-20T02:30:20.353580 | 2017-06-28T04:20:08 | 2017-06-28T04:20:08 | 89,415,331 | 1 | 0 | null | 2017-06-28T04:20:09 | 2017-04-25T23:16:41 | Python | UTF-8 | Python | false | false | 1,633 | py | """Commands to import or export the current command stack to or from a file."""
import click
import unsync
from unsync.core import NestedUnsyncCommands
import pickle
@unsync.command()
@click.option('--output-file', '-o', type=click.Path(dir_okay=False, readable=True, resolve_path=True), help='File that the command stack will be written too.')
@click.option('--exit-after', type=bool, default=True, help='Automatically exit from the unsync tool and do not run any of the exported commands after the export is complete.')
@click.pass_context
def export_command_stack(ctx, data, output_file, exit_after):
    """Export all of the commands following the export command in the call stack.

    The remaining command stack is pickled to ``output_file`` so it can later
    be replayed with ``import_command_stack``. With ``--exit-after`` (the
    default) the tool aborts immediately so the exported commands do not run.

    NOTE(review): the --output-file option declares readable=True but not
    writable=True even though the path is opened for writing — confirm the
    intended Path constraints.
    """
    # Perform a quick check and make sure that there are no nested export
    # calls. It wont work and I wont support it.
    if ctx.info_name in ctx.command_stack:
        raise ExportNestingError('Export commands cannot be nested.')
    # Pickle (binary) serialization of the remaining command stack.
    with open(output_file, 'wb') as f:
        pickle.dump(ctx.command_stack, f)
    if exit_after is True:
        raise click.Abort()
@unsync.command()
@click.option('--input-file', '-i', type=click.Path(dir_okay=False, readable=True, exists=True, resolve_path=True), help='File that extra commands will be read from.')
@click.pass_context
def import_command_stack(ctx, data, input_file):
    """Import the given set of commands from the input_file and insert them into the command stream after that current command."""
    # SECURITY: pickle.load can execute arbitrary code embedded in the file;
    # only import command stacks produced by a trusted export.
    with open(input_file, 'rb') as f:
        command_stack = pickle.load(f)
    # Replay the loaded commands within the current click context.
    n = NestedUnsyncCommands(command_stack)
    n.invoke(ctx)
class ExportNestingError(Exception):
    """Raised when an export command appears inside a command stack that is itself being exported."""
    pass
| [
"pgower@gmail.com"
] | pgower@gmail.com |
30cf50242dd2b1a2a1703eaf53954944acdf4b13 | 2192fe31e41c4758010c7258168fc661e4988991 | /deploy/wsgi.py | f67718f1d8f235cd7d605945e18b07d293a0ce0e | [] | no_license | commoncode/shopifyplus | c7b13799561a4a23bccb3f080bb846b86ab8ad29 | 3aba281a790f93b780b0df3beebdf686434896e6 | refs/heads/master | 2018-12-29T00:38:20.146735 | 2015-02-17T01:48:12 | 2015-02-17T01:48:12 | 3,792,661 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # pinax.wsgi is configured to live in projects/shopifyplus/deploy.
import os
import sys
from os.path import abspath, dirname, join
# NOTE(review): addsitedir is imported but never used in this file.
from site import addsitedir
# Make the project's parent directory importable so 'shopifyplus' resolves.
sys.path.insert(0, abspath(join(dirname(__file__), "../../")))
# django.conf.settings is a lazy object, so importing it before the
# DJANGO_SETTINGS_MODULE variable is set below is safe.
from django.conf import settings
os.environ["DJANGO_SETTINGS_MODULE"] = "shopifyplus.settings"
# Also expose the project's bundled apps directory on the import path.
sys.path.insert(0, join(settings.PROJECT_ROOT, "apps"))
from django.core.handlers.wsgi import WSGIHandler
application = WSGIHandler() | [
"daryl@commoncreative.com.au"
] | daryl@commoncreative.com.au |
a49e0d2f00762ae2600710718427a7bc01e96d96 | 883add54792cb105a1b986197b1f89069cc2ebb6 | /app.py | f26fdfc5996aec8c563405ce6bcf1aca332a02a7 | [] | no_license | diya-liza/flask-backend-routing | ae88d2e4dc7377bac3087585db223d0fb78f645c | 82fdc8521d9fa2395a6badaea8ee51275db64721 | refs/heads/main | 2023-06-01T23:29:32.210231 | 2021-06-17T15:48:26 | 2021-06-17T15:48:26 | 377,883,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | from flask import Flask, request, jsonify
# NOTE(review): pandas is imported but never used in this file.
import pandas as pd
from routes import routes
# Create the Flask application and attach the project's route blueprint.
app = Flask(__name__)
app.register_blueprint(routes.route)
app.run(debug=True, host='0.0.0.0') | [
"diyaliza16@gmail.com"
] | diyaliza16@gmail.com |
5a7be2721937362fcef6ff3ff71846e5b4ea34a8 | fd9e5777b876ddf204250c5dd72f4ac518e7b8dd | /util/merger.py | 55e6c0f0d71e698d55e61a75adfd2ce3ac8277c4 | [] | no_license | as-com/bot | 7fda107557e299f8835e3f245afae32ac1f3821e | d5d505b00efaad0f1b7b634118719bc4e055361b | refs/heads/master | 2021-01-15T12:28:27.903980 | 2015-04-14T15:47:16 | 2015-04-14T15:47:16 | 34,180,640 | 0 | 0 | null | 2015-04-18T19:58:29 | 2015-04-18T19:58:28 | null | UTF-8 | Python | false | false | 3,500 | py | from git import Repo
import pystache, re
class GitMerger():
    """
    Merges or rebases a pull request at the request of a repository
    collaborator.

    Used as a mixin: the host class is expected to provide
      * self.config -- dict with 'merge_re' and 'path_to_repo' entries
      * self.gh     -- an authenticated GitHub client
      * self.repo   -- the target repository object
      * self.get_pull(number) -- accessor for a pull request
    """
    def __init__(self):
        self.merge_re = re.compile(self.config["merge_re"], re.IGNORECASE)
        # BUG FIX: previously written as "\brebase\b" in a non-raw string,
        # where "\b" is a backspace character rather than a word boundary,
        # so the pattern could never match the word "rebase".
        self.rebase_re = re.compile(r"\brebase\b", re.IGNORECASE)
    def check_comment(self, number, comment, requester):
        """Act on a merge/rebase request comment when the requester is allowed.

        The request is honoured only if some line matches a merge or rebase
        pattern, the requester is a collaborator, and the requester actually
        authored a comment with this exact body on the issue.
        """
        user = self.gh.user(requester)
        lines = comment.splitlines()
        # BUG FIX: the authorship check used to compare `comm == comm.body`
        # (a comment object against its own text), which could never express
        # the intended "this user posted this comment" condition.
        if any(self.merge_re.match(line) or self.rebase_re.match(line) for line in lines) and \
            any(user == collab for collab in self.repo.iter_collaborators()) and \
            any(comm.user == user and comm.body == comment for comm in self.repo.issue(number).iter_comments()):
            if any(self.merge_re.match(line) for line in lines):
                # Merging is still deliberately disabled:
                # merge(number, self.get_number_commits(comment))
                pass
            elif any(self.rebase_re.match(line) for line in lines):
                # BUG FIX: was the bare call `rebase(number)` (NameError);
                # the method lives on self.
                self.rebase(number)
    def get_number_commits(self, comment):
        """Return the commit count captured by the first merge_re match in *comment*."""
        matches = [self.merge_re.match(line) for line in comment.splitlines()]
        match = next(match for match in matches if match is not None)
        return int(match.groups()[0])
    def rebase(self, number):
        """Post a templated comment asking the pull request author to rebase."""
        pr = self.get_pull(number)
        issue = self.repo.issue(number)
        with open("templates/rebase.tpl") as f:
            msg = pystache.render(f.read(), pr)
        issue.create_comment(msg)
    def merge(self, number, commits):
        """
        Squash a pull request down to *commits* commits and rebase it onto
        master, in preparation for a fast-forward merge.

        @param number  int pull request number
        @param commits int number of commits the branch should be squashed to
        """
        pr = self.get_pull(number)
        repo = Repo(self.config["path_to_repo"])
        repo.git.checkout("master")
        repo.git.pull("origin", "master")
        try:
            # Fetch the contributor's branch through a temporary remote.
            remote = repo.create_remote("temp", "https://github.com/{0}/{1}.git".format(*pr.head.repo))
            repo.git.fetch(remote, pr.head.ref)
            # akin: git checkout -b <user>/<branch> temp/<branch> --track
            branch_name = "{0}/{1}".format(pr.user.login, pr.head.ref)
            branch_location = "{0}/{1}".format(remote.name, pr.head.ref)
            repo.git.checkout(branch_name, branch_location, b=True, track=True)
            branch = repo.active_branch
            # Soft-reset to squash the surplus commits into one.
            # BUG FIX: this used to call self.get_number_commits(comment)
            # with an undefined name `comment`; the caller already supplies
            # the requested commit count as the `commits` parameter.
            reset_commits = max(0, pr.commits - commits)
            repo.git.reset("HEAD~{0}".format(reset_commits), soft=True)
            repo.git.rebase("master")
            with open("templates/merge-pull-request.tpl") as f:
                commit_msg = pystache.render(f.read(), pr)
            print(commit_msg)
            # The actual commit/merge/push steps are still disabled:
            # repo.index.commit(commit_msg)
            # repo.git.checkout("master")
            # repo.git.merge(branch)
            # repo.remotes.origin.push("master")
        except Exception as e:
            issue = self.repo.issue(pr.number)
            issue.create_comment("Could not automatically squash and merge the pull request: {0}".format(str(e)))
        finally:
            # Best-effort cleanup; `remote`/`branch` may not exist when the
            # failure happened early, hence the swallowed exception.
            try:
                repo.delete_remote(remote)
                repo.git.branch(branch, D=True)  # delete branch
            except Exception:
                pass
| [
"yeatesgraeme@gmail.com"
] | yeatesgraeme@gmail.com |
c0f2c2e1ee656252d16abd7f3bdd116cce608258 | 601a0284a0b65eb928bb11d08d03502796e7a11e | /code.py | eb42a42d68cdd2051ba28115f99c3c837ae83ef3 | [] | no_license | JDBrooks55/TextGame | fb82ca968f8aaf2e8b58cb4aa59adddec4a815f3 | 371b55736b4bc77fc0569ee00320dd09094bd1c6 | refs/heads/master | 2020-04-07T07:20:12.152438 | 2018-11-19T21:01:45 | 2018-11-19T21:01:45 | 158,172,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,964 | py |
# Module-level game state.
x = 0  # scratch value; not read by any function in this file
name = " "  # player name placeholder; never assigned elsewhere in this file
choice = " "  # placeholder; each room function uses its own local `choice`
inv = []  # player inventory, listed by basement()'s inventory branch
def menu():
    """Show the title menu and dispatch on the player's (lower-cased) choice."""
    print ("\n\nWelcome to 'insert name of game here'")
    print ("What would you like to do?")
    print ("> New Game")
    print ("> Load Game")
    print ("> Credits")
    print ("> Exit")
    choice = input (">>> ")
    choice = choice.lower()
    # BUG FIX: the original condition `choice == 'new game' or 'n'` is always
    # truthy ('n' is a non-empty string), so every input started a new game
    # and the remaining branches were unreachable. A membership test checks
    # each accepted spelling.
    if choice in ('new game', 'n'):
        basement()
    elif choice == 'load game':
        print ("\n\n> Still being coded")
        menu()
    elif choice == 'credits':
        print ("\n\n> Everything was made by me")
        menu()
    elif choice == 'exit':
        quit()
    else:
        print ("\n\n> That's not an option. Read the menu asshole")
        menu()
def basement():
    """Opening room: the player can head north or check their inventory."""
    print ("\n\n> You wake up in a dark room")
    print ('> "Where am I?"')
    print ("> You can see light coming from up the stairs north of you")
    choice = input (">>> ")
    choice = choice.lower()
    # BUG FIX: both `choice == 'north' or 'n'` and the chained
    # `choice == 'i' or 'inv' or 'inventory'` were always truthy, so the
    # player was forced north no matter what they typed. Membership tests
    # compare against each accepted spelling.
    if choice in ('north', 'n'):
        hallway1()
    elif choice in ('i', 'inv', 'inventory'):
        print("\n\n")
        print("> Inventory")
        for x in inv:
            print(">", x)
        basement()
    else:
        print ("\n\n> What?")
        basement()
def hallway1():
    """Hub room linking the bedroom, living room, kitchen and the exit."""
    # hallway description
    print ("\n\n> You are in a hallway")
    print("> On the east wall you see 2 doors")
    print("> One leads to a bedroom and the other leads to a living room")
    print("> On the west wall you see a door leading to a kitchen")
    print ("> To your north is a door that leads outside")
    choice = input (">>> ")
    choice = choice.lower()
    # NOTE(review): bedroom(), living_room() and kitchen() are not defined
    # anywhere in this file, so reaching these branches raises NameError —
    # presumably those rooms are still being coded.
    if choice == 'enter bedroom':
        bedroom()
    elif choice == 'enter living room':
        living_room()
    elif choice == 'enter kitchen':
        kitchen()
    # After the chosen room function returns, or when the input matches no
    # branch, control falls through to the main menu.
    menu()
| [
"noreply@github.com"
] | JDBrooks55.noreply@github.com |
0026a038b30ac7462d9321fb6c835d8f99033f1e | f13348d6934b547530d4c1edaf3efdda50d5c549 | /problem 2.py | 431a55e508267466e134761460c70447c379417b | [] | no_license | zwang88/Project-Euler | 2128e26e76ac12410d3901cbfb54fe93563f7354 | 1cda70a19c0bb278cdbf8513e802488eb7b9b876 | refs/heads/master | 2021-01-19T06:05:33.946877 | 2015-02-10T23:05:11 | 2015-02-10T23:05:11 | 30,275,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | total = 0
total = 0  # running sum of the even-valued terms (re-initialised so this block is self-contained)
n0 = 0
n1 = 1
# Project Euler #2: sum the even-valued Fibonacci terms whose VALUE stays
# below four million. BUG FIX: the original loop tested the running SUM
# (`total < 4000000`) instead of the current term and produced the correct
# answer only by numerical coincidence; the condition now bounds the term.
while n1 < 4000000:
    if n1 % 2 == 0:
        total = total + n1
    n0, n1 = n1, n0 + n1
print(total)
| [
"zwang88@illinois.edu"
] | zwang88@illinois.edu |
adf0a4bcbbb7eb87e897f27bcc76e4d498088995 | 521efcd158f4c69a686ed1c63dd8e4b0b68cc011 | /airflow/providers/amazon/aws/transfers/s3_to_redshift.py | 85cd38ad0763d3cd305c02b2e2964cd1c0daf8c5 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | coutureai/RaWorkflowOrchestrator | 33fd8e253bfea2f9a82bb122ca79e8cf9dffb003 | cd3ea2579dff7bbab0d6235fcdeba2bb9edfc01f | refs/heads/main | 2022-10-01T06:24:18.560652 | 2021-12-29T04:52:56 | 2021-12-29T04:52:56 | 184,547,783 | 5 | 12 | Apache-2.0 | 2022-11-04T00:02:55 | 2019-05-02T08:38:38 | Python | UTF-8 | Python | false | false | 7,375 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from typing import List, Optional, Union
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.redshift_sql import RedshiftSQLHook
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.utils.redshift import build_credentials_block
AVAILABLE_METHODS = ['APPEND', 'REPLACE', 'UPSERT']
class S3ToRedshiftOperator(BaseOperator):
    """
    Executes a COPY command to load files from s3 to Redshift

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:S3ToRedshiftOperator`

    :param schema: reference to a specific schema in redshift database
    :type schema: str
    :param table: reference to a specific table in redshift database
    :type table: str
    :param s3_bucket: reference to a specific S3 bucket
    :type s3_bucket: str
    :param s3_key: reference to a specific S3 key
    :type s3_key: str
    :param redshift_conn_id: reference to a specific redshift database
    :type redshift_conn_id: str
    :param aws_conn_id: reference to a specific S3 connection
        If the AWS connection contains 'aws_iam_role' in ``extras``
        the operator will use AWS STS credentials with a token
        https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html#copy-credentials
    :type aws_conn_id: str
    :param verify: Whether or not to verify SSL certificates for S3 connection.
        By default SSL certificates are verified.
        You can provide the following values:

        - ``False``: do not validate SSL certificates. SSL will still be used
          (unless use_ssl is False), but SSL certificates will not be
          verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
          You can specify this argument if you want to use a different
          CA cert bundle than the one used by botocore.
    :type verify: bool or str
    :param column_list: list of column names to load
    :type column_list: List[str]
    :param copy_options: reference to a list of COPY options
    :type copy_options: list
    :param method: Action to be performed on execution. Available ``APPEND``, ``UPSERT`` and ``REPLACE``.
    :type method: str
    :param upsert_keys: List of fields to use as key on upsert action
    :type upsert_keys: List[str]
    """
    template_fields = ('s3_bucket', 's3_key', 'schema', 'table', 'column_list', 'copy_options')
    template_ext = ()
    ui_color = '#99e699'
    def __init__(
        self,
        *,
        schema: str,
        table: str,
        s3_bucket: str,
        s3_key: str,
        redshift_conn_id: str = 'redshift_default',
        aws_conn_id: str = 'aws_default',
        verify: Optional[Union[bool, str]] = None,
        column_list: Optional[List[str]] = None,
        copy_options: Optional[List[str]] = None,
        autocommit: bool = False,
        method: str = 'APPEND',
        upsert_keys: Optional[List[str]] = None,
        **kwargs,
    ) -> None:
        # Backward-compatibility shim: the removed `truncate_table=True`
        # flag maps onto the REPLACE method and is stripped from kwargs
        # before BaseOperator sees it.
        if 'truncate_table' in kwargs:
            warnings.warn(
                """`truncate_table` is deprecated. Please use `REPLACE` method.""",
                DeprecationWarning,
                stacklevel=2,
            )
            if kwargs['truncate_table']:
                method = 'REPLACE'
            kwargs.pop('truncate_table', None)
        super().__init__(**kwargs)
        self.schema = schema
        self.table = table
        self.s3_bucket = s3_bucket
        self.s3_key = s3_key
        self.redshift_conn_id = redshift_conn_id
        self.aws_conn_id = aws_conn_id
        self.verify = verify
        self.column_list = column_list
        self.copy_options = copy_options or []
        self.autocommit = autocommit
        self.method = method
        self.upsert_keys = upsert_keys
        # Fail fast at construction time on an unknown method.
        if self.method not in AVAILABLE_METHODS:
            raise AirflowException(f'Method not found! Available methods: {AVAILABLE_METHODS}')
    def _build_copy_query(self, copy_destination: str, credentials_block: str, copy_options: str) -> str:
        """Render the Redshift COPY statement for *copy_destination*.

        An explicit column list is emitted only when ``column_list`` was
        supplied; otherwise Redshift's default column ordering applies.
        """
        column_names = "(" + ", ".join(self.column_list) + ")" if self.column_list else ''
        return f"""
                    COPY {copy_destination} {column_names}
                    FROM 's3://{self.s3_bucket}/{self.s3_key}'
                    credentials
                    '{credentials_block}'
                    {copy_options};
        """
    def execute(self, context) -> None:
        """Build the COPY (plus surrounding DDL/DML for REPLACE/UPSERT) and run it."""
        redshift_hook = RedshiftSQLHook(redshift_conn_id=self.redshift_conn_id)
        conn = S3Hook.get_connection(conn_id=self.aws_conn_id)
        credentials_block = None
        # NOTE(review): the class docstring says 'aws_iam_role' in extras,
        # but the code reads the 'role_arn' extra — confirm which key the
        # connection is expected to carry.
        if conn.extra_dejson.get('role_arn', False):
            credentials_block = f"aws_iam_role={conn.extra_dejson['role_arn']}"
        else:
            # Fall back to access-key credentials resolved by the S3 hook.
            s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
            credentials = s3_hook.get_credentials()
            credentials_block = build_credentials_block(credentials)
        copy_options = '\n\t\t\t'.join(self.copy_options)
        destination = f'{self.schema}.{self.table}'
        # UPSERT stages the load into a session-local temp table (#<table>).
        copy_destination = f'#{self.table}' if self.method == 'UPSERT' else destination
        copy_statement = self._build_copy_query(copy_destination, credentials_block, copy_options)
        sql: Union[list, str]
        if self.method == 'REPLACE':
            # Delete-then-copy inside one transaction.
            sql = ["BEGIN;", f"DELETE FROM {destination};", copy_statement, "COMMIT"]
        elif self.method == 'UPSERT':
            # Key columns come from upsert_keys or the table's primary key.
            keys = self.upsert_keys or redshift_hook.get_table_primary_key(self.table, self.schema)
            if not keys:
                raise AirflowException(
                    f"No primary key on {self.schema}.{self.table}. Please provide keys on 'upsert_keys'"
                )
            where_statement = ' AND '.join([f'{self.table}.{k} = {copy_destination}.{k}' for k in keys])
            # Load into the temp table, then delete matching rows and insert
            # the staged rows inside one transaction.
            sql = [
                f"CREATE TABLE {copy_destination} (LIKE {destination});",
                copy_statement,
                "BEGIN;",
                f"DELETE FROM {destination} USING {copy_destination} WHERE {where_statement};",
                f"INSERT INTO {destination} SELECT * FROM {copy_destination};",
                "COMMIT",
            ]
        else:
            # APPEND: the bare COPY statement.
            sql = copy_statement
        self.log.info('Executing COPY command...')
        redshift_hook.run(sql, autocommit=self.autocommit)
        self.log.info("COPY command complete...")
| [
"noreply@github.com"
] | coutureai.noreply@github.com |
b12debf49ec0d2a6bde44a37de0bf4860dd0473f | f3b5c4a5ce869dee94c3dfa8d110bab1b4be698b | /controller/src/opserver/analytics_db.py | 09ee2eb3ed77d9627e8c6b51cff089f82123733f | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | pan2za/ctrl | 8f808fb4da117fce346ff3d54f80b4e3d6b86b52 | 1d49df03ec4577b014b7d7ef2557d76e795f6a1c | refs/heads/master | 2021-01-22T23:16:48.002959 | 2015-06-17T06:13:36 | 2015-06-17T06:13:36 | 37,454,161 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,947 | py | #
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
#
# analytics_db.py
# Implementation of database purging
#
import redis
import pycassa
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
from pycassa.types import *
from pycassa import *
from sandesh.viz.constants import *
from sandesh.viz.constants import _NO_AUTO_PURGE_TABLES, \
_FLOW_TABLES, _STATS_TABLES, _MSG_TABLES
from pysandesh.util import UTCTimestampUsec
import code
import urllib2
import time
import json
import datetime
import pdb
import argparse
import socket
import struct
class AnalyticsDb(object):
    """Access layer for the analytics Cassandra keyspace and the Redis-backed
    purge-status record; implements database purging."""
    def __init__(self, logger, cassandra_server_list,
                 redis_query_port, redis_password):
        self._logger = logger
        self._cassandra_server_list = cassandra_server_list
        self._redis_query_port = redis_query_port
        self._redis_password = redis_password
        self._pool = None
        self.connect_db()
        self.number_of_purge_requests = 0
    # end __init__
    def connect_db(self):
        """Create the Cassandra connection pool; -1 on failure, None on success."""
        try:
            self._pool = ConnectionPool(COLLECTOR_KEYSPACE,
                server_list=self._cassandra_server_list, timeout=None)
        except Exception as e:
            self._logger.error("Exception: Failure in connection to "
                "AnalyticsDb %s" % e)
            return -1
        return None
    # end connect_db
    def _get_sysm(self):
        """Return a SystemManager for the first reachable server, else None."""
        for server_and_port in self._cassandra_server_list:
            try:
                sysm = pycassa.system_manager.SystemManager(server_and_port)
            except Exception as e:
                self._logger.error("Exception: SystemManager failed %s" % e)
                continue
            else:
                return sysm
        return None
    # end _get_sysm
    def _get_analytics_start_time(self):
        """Fetch the analytics start-time row, defaulting the per-category
        start times to the overall start time when absent; None on failure."""
        try:
            col_family = ColumnFamily(self._pool, SYSTEM_OBJECT_TABLE)
            row = col_family.get(SYSTEM_OBJECT_ANALYTICS)
        except Exception as e:
            self._logger.error("Exception: analytics_start_time Failure %s" % e)
            return None
        # Initialize the dictionary before returning
        if (SYSTEM_OBJECT_START_TIME not in row):
            return None
        if (SYSTEM_OBJECT_FLOW_START_TIME not in row):
            row[SYSTEM_OBJECT_FLOW_START_TIME] = row[SYSTEM_OBJECT_START_TIME]
        if (SYSTEM_OBJECT_STAT_START_TIME not in row):
            row[SYSTEM_OBJECT_STAT_START_TIME] = row[SYSTEM_OBJECT_START_TIME]
        if (SYSTEM_OBJECT_MSG_START_TIME not in row):
            row[SYSTEM_OBJECT_MSG_START_TIME] = row[SYSTEM_OBJECT_START_TIME]
        return row
    # end _get_analytics_start_time
    def _update_analytics_start_time(self, start_times):
        """Persist the given start-time columns; errors are logged, not raised."""
        try:
            col_family = ColumnFamily(self._pool, SYSTEM_OBJECT_TABLE)
            col_family.insert(SYSTEM_OBJECT_ANALYTICS, start_times)
        except Exception as e:
            self._logger.error("Exception: update_analytics_start_time "
                "Connection Failure %s" % e)
    # end _update_analytics_start_time
    def set_analytics_db_purge_status(self, purge_id, purge_cutoff):
        """Record a running purge in Redis; returns an error dict on failure,
        None on success."""
        try:
            redish = redis.StrictRedis(db=0, host='127.0.0.1',
                port=self._redis_query_port, password=self._redis_password)
            redish.hset('ANALYTICS_DB_PURGE', 'status', 'running')
            redish.hset('ANALYTICS_DB_PURGE', 'purge_input', str(purge_cutoff))
            redish.hset('ANALYTICS_DB_PURGE', 'purge_start_time',
                UTCTimestampUsec())
            redish.hset('ANALYTICS_DB_PURGE', 'purge_id', purge_id)
        except redis.exceptions.ConnectionError:
            self._logger.error("Exception: "
                "Failure in connection to redis-server")
            response = {'status': 'failed',
                'reason': 'Failure in connection to redis-server'}
            return response
        except redis.exceptions.ResponseError:
            self._logger.error("Exception: "
                "Redis authentication failed")
            response = {'status': 'failed',
                'reason': 'Redis authentication failed'}
            return response
        return None
    # end set_analytics_db_purge_status
    def delete_db_purge_status(self):
        """Remove the purge-status record from Redis, logging any Redis errors."""
        try:
            redish = redis.StrictRedis(db=0, host='127.0.0.1',
                port=self._redis_query_port, password=self._redis_password)
            redish.delete('ANALYTICS_DB_PURGE')
        except redis.exceptions.ConnectionError:
            self._logger.error("Exception: "
                "Failure in connection to redis-server")
        except redis.exceptions.ResponseError:
            self._logger.error("Exception: "
                "Redis authentication failed")
    # end delete_db_purge_status
    def get_analytics_db_purge_status(self, redis_list):
        """Return the purge-status hash from the first Redis server that has
        one, an error dict on a Redis failure, or None if no server has it."""
        for redis_ip_port in redis_list:
            try:
                redish = redis.StrictRedis(redis_ip_port[0],
                    redis_ip_port[1], db=0,
                    password=self._redis_password)
                if (redish.exists('ANALYTICS_DB_PURGE')):
                    return redish.hgetall('ANALYTICS_DB_PURGE')
            except redis.exceptions.ConnectionError:
                self._logger.error("Exception: "
                    "Failure in connection to redis-server")
                response = {'status': 'failed',
                    'reason': 'Failure in connection to redis-server: '
                    + redis_ip_port[0]}
                return response
            except redis.exceptions.ResponseError:
                self._logger.error("Exception: "
                    "Redis authentication failed")
                response = {'status': 'failed',
                    'reason': 'Redis authentication failed'}
                return response
        return None
    # end get_analytics_db_purge_status
    def db_purge(self, purge_cutoff, purge_id):
        """Delete rows older than the per-category cutoffs from every
        auto-purgeable table, plus the corresponding message-table rows.

        Returns (total_rows_deleted, purge_error_details); the first element
        is -1 on a fatal setup failure.
        """
        total_rows_deleted = 0 # total number of rows deleted
        purge_error_details = []
        if (self._pool == None):
            self.connect_db()
        if not self._pool:
            self._logger.error('Connection to AnalyticsDb has Timed out')
            purge_error_details.append('Connection to AnalyticsDb has Timed out')
            return (-1, purge_error_details)
        sysm = self._get_sysm()
        if (sysm == None):
            self._logger.error('Failed to connect SystemManager')
            purge_error_details.append('Failed to connect SystemManager')
            return (-1, purge_error_details)
        try:
            table_list = sysm.get_keyspace_column_families(COLLECTOR_KEYSPACE)
        except Exception as e:
            self._logger.error("Exception: Purge_id %s Failed to get "
                "Analytics Column families %s" % (purge_id, e))
            purge_error_details.append("Exception: Failed to get "
                "Analytics Column families %s" % (e))
            return (-1, purge_error_details)
        # delete entries from message table
        msg_table = COLLECTOR_GLOBAL_TABLE
        # total number of rows deleted from this table
        msg_table_deleted = 0
        try:
            msg_cf = pycassa.ColumnFamily(self._pool, msg_table)
        except Exception as e:
            # BUG FIX: this message has two %s placeholders but was formatted
            # with the single argument `e`, raising TypeError on this path.
            self._logger.error("purge_id %s Failure in fetching "
                "message table columnfamily %s" % (purge_id, e))
            purge_error_details.append("Failure in fetching "
                "message table columnfamily %s" % e)
            return (-1, purge_error_details)
        for table in table_list:
            # purge from index tables
            if (table not in _NO_AUTO_PURGE_TABLES):
                self._logger.info("purge_id %s deleting old records from "
                    "table: %s" % (purge_id, table))
                # determine purge cutoff time by table category
                if (table in _FLOW_TABLES):
                    purge_time = purge_cutoff['flow_cutoff']
                elif (table in _STATS_TABLES):
                    purge_time = purge_cutoff['stats_cutoff']
                elif (table in _MSG_TABLES):
                    purge_time = purge_cutoff['msg_cutoff']
                else:
                    purge_time = purge_cutoff['other_cutoff']
                del_msg_uuids = [] # list of uuids of messages to be deleted
                # total number of rows deleted from each table
                per_table_deleted = 0
                try:
                    cf = pycassa.ColumnFamily(self._pool, table)
                except Exception as e:
                    # BUG FIX: same single-argument/two-placeholder TypeError
                    # as the message-table lookup above.
                    self._logger.error("purge_id %s Failure in fetching "
                        "the columnfamily %s" % (purge_id, e))
                    purge_error_details.append("Failure in fetching "
                        "the columnfamily %s" % e)
                    return (-1, purge_error_details)
                b = cf.batch()
                try:
                    # get all columns only in case of one message index table
                    if (table is MESSAGE_TABLE_SOURCE):
                        cols_to_fetch = 1000000
                    else:
                        cols_to_fetch = 1
                    for key, cols in cf.get_range(column_count=cols_to_fetch):
                        t2 = key[0]
                        # each row will have equivalent of 2^23 = 8388608 usecs
                        row_time = (float(t2)*pow(2, RowTimeInBits))
                        if (row_time < purge_time):
                            per_table_deleted +=1
                            total_rows_deleted +=1
                            if (table is MESSAGE_TABLE_SOURCE):
                                # get message table uuids to delete
                                # FIXME(review): this appends a LIST of column
                                # values per row, and the deletion loop below
                                # then passes each list as a single row key to
                                # remove() — confirm whether extend() was
                                # intended here.
                                del_msg_uuids.append(list(cols.values()))
                            try:
                                b.remove(key)
                            except Exception as e:
                                self._logger.error("Exception: Purge_id:%s table:%s "
                                    "error: %s" % (purge_id, table, e))
                                b = cf.batch() # create a new batch job
                                continue
                    try:
                        b.send()
                    except Exception as e:
                        self._logger.error("Exception: Purge_id:%s table:%s "
                            "error: %s" % (purge_id, table, e))
                    if len(del_msg_uuids) != 0:
                        # delete uuids from the message table
                        b_msgtbl = msg_cf.batch()
                        try:
                            for key in del_msg_uuids:
                                msg_table_deleted +=1
                                total_rows_deleted +=1
                                b_msgtbl.remove(key)
                            b_msgtbl.send()
                        except Exception as e:
                            self._logger.error("Exception: Purge_id %s message table "
                                "doesnot have uuid %s" % (purge_id, e))
                            purge_error_details.append("Exception: Message table "
                                "doesnot have uuid %s" % (e))
                except Exception as e:
                    self._logger.error("Exception: Purge_id:%s table:%s "
                        "error: %s" % (purge_id, table, e))
                    purge_error_details.append("Exception: Table:%s "
                        "error: %s" % (table, e))
                    continue
                self._logger.info("Purge_id %s deleted %d rows from table: %s"
                    % (purge_id, per_table_deleted, table))
        self._logger.info("Purge_id %s deleted %d rows from table: %s"
            % (purge_id, msg_table_deleted, COLLECTOR_GLOBAL_TABLE))
        # end deleting all relevant UUIDs from message table
        self._logger.info("Purge_id %s total rows deleted: %s"
            % (purge_id, total_rows_deleted))
        return (total_rows_deleted, purge_error_details)
    # end purge_data
    def get_dbusage_info(self, rest_api_port):
        """Collects database usage information from all db nodes
        Returns:
            A dictionary with db node name as key and db usage in % as value
            (empty on any failure; errors are logged, never raised)
        """
        to_return = {}
        try:
            uve_url = "http://127.0.0.1:" + str(rest_api_port) + "/analytics/uves/database-nodes?cfilt=DatabaseUsageInfo"
            node_dburls = json.loads(urllib2.urlopen(uve_url).read())
            for node_dburl in node_dburls:
                # calculate disk usage percentage for analytics in each cassandra node
                db_uve_state = json.loads(urllib2.urlopen(node_dburl['href']).read())
                db_usage_in_perc = (100*
                    float(db_uve_state['DatabaseUsageInfo']['database_usage'][0]['analytics_db_size_1k'])/
                    float(db_uve_state['DatabaseUsageInfo']['database_usage'][0]['disk_space_available_1k'] +
                        db_uve_state['DatabaseUsageInfo']['database_usage'][0]['disk_space_used_1k']))
                to_return[node_dburl['name']] = db_usage_in_perc
        except Exception as inst:
            self._logger.error(type(inst)) # the exception instance
            self._logger.error(inst.args) # arguments stored in .args
            self._logger.error(inst) # __str__ allows args to be printed directly
            self._logger.error("Could not retrieve db usage information")
        self._logger.info("db usage:" + str(to_return))
        return to_return
    #end get_dbusage_info
# end AnalyticsDb
| [
"pan2za@live.com"
] | pan2za@live.com |
2edf1931164f851816b5a7d1e3251a17aeae7646 | efa1c4375c1c390d8c739be3fa037614fc1a8376 | /scripts/excel-playground.py | 91c416165195e5b340cfd7915260a600adcbffaa | [] | no_license | bellabellahuang/codingIsland | 560760ee456f30e28e522015f7adea28b1889fa6 | a4daa940f530d5729c8f55a8a55ef90d300b9c01 | refs/heads/master | 2023-07-26T01:45:16.403430 | 2023-07-13T17:14:43 | 2023-07-13T17:14:43 | 133,688,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,720 | py | import openpyxl as xl
import re
from copy import copy
FILTER_ROW_REGEX = '^Applied filters.*'
validator = re.compile(FILTER_ROW_REGEX)
MONTH_MAP = {
'January':'01',
'February':'02',
'March':'03',
'April':'04',
'May':'05',
'June':'06',
'July':'07',
'August':'08',
'September':'09',
'October':'10',
'November':'11',
'December':'12',
}
input ="original_file.xlsx"
wb = xl.load_workbook(input, data_only=True)
output = xl.Workbook()
tab_names = wb.sheetnames
for name in tab_names:
if name in ['name_1', 'name_2']: continue # skip specific tabs
tab = wb[name]
print(f"Formatting {tab}")
ws = output.create_sheet(title=name)
# remove the "Applied filter" row
for row in tab.iter_rows():
cell_value = str(row[0].value)
if validator.match(cell_value):
tab.delete_rows(row[0].row, 1)
print("Filter row deleted.")
break
# get all the column names
headers = []
for col in tab[1]:
headers.append(col.value)
# add the recalc difference col if it does not exist
RECALC_DIFF_COL = "New col name"
if len(headers) == 13 and RECALC_DIFF_COL not in headers:
tab.insert_cols(11)
print(F"{RECALC_DIFF_COL} col is inserted.")
# for col in tab[1]:
# print(col.value)
# add column for PID
tab.insert_cols(3)
for row in tab.iter_rows():
row[2].value = name
# save the new tab
for row in tab.iter_rows():
# if row[0].row == 4:
for cell in row:
# print(cell.row, row.index(cell))
col = row.index(cell) + 1
# keep header row and first 3 cols
if cell.row ==1 or col in [1,2,3]:
ws.cell(row=cell.row, column=col).value = MONTH_MAP.get(cell.value, cell.value)
elif cell.has_style and cell.fill.bgColor.rgb != '00000000':
# print(cell.fill.bgColor.rgb)
ws.cell(row=cell.row, column=col).value = cell.value
ws.cell(row=cell.row, column=col).fill = copy(cell.fill)
output.save(filename="new_file.xlsx")
print("File saved.")
# merge tabs
adj_wb = xl.load_workbook("new_file.xlsx", data_only=True)
single_wb = xl.Workbook()
single_ws = single_wb.create_sheet()
single_row = 0
adj_tabs = adj_wb.sheetnames
for name in adj_tabs:
if name == 'Sheet': continue
tab = adj_wb[name]
for row in tab.iter_rows():
for cell in row:
col = row.index(cell) + 1
single_ws.cell(row=single_row + cell.row, column=col).value = cell.value
single_row += tab.max_row
# single_col += tab.max_column
single_wb.save(filename="merge_file.xlsx")
print("Merged.")
| [
"bellahuang1012@gmail.com"
] | bellahuang1012@gmail.com |
cd7e2d99660a6c8d2c97d9ef1fc346ff6573713c | 8703e78356836ec002bcff1142a995d10c2f4b63 | /OpenCV/image_operation/OpenCV_adding_method.py | 4aa775bcd07e848e4803cce4cd27a588a3218189 | [] | no_license | hossainarif726/OpenCV_library_with_python | 0a13d00b7207da06ccc517a14bcf0627a5eee3d0 | 49f6518432edda453fe966fd90b0fe37bea13338 | refs/heads/master | 2022-10-19T23:30:40.992482 | 2020-06-12T13:51:27 | 2020-06-12T13:51:27 | 271,801,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | import cv2
import numpy as np
img1 = cv2.imread('test1.png')
img2 = cv2.imread('test2.png')
#for adding two image must be in same shape
add = cv2.add(img1,img2) #it will destroy opaqueness
cv2.imshow('added image',add)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | hossainarif726.noreply@github.com |
9307e0b490c5ef2e862972bb41f4bb163d50a2ce | e763c6420cf88d02c7a261dd151bd87096551102 | /CornershopVenv/bin/pyhtmlizer | b573cc3e7548b6a9df22472c41ad02c316a30629 | [] | no_license | Ch3did/super-journey | 0a420e3c83c4664a6df7deb72509c9636f5489ef | ee82739010aceabefaf7e48453f64055afa637e3 | refs/heads/master | 2023-02-16T14:52:19.538132 | 2021-01-09T13:08:21 | 2021-01-09T13:08:21 | 328,152,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | #!/home/ch3did/projects/python/scrapy/super-journey/CornershopVenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from twisted.scripts.htmlizer import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"phchedidk@gmail.com"
] | phchedidk@gmail.com | |
4da3656f25196687de4aea18a3990b141ca6b24b | 45744e4e6764d7c409262aff3cebf9f4b820196e | /cart/urls.py | 13f1c3d0f1856414512d3735be85d6091f52ac65 | [] | no_license | challengevolley/ticket | 62e133ae9fb31e2d346722ecccfc800b10347b6b | 6d8b7880a7618fcffa3a283483820732f52a6c15 | refs/heads/master | 2020-05-26T20:28:38.340043 | 2019-05-24T06:21:23 | 2019-05-24T06:21:23 | 188,364,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from django.urls import path
from . import views
app_name = 'cart'
urlpatterns = [
path('tickets/<int:ticket_id>/delete', views.cart_delete, name='delete'),
path('', views.cart_list, name='list'),
] | [
"ec2-user@ip-172-31-37-226.us-east-2.compute.internal"
] | ec2-user@ip-172-31-37-226.us-east-2.compute.internal |
14d710f1b9dfa4a4a18b83a5595cc7c0ece6f1bf | eab26813f4c88ae9dfdbc91275f1f9741325831c | /auctions/migrations/0002_auto_20180608_2235.py | 58fb3dcf994c8716d6598b179d2c3fab37339eaf | [] | no_license | kengitahi/auctionApp | 6136ae6f0dc0857f0d6922e562ff205d06dd5d8b | 3095ebd53c43fb4a7ee9e9e53fae728692745b9f | refs/heads/master | 2020-03-25T13:31:17.235992 | 2018-06-19T11:55:05 | 2018-06-19T11:55:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-06-08 22:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('auctions', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Auction',
new_name='Users',
),
]
| [
"ademdinarevic@gmail.com"
] | ademdinarevic@gmail.com |
450f022bb706397bd26593c0ec32e34a60d8265a | 61d888900947129abd1cbb1a7d99296ca6c31dcf | /test/test_mfa_recovery_code.py | 4ca2deec69dd3b5a699f65190791559206a07379 | [] | no_license | krezreb/openapi-client-clevercloud | b8e8454e571d0e08eae6ea5a83a129b1928c085c | e42e5380c6fd4003cdfbdccda89f9dd5a3c76754 | refs/heads/master | 2023-05-06T19:28:16.547835 | 2021-05-28T08:31:35 | 2021-05-28T08:31:35 | 371,631,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | """
Clever-Cloud API
Public API for managing Clever-Cloud data and products # noqa: E501
The version of the OpenAPI document: 1.0.1
Contact: support@clever-cloud.com
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import openapi_client
from openapi_client.model.mfa_recovery_code import MFARecoveryCode
class TestMFARecoveryCode(unittest.TestCase):
"""MFARecoveryCode unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testMFARecoveryCode(self):
"""Test MFARecoveryCode"""
# FIXME: construct object with mandatory attributes with example values
# model = MFARecoveryCode() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"josephbeeson@gmail.com"
] | josephbeeson@gmail.com |
eb17e571bdbaa9fe4f379b7c55e5f4548a7d23aa | 3b8a3640431aff0744afca711ea6f172c000f79b | /visualization.py | bc470c50a0f558768b7ce734d1390c0e0157801a | [] | no_license | fulowl/embeddings-visualization | 9d604a65030ab5ce3f1c288c417a8adfce18971e | 48fbe028a36afecbd3cb30c16d3f3c5266ecd9e3 | refs/heads/main | 2023-03-15T19:38:36.968513 | 2021-01-31T03:25:54 | 2021-01-31T03:25:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,500 | py | import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import Callback, ModelCheckpoint
from pytorch_lightning.metrics.functional import accuracy
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from torchvision.datasets import MNIST
class EmbeddingsCallback(Callback):
def __init__(self):
super().__init__()
def on_test_end(self, trainer, pl_module):
trainer.logger.experiment.add_embedding(
pl_module.test_embeddings,
pl_module.test_targets,
global_step=trainer.global_step)
class ANN(pl.LightningModule):
def __init__(self, data_dir='./'):
super().__init__()
# Set our init args as class attributes
self.data_dir = data_dir
self.test_targets = []
self.test_embeddings = torch.zeros((0, 100),
dtype=torch.float32,
device='cuda:0')
self.test_predictions = []
# Hardcode some dataset specific attributes
self.num_classes = 10
self.dims = (1, 28, 28)
self.transform = transforms.Compose([transforms.ToTensor()])
self.conv1 = nn.Conv2d(1, 16, 3)
self.bn1 = nn.BatchNorm2d(16)
self.maxpool1 = nn.MaxPool2d(2)
self.fc1 = nn.Linear(16 * 13 * 13, 100)
self.fc2 = nn.Linear(100, self.num_classes)
# Define PyTorch model
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.maxpool1(x))
x = x.view(-1, 16 * 13 * 13)
x = self.fc1(x)
y = self.fc2(F.relu(x))
return x, y
def training_step(self, batch, batch_idx):
x, y = batch
_, logits = self(x)
loss = F.cross_entropy(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
self.log('train_loss', loss, prog_bar=True)
self.log('train_acc', acc, prog_bar=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
_, logits = self(x)
loss = F.cross_entropy(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
# Calling self.log will surface up scalars for you in TensorBoard
self.log('val_loss', loss, prog_bar=True)
self.log('val_acc', acc, prog_bar=True)
return loss
def test_step(self, batch, batch_idx):
# Here we just reuse the validation_step for testing
x, y = batch
embeddings, logits = self(x)
loss = F.cross_entropy(logits, y)
preds = torch.argmax(logits, dim=1)
acc = accuracy(preds, y)
self.test_predictions.extend(preds.detach().cpu().tolist())
self.test_targets.extend(y.detach().cpu().tolist())
self.test_embeddings = torch.cat((self.test_embeddings, embeddings), 0)
self.log('test_acc', acc)
self.log('test_loss', loss)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters())
return optimizer
####################
# DATA RELATED HOOKS
####################
def prepare_data(self):
# download
MNIST(self.data_dir, train=True, download=True)
MNIST(self.data_dir, train=False, download=True)
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == 'fit':
dataset_full = MNIST(self.data_dir,
train=True,
transform=self.transform)
self.dataset_train, self.dataset_val = random_split(
dataset_full, [55000, 5000])
# Assign test dataset for use in dataloader(s)
if stage == 'test':
self.dataset_test = MNIST(self.data_dir,
train=False,
transform=self.transform)
np.random.seed(19)
random_indices = np.random.uniform(0, 10000, 100).astype(np.uint8)
outlier_list = []
for i in range(100):
outlier = np.random.uniform(0, 255, (28, 28)).astype(np.uint8)
outlier_list.append(outlier)
for idx in range(len(random_indices)):
self.dataset_test.data[random_indices[idx]] = torch.ByteTensor(
outlier_list[idx])
def train_dataloader(self):
return DataLoader(self.dataset_train, batch_size=32, shuffle=True)
def val_dataloader(self):
return DataLoader(self.dataset_val, batch_size=32)
def test_dataloader(self):
return DataLoader(self.dataset_test, batch_size=32)
if __name__ == "__main__":
model = ANN()
embedding_callback = EmbeddingsCallback()
checkpoint_callback = ModelCheckpoint(
monitor='val_loss',
filename='mnist-{epoch:02d}-{val_loss:.2f}',
save_top_k=3,
save_weights_only=True)
trainer = pl.Trainer(gpus=1,
max_epochs=5,
progress_bar_refresh_rate=20,
callbacks=[checkpoint_callback, embedding_callback])
trainer.fit(model)
trainer.test()
| [
"noreply@github.com"
] | fulowl.noreply@github.com |
6d48a5673304b73a12e313b84ffc00a070f3ebc5 | 007691fe729d3bf1f85b4fe59a6be35249a4b592 | /MLE_tf.py | cf80b912da31c4bc08529719e97195c964875a09 | [] | no_license | Hitesh1912/Machine-Learning | dc8c03bb2ea03acb9b7932b0bf48d985b4c713c8 | f4fba43be263d54a8f6ca04491806c41a29b3fb2 | refs/heads/master | 2020-04-21T11:12:08.476871 | 2019-04-28T22:59:24 | 2019-04-28T22:59:24 | 169,514,716 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,498 | py | # Q6 Implementing Neural network using Max likelihood (cross entropy) loss using softmax
import tensorflow as tf
import numpy as np
import pandas as pd
from random import seed
# Function importing Dataset
def importdata():
global train_data
train_data = pd.read_csv(
'train_wine.csv', sep =',', header=None)
print(train_data.shape)
test_data = pd.read_csv(
'test_wine.csv', sep =',', header=None)
print(test_data.shape)
return train_data.values, test_data.values
# Function to split the dataset
def splitdataset(data):
# Seperating the target variable
x = data[:, 1:data.shape[1]]
y = data[:, 0]
# print(np.shape(x), np.shape(y))
return x, y
def feature_normalization(x):
mu = np.mean(x,axis=0)
sigma = np.std(x,axis=0)
return mu, sigma
def normalization(x,mu,sigma):
x = np.subtract(x, mu)
x = np.divide(x, sigma)
return x
def dense_to_one_hot(labels_dense, num_classes=3):
labels_dense = np.subtract(labels_dense,1)
labels_one_hot = tf.one_hot(labels_dense,depth=3)
return labels_one_hot.eval()
def evaluate_model(X_train, X_test, y_train, y_test, epochs, batch_size):
init = tf.global_variables_initializer()
with tf.Session() as sess:
# Initialize the variables (i.e. assign their default value)
sess.run(init)
for epoch in range(epochs):
avg_cost = 0.0
total_batch = int(len(X_train) / batch_size)
x_batches = np.array_split(X_train, total_batch)
y_batches = np.array_split(y_train, total_batch)
for i in range(total_batch):
batch_x, batch_y = x_batches[i], y_batches[i]
batch_y = dense_to_one_hot(y_batches[i])
_, c = sess.run([optimizer, loss], feed_dict={ input_layer: batch_x, real_output: batch_y})
avg_cost += c / total_batch
if epoch % 100 == 0:
print("Epoch:", '%04d' % (epoch + 1), "loss=","{:.9f}".format(avg_cost))
print("\nTraining complete!")
# #prediction on test set
predict = tf.argmax(output_layer, 1)
pred = predict.eval({input_layer: X_test.reshape(-1, num_input)})
print(pred)
correct_prediction = np.add(pred,1)
print(correct_prediction)
pred_temp = tf.equal(tf.argmax(output_layer, 1), tf.argmax(real_output, 1))
accuracy = tf.reduce_mean(tf.cast(pred_temp, "float"))
print("Test Accuracy:", accuracy.eval({input_layer: X_test.reshape(-1, num_input), real_output: dense_to_one_hot(y_test)}))
if __name__ == '__main__':
# To stop potential randomness
seed = 128
rng = np.random.RandomState(seed)
#get dataset
trainset, testset = importdata()
#split features, label
X_train, y_train = splitdataset(trainset)
X_test, y_test = splitdataset(testset)
#feature normalization
mu, sigma = feature_normalization(X_train)
X_train = normalization(X_train, mu, sigma)
X_test = normalization(X_test, mu, sigma)
# Network Parameters
num_input = X_train.shape[1] #features 12
num_hidden = 5
num_output = 3
# define placeholders
input_layer = tf.placeholder(tf.float32, [None, num_input])
real_output = tf.placeholder(tf.float32, [None, num_output])
# Training Parameters
learning_rate = 0.01
epochs = 1000
batch_size = 50
# define weights and biases of the neural network
hidden_layer_weights = tf.Variable(tf.random_normal([num_input, num_hidden], seed = seed))
hidden_layer_biases = tf.Variable(tf.random_normal([num_hidden],seed = seed))
output_layer_weights = tf.Variable(tf.random_normal([num_hidden, num_output],seed = seed))
output_layer_biases = tf.Variable(tf.random_normal([num_output],seed = seed))
# create our neural networks computational graph
hidden_layer = tf.add(tf.matmul(input_layer, hidden_layer_weights), hidden_layer_biases)
hidden_layer = tf.nn.relu(hidden_layer)
output_layer = tf.matmul(hidden_layer, output_layer_weights) + output_layer_biases
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output_layer,labels=real_output)) # used in maximum likelihood
#our backpropogation algorithm | ADAM is variant of Gradient Descent algorithm
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss)
# #training
evaluate_model(X_train, X_test, y_train, y_test, epochs, batch_size) | [
"verma.h@husky.neu.edu"
] | verma.h@husky.neu.edu |
ce2ab9fd24d842cfe951413780212372f003ae35 | dbf5c7fb0e111c14a044481796471f2eb49d2062 | /api_yolov5/restapi.py | dfed8ef030e2e49a745eab0f1052884dc7ffd6f1 | [
"MIT"
] | permissive | ylantt/TechSeeds_SF_UH21 | 9e779c7ddc59541e663425e49a4109d1ceac184e | 26c9f5cf61ad0a4de34e77eb9f5caf28d361648d | refs/heads/master | 2023-08-27T21:45:37.305287 | 2021-10-03T00:34:56 | 2021-10-03T00:34:56 | 403,484,506 | 0 | 0 | null | 2021-10-02T22:36:22 | 2021-09-06T04:27:01 | JavaScript | UTF-8 | Python | false | false | 1,758 | py | """
Run a rest API exposing the yolov5s object detection model
"""
import argparse
import io
from PIL import Image
import torch
from flask import Flask, request, jsonify
import base64
import json
app = Flask(__name__)
DETECTION_URL = "/v1/object-detection/yolov5s"
@app.route(DETECTION_URL, methods=["POST"])
def predict():
if not request.method == "POST":
return
print('hello')
dataDict = request.get_json()
photoBase64 = dataDict["photoBase64"]
image_64_decode = base64.decodebytes(bytes(photoBase64, "UTF-8"))
image_result = open('deer_decode.jpg', 'wb')
image_result.write(image_64_decode)
# if request.files.get("image"):
# image_file = request.files["image"]
# print(image_file)
# image_bytes = image_file.read()
img = Image.open('deer_decode.jpg')
print(type(img))
results = model(img, size=640)
data = results.pandas().xyxy[0].to_json(orient="records")
print(data)
data = json.loads(data[1:len(data) - 1])
try:
print("img shape: ", img.size)
img_size = img.size[0] * img.size[1]
skin_disease_area = (data['xmax'] - data['xmin']) * (data['ymax'] - data['ymin'])
data['level'] = skin_disease_area / img_size
except:
print('Error')
print(data)
return data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Flask api exposing yolov5 model")
parser.add_argument("--port", default=5000, type=int, help="port number")
args = parser.parse_args()
model = torch.hub.load('ultralytics/yolov5', 'custom',
path='exp3/best.pt')
model.eval()
app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat
| [
"luuvancui10a1516@gmail.com"
] | luuvancui10a1516@gmail.com |
e0ee80372c7505afea67b37c9508ee47e3049dee | 1892a473b7eed6aaa712bc2959a1aca48beec284 | /forks/rlkit/rlkit/launchers/ppo_three_tier_shared_goal_gen.py | 0b69f5208749bf01ef4f6d8fa809af59e8c268a0 | [
"MIT"
] | permissive | AndrewPaulChester/sage-code | d3753bc894f21ce057c1a273e54926e368529e2b | 9fe676bfbcbc6f642eca29b30a1027fba2a426a0 | refs/heads/main | 2023-05-05T19:08:21.655463 | 2021-05-27T05:21:54 | 2021-05-27T05:21:54 | 371,245,286 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,456 | py | import gym
from torch import nn as nn
import os
import numpy as np
from forks.rlkit.rlkit.exploration_strategies.base import PolicyWrappedWithExplorationStrategy
from forks.rlkit.rlkit.exploration_strategies.epsilon_greedy import (
EpsilonGreedy,
AnnealedEpsilonGreedy,
)
from forks.rlkit.rlkit.policies.argmax import ArgmaxDiscretePolicy
from forks.rlkit.rlkit.torch.dqn.dqn import DQNTrainer
from forks.rlkit.rlkit.torch.conv_networks import CNN
import forks.rlkit.rlkit.torch.pytorch_util as ptu
from forks.rlkit.rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from forks.rlkit.rlkit.launchers.launcher_util import setup_logger
from forks.rlkit.rlkit.launchers import common
from forks.rlkit.rlkit.samplers.data_collector import MdpStepCollector, MdpPathCollector
from forks.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr import utils
from forks.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr.envs import TransposeImage, make_vec_envs
from forks.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr.model import CNNBase, create_output_distribution
from forks.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr.wrappers.policies import WrappedPolicy, MultiPolicy
from forks.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr.wrappers.trainers import PPOTrainer, MultiTrainer
from forks.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr.wrappers.data_collectors import (
RolloutStepCollector,
HierarchicalStepCollector,
ThreeTierStepCollector,
)
from forks.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr.wrappers.algorithms import TorchIkostrikovRLAlgorithm
from forks.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr import distributions
from forks.pytorch_a2c_ppo_acktr_gail.a2c_ppo_acktr import distributions
from gym_agent.learn_plan_policy import LearnPlanPolicy
from gym_agent.controller import CraftController
from gym_agent.planner import ENHSPPlanner
def experiment(variant):
common.initialise(variant)
expl_envs, eval_envs = common.create_environments(variant)
(
obs_shape,
obs_space,
action_space,
n,
mlp,
channels,
fc_input,
) = common.get_spaces(expl_envs)
# # CHANGE TO ORDINAL ACTION SPACE
# action_space = gym.spaces.Box(-np.inf, np.inf, (8,))
# expl_envs.action_space = action_space
# eval_envs.action_space = action_space
ANCILLARY_GOAL_SIZE = variant["ancillary_goal_size"]
SYMBOLIC_ACTION_SIZE = 12
base = common.create_networks(variant, n, mlp, channels, fc_input)
control_base = common.create_networks(
variant, n, mlp, channels, fc_input + SYMBOLIC_ACTION_SIZE, conv=base.main
) # for uvfa goal representation
dist = common.create_symbolic_action_distributions(
variant["action_space"], base.output_size
)
control_dist = distributions.Categorical(base.output_size, action_space.n)
eval_learner = WrappedPolicy(
obs_shape,
action_space,
ptu.device,
base=base,
deterministic=True,
dist=dist,
num_processes=variant["num_processes"],
obs_space=obs_space,
)
planner = ENHSPPlanner()
# multihead
# eval_controller = CraftController(
# MultiPolicy(
# obs_shape,
# action_space,
# ptu.device,
# 18,
# base=base,
# deterministic=True,
# num_processes=variant["num_processes"],
# obs_space=obs_space,
# )
# )
# expl_controller = CraftController(
# MultiPolicy(
# obs_shape,
# action_space,
# ptu.device,
# 18,
# base=base,
# deterministic=False,
# num_processes=variant["num_processes"],
# obs_space=obs_space,
# )
# )
# uvfa
eval_controller = CraftController(
WrappedPolicy(
obs_shape,
action_space,
ptu.device,
base=control_base,
dist=control_dist,
deterministic=True,
num_processes=variant["num_processes"],
obs_space=obs_space,
symbolic_action_size=SYMBOLIC_ACTION_SIZE,
),
n=n,
)
expl_controller = CraftController(
WrappedPolicy(
obs_shape,
action_space,
ptu.device,
base=control_base,
dist=control_dist,
deterministic=False,
num_processes=variant["num_processes"],
obs_space=obs_space,
symbolic_action_size=SYMBOLIC_ACTION_SIZE,
),
n=n,
)
function_env = gym.make(variant["env_name"])
eval_policy = LearnPlanPolicy(
eval_learner,
planner,
eval_controller,
num_processes=variant["num_processes"],
vectorised=True,
env=function_env,
)
expl_learner = WrappedPolicy(
obs_shape,
action_space,
ptu.device,
base=base,
deterministic=False,
dist=dist,
num_processes=variant["num_processes"],
obs_space=obs_space,
)
expl_policy = LearnPlanPolicy(
expl_learner,
planner,
expl_controller,
num_processes=variant["num_processes"],
vectorised=True,
env=function_env,
)
eval_path_collector = ThreeTierStepCollector(
eval_envs,
eval_policy,
ptu.device,
ANCILLARY_GOAL_SIZE,
SYMBOLIC_ACTION_SIZE,
max_num_epoch_paths_saved=variant["algorithm_kwargs"][
"num_eval_steps_per_epoch"
],
num_processes=variant["num_processes"],
render=variant["render"],
gamma=1,
no_plan_penalty=True,
meta_num_epoch_paths=variant["meta_num_steps"],
)
expl_path_collector = ThreeTierStepCollector(
expl_envs,
expl_policy,
ptu.device,
ANCILLARY_GOAL_SIZE,
SYMBOLIC_ACTION_SIZE,
max_num_epoch_paths_saved=variant["num_steps"],
num_processes=variant["num_processes"],
render=variant["render"],
gamma=variant["trainer_kwargs"]["gamma"],
no_plan_penalty=variant.get("no_plan_penalty", False),
meta_num_epoch_paths=variant["meta_num_steps"],
)
# added: created rollout(5,1,(4,84,84),Discrete(6),1), reset env and added obs to rollout[step]
learn_trainer = PPOTrainer(
actor_critic=expl_policy.learner, **variant["trainer_kwargs"]
)
control_trainer = PPOTrainer(
actor_critic=expl_policy.controller.policy, **variant["trainer_kwargs"]
)
trainer = MultiTrainer([control_trainer, learn_trainer])
# missing: by this point, rollout back in sync.
replay_buffer = EnvReplayBuffer(variant["replay_buffer_size"], expl_envs)
# added: replay buffer is new
algorithm = TorchIkostrikovRLAlgorithm(
trainer=trainer,
exploration_env=expl_envs,
evaluation_env=eval_envs,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant["algorithm_kwargs"],
# batch_size,
# max_path_length,
# num_epochs,
# num_eval_steps_per_epoch,
# num_expl_steps_per_train_loop,
# num_trains_per_train_loop,
# num_train_loops_per_epoch=1,
# min_num_steps_before_training=0,
)
algorithm.to(ptu.device)
algorithm.train()
| [
"48459485+AndrewPaulChester@users.noreply.github.com"
] | 48459485+AndrewPaulChester@users.noreply.github.com |
34ae52f5ff6b67f8225ff5fc8bfab7b05c981161 | 2c755562ef6cd6d2e44d09dc52ce157584638a37 | /Day23/turtle-crossing-start/main.py | f50e1850de5cb13f508d2e118f64a6c4ce6d704c | [] | no_license | yayuntsai/python-100DaysofCode | 605c6725091532c3585fbe8cdecd35997356e7f7 | 5d97de9c11a84f22b6a98944c1f9d2bb440eccb9 | refs/heads/master | 2023-04-20T00:02:03.527993 | 2021-05-09T07:51:38 | 2021-05-09T07:51:38 | 334,806,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | import time
from turtle import Screen
from player import Player
from car_manager import CarManager
from scoreboard import Scoreboard
screen = Screen()
screen.setup(width=600, height=600)
screen.tracer(0)
player = Player()
car_manager = CarManager()
scoreboard = Scoreboard()
screen.listen()
screen.onkey(player.go_up, "Up")
game_is_on = True
while game_is_on:
time.sleep(0.1)
screen.update()
car_manager.create_brick()
car_manager.move_cars()
# Detect car collision
for car in car_manager.all_cars:
if player.distance(car) < 10:
game_is_on = False
scoreboard.lose_game()
elif player.ycor() > 280:
game_is_on = False
scoreboard.win_game()
screen.exitonclick() | [
"anniesnoopymd@gmail.com"
] | anniesnoopymd@gmail.com |
54fe0d0bcf3120535489e1bf465956fd8e60a6c2 | 7cd4ab1a89091818f8b2253693cb3f8a63f31044 | /utils.py | 2f13ce718923f1cc02d5c3aa994bfde2106ba364 | [
"MIT"
] | permissive | wycharry/ResNeXt | 65c20cfc99f82ccd35c110620bf3d63fcd8cb712 | e97c100c3f5f4f4df7af1d50ba83516272d9dac4 | refs/heads/master | 2021-01-20T14:15:05.047899 | 2017-04-30T09:28:06 | 2017-04-30T09:28:06 | 90,573,064 | 1 | 0 | null | 2017-05-08T01:12:35 | 2017-05-08T01:12:35 | null | UTF-8 | Python | false | false | 916 | py | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def time_string():
ISOTIMEFORMAT='%Y-%m-%d %X'
string = '[{}]'.format(time.strftime( ISOTIMEFORMAT, time.gmtime(time.time()) ))
return string
def convert_secs2time(epoch_time):
need_hour = int(epoch_time / 3600)
need_mins = int((epoch_time - 3600*need_hour) / 60)
need_secs = int(epoch_time - 3600*need_hour - 60*need_mins)
return need_hour, need_mins, need_secs
| [
"you@example.com"
] | you@example.com |
0bbc30496304005797e4c0345d4048ac4c5b95fc | b3ce3c3eadd29ee8e269f4374d39921d0fe47744 | /python/lucy/parser/test_lucycompiler.py | 4ef4a008dccb83362ee747d31783f2178d3db2e0 | [] | no_license | AllanDaemon/Diaspar | 9a9db38341701ec17803182c2231af36fc60673e | 8d69208643eef599761432262d7532951a9e5b1d | refs/heads/master | 2020-12-24T11:45:28.882691 | 2010-12-11T00:15:40 | 2010-12-11T00:15:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,738 | py |
import ply.yacc as yacc
import unittest
from compiler import ast
from compiler import misc, syntax, pycodegen
from compiler.consts import *
from test_lucyparser import BaseTest
from lucyparser import LucyParser
from lucycompiler import LucyCompiler
import lucybuiltins
class TestCompiledCode(BaseTest):
  """End-to-end tests for LucyCompiler.

  Each test method's docstring carries the Lucy source under test; it is
  extracted with self.get_string, compiled to bytecode, executed, and the
  resulting namespace is checked.  The docstrings are therefore part of
  the test data and must not be edited casually.
  NOTE: this module uses the Python 2 ``exec ... in ...`` statement.
  """
  def setUp(self):
    self.compiler = LucyCompiler()
  def run_code(self, code, namespace):
    # compile Lucy source to bytecode, then execute it with the Lucy
    # builtins as globals and the caller-supplied dict as locals
    bcode = self.compiler.compile(code)
    lucyglobals = {}
    lucyglobals.update(lucybuiltins.__dict__)
    exec bcode in lucyglobals, namespace
  def test_single_assignment(self):
    """Test a single assignment
    a = 1
    """
    code = self.get_string(self.test_single_assignment)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace, {'a': 1})
  def test_comments_are_ignored(self):
    """Test comments are ignored by parser/compiler
    # this is a comment
    a = 1
    """
    code = self.get_string(self.test_comments_are_ignored)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace, {'a': 1})
  def test_assign_expr(self):
    """Test a few assignment expressions
    a = 1 + 2 * 3
    b = (1 + 2) * 3
    c = b % a
    d = 2 ** 6
    pi = 3.14
    s = "Hello World!"
    """
    code = self.get_string(self.test_assign_expr)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace, {'a': 7,
                                 'b': 9,
                                 'c': 2,
                                 'd': 64,
                                 'pi': 3.14,
                                 # FIXME: strip quotes out of strings
                                 's': "Hello World!",
                                 }
                     )
  def test_list_literal(self):
    """Test if the list literal works
    x = [1, 2, 3]
    """
    code = self.get_string(self.test_list_literal)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace, {'x':[1, 2, 3]})
  def test_tuple_literal(self):
    """Test if the tuple literal works
    x = (1, 2, 3)
    """
    code = self.get_string(self.test_tuple_literal)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace, {'x':(1, 2, 3)})
  def test_dict_literal(self):
    """Test if the dict literal works
    x = {"a":1, "b":2, "c":(3, 4, 5)}
    """
    code = self.get_string(self.test_dict_literal)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace, {'x':{'a':1, 'b':2, 'c':(3, 4, 5)}})
  def test_string_literal(self):
    """Test if the string literal works
    x = "abc"
    """
    code = self.get_string(self.test_string_literal)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace, {'x':"abc"})
  def test_if_stmt(self):
    """Test the if statement
    a = 1
    if a == 1:
      b = 1
    """
    code = self.get_string(self.test_if_stmt)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace, {'a': 1, 'b': 1})
  def test_assert_stmt(self):
    """Test the assert statement
    a = 1
    assert a == 1
    """
    code = self.get_string(self.test_assert_stmt)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace, {'a': 1})
  def test_assert_stmt_fails(self):
    # FIXME!
    """Test if the assert statement fails
    a = 0
    assert a == 1
    c = a == 1
    """
    code = self.get_string(self.test_assert_stmt_fails)
    namespace = {}
    self.assertRaises(AssertionError, self.run_code, code, namespace)
  def test_assign_to_literal_fails(self):
    """Test if an assignment to literal fails
    1 = 0
    """
    code = self.get_string(self.test_assign_to_literal_fails)
    namespace = {}
    self.assertRaises(SyntaxError, self.run_code, code, namespace)
  def test_single_conditional(self):
    """Test if an 'if' conditional works
    if x:
      y = 1
    """
    code = self.get_string(self.test_single_conditional)
    namespace = {'x':1, 'y':0}
    self.run_code(code, namespace)
    self.assertEqual(namespace['x'], 1)
    self.assertEqual(namespace['y'], 1)
    namespace = {'x':0, 'y':1}
    self.run_code(code, namespace)
    self.assertEqual(namespace['x'], 0)
    self.assertEqual(namespace['y'], 1)
  def test_multiple_conditional(self):
    """Test if a multiple 'if' conditional works
    if x > 0:
      y = 1
    elif x < 0:
      y = 2
    elif x == 0:
      y = 3
    """
    code = self.get_string(self.test_multiple_conditional)
    namespace = {'x':1}
    self.run_code(code, namespace)
    self.assertEqual(namespace['x'], 1)
    self.assertEqual(namespace['y'], 1)
    namespace = {'x':0}
    self.run_code(code, namespace)
    self.assertEqual(namespace['x'], 0)
    self.assertEqual(namespace['y'], 3)
    namespace = {'x':-1}
    self.run_code(code, namespace)
    self.assertEqual(namespace['x'], -1)
    self.assertEqual(namespace['y'], 2)
  def test_if_elif_else_statement(self):
    """Test if/elif/else statement
    if x > 0:
      y = 1
    elif x < 0:
      y = 2
    else:
      y = 3
    """
    code = self.get_string(self.test_if_elif_else_statement)
    namespace = {'x':1}
    self.run_code(code, namespace)
    self.assertEqual(namespace['x'], 1)
    self.assertEqual(namespace['y'], 1)
    namespace = {'x':0}
    self.run_code(code, namespace)
    self.assertEqual(namespace['x'], 0)
    self.assertEqual(namespace['y'], 3)
    namespace = {'x':-1}
    self.run_code(code, namespace)
    self.assertEqual(namespace['x'], -1)
    self.assertEqual(namespace['y'], 2)
  def test_if_else_statement(self):
    """Test if/else statement
    if x > 0:
      y = 1
    else:
      y = 2
    """
    code = self.get_string(self.test_if_else_statement)
    namespace = {'x':1}
    self.run_code(code, namespace)
    self.assertEqual(namespace['x'], 1)
    self.assertEqual(namespace['y'], 1)
    namespace = {'x':0}
    self.run_code(code, namespace)
    self.assertEqual(namespace['x'], 0)
    self.assertEqual(namespace['y'], 2)
  def test_elif_before_if_fails(self):
    """Test if putting elif before if fails
    elif x > 0:
      y = 1
    if x < 0:
      y = 2
    """
    code = self.get_string(self.test_elif_before_if_fails)
    namespace = {}
    self.assertRaises(SyntaxError, self.run_code, code, namespace)
  def test_for_loop(self):
    """Test for loop
    for x in seq:
      v = v + x
    """
    code = self.get_string(self.test_for_loop)
    namespace = {'v':0, 'seq':[1, 2, 3, 4, 5]}
    self.run_code(code, namespace)
    self.assertEqual(namespace['v'], 15)
  def test_for_else_loop(self):
    """Test for/else loop
    for x in seq:
      if x == 2:
        continue
      v = v + x
      if v > 10:
        break
    else:
      v = -1
    """
    # three scenarios: break taken, break taken without continue, loop exhausted (else runs)
    code = self.get_string(self.test_for_else_loop)
    namespace = {'v':0, 'seq':[1, 2, 3, 4, 5]}
    self.run_code(code, namespace)
    self.assertEqual(namespace['v'], 13)
    code = self.get_string(self.test_for_else_loop)
    namespace = {'v':0, 'seq':[3, 3, 3, 3]}
    self.run_code(code, namespace)
    self.assertEqual(namespace['v'], 12)
    code = self.get_string(self.test_for_else_loop)
    namespace = {'v':0, 'seq':[1, 2, 3, 4]}
    self.run_code(code, namespace)
    self.assertEqual(namespace['v'], -1)
  def test_while_loop(self):
    """Test while loop
    while v < 10:
      v += 1
    """
    code = self.get_string(self.test_while_loop)
    namespace = {'v':0}
    self.run_code(code, namespace)
    self.assertEqual(namespace['v'], 10)
  def test_while_else_loop(self):
    """Test while/else loop
    while v < 10:
      v += 1
    else:
      v = -1
    """
    code = self.get_string(self.test_while_else_loop)
    namespace = {'v':0}
    self.run_code(code, namespace)
    self.assertEqual(namespace['v'], -1)
  def test_getattribute(self):
    """Test if getting an attribute works
    a = x.real
    b = x.imag
    """
    code = self.get_string(self.test_getattribute)
    namespace = {'x':complex(1, 2)}
    self.run_code(code, namespace)
    self.assertEqual(namespace['x'], complex(1, 2))
    self.assertEqual(namespace['a'], 1.0)
    self.assertEqual(namespace['b'], 2.0)
  def test_setattribute(self):
    """Test if setting an attribute works
    o.a = 0
    o.b = 1
    o.c = "hello!"
    """
    class TestObject(object):
      pass
    o = TestObject()
    code = self.get_string(self.test_setattribute)
    namespace = {'o':o}
    self.run_code(code, namespace)
    self.assertEqual(o.a, 0)
    self.assertEqual(o.b, 1)
    self.assertEqual(o.c, "hello!")
  def test_single_let(self):
    """Test single let
    let n:
      a = 1
      b = 2
      c = a + b
      d = [a, b, c]
    """
    code = self.get_string(self.test_single_let)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace['n'], {'a':1, 'b':2, 'c':3, 'd':[1, 2, 3]})
  def test_multiple_let(self):
    """Test if multiple let don't mess each other
    let m:
      a = 1
      b = 2
    let n:
      a = 3
      b = 4
    """
    code = self.get_string(self.test_multiple_let)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace['m'], {'a':1, 'b':2})
    self.assertEqual(namespace['n'], {'a':3, 'b':4})
  def test_nested_let(self):
    """Test if nested let don't mess each other
    let m:
      a = 1
      let n:
        a = 2
    """
    code = self.get_string(self.test_nested_let)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace['m'], {'a':1, 'n':{'a':2}})
  def test_multiple_nested_let(self):
    """Test if multiple nested let work
    let m:
      a = 1
      let n:
        b = 2
        let o:
          c = 3
    """
    code = self.get_string(self.test_multiple_nested_let)
    namespace = {}
    self.run_code(code, namespace)
    self.assertEqual(namespace['m'], {'a':1, 'n':{'b':2, 'o':{'c':3}}})
  def test_define(self):
    """Test define
    define m:
      a = 1
      b = 2
    """
    # a define produces executable code: run it in a fresh dict to inspect it
    code = self.get_string(self.test_define)
    namespace = {}
    self.run_code(code, namespace)
    m = {}
    exec namespace['m'] in globals(), m
    self.assertEqual(m, {'a':1, 'b':2})
  def test_multiple_define(self):
    """Test if multiple define work
    define m:
      a = 1
      b = 2
    define n:
      a = 3
      b = 4
    """
    code = self.get_string(self.test_multiple_define)
    namespace = {}
    self.run_code(code, namespace)
    m = {}
    n = {}
    exec namespace['m'] in globals(), m
    exec namespace['n'] in globals(), n
    self.assertEqual(m, {'a':1, 'b':2})
    self.assertEqual(n, {'a':3, 'b':4})
  def test_nested_define(self):
    """Test if nested define work
    define m:
      a = 1
      b = 2
      define n:
        a = 3
        b = 4
    """
    code = self.get_string(self.test_nested_define)
    namespace = {}
    self.run_code(code, namespace)
    m = {}
    n = {}
    exec namespace['m'] in globals(), m
    exec m['n'] in globals(), n
    m['n'] = n
    self.assertEqual(m, {'a':1, 'b':2, 'n':{'a':3, 'b':4}})
  def test_let_closures(self):
    """Test if let closures resolve properly
    a = 1
    let m:
      b = 2
      let n:
        c = 3
        let o:
          d = a + b + c
    """
    code = self.get_string(self.test_let_closures)
    namespace = {}
    #self.run_code(code, namespace)
  def test_let_with_wrapper(self):
    """Test let statement with namespace wrapper
    let namespace m:
      a = 1
      b = 2
      c = "3"
      d = [1, 2, 3, 4]
    """
    code = self.get_string(self.test_let_with_wrapper)
    namespace = {}
    self.run_code(code, namespace)
    m = namespace['m']
    self.assertEqual(type(m), lucybuiltins.namespace)
    self.assertEqual(m.a, 1)
    self.assertEqual(m.b, 2)
    self.assertEqual(m.c, "3")
    self.assertEqual(m.d, [1, 2, 3, 4])
  def test_define_with_wrapper(self):
    """Test define statement with function wrapper
    define function f:
      a = 1
      b = 2
      c = "3"
      d = [1, 2, 3, 4]
    """
    code = self.get_string(self.test_define_with_wrapper)
    namespace = {}
    self.run_code(code, namespace)
    f = namespace['f']
    self.assertEqual(type(f), lucybuiltins.function)
    self.assertEqual(f.__name__, 'f')
    #self.assertEqual(f.__code__, None)
    #c = f.__code__
    #for n in dir(c):
    #  print n, getattr(c, n)
# run the full suite when this file is executed directly
if __name__ == '__main__':
  unittest.main()
| [
"pjwerneck@gmail.com"
] | pjwerneck@gmail.com |
1ae7dec8c4d86cd17533ba9e55ed2f88c4a7df32 | 2bef484ad81004158e2e6b09f61640d5f148d34e | /speed_evaluator.py | c01057baba852447681fce666ddf8967dda1db49 | [] | no_license | rikuh00/Self_Driving | 2110a095220b5215964d228890273be5bc8fd213 | 4da7af39445331c648eee79474af9cb39c294ac8 | refs/heads/master | 2020-11-28T08:51:22.802097 | 2019-12-23T13:54:25 | 2019-12-23T13:54:25 | 229,761,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,009 | py | #%% IMPORTS
from mlxtend.plotting import plot_decision_regions
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#%% LOAD DATA
# number of synthetic (grade, bumpiness) samples drawn per data set
LEN_FEATURE = 1000
def get_feature(LEN_FEATURE):
    """Draw LEN_FEATURE random (grade, bumpiness) pairs, each uniform in [0, 1].

    Returns a numpy array of shape (LEN_FEATURE, 2).
    """
    grade = np.random.uniform(low=0, high=1, size=LEN_FEATURE)
    bumpiness = np.random.uniform(low=0, high=1, size=LEN_FEATURE)
    # pair the two draws sample-by-sample
    pairs = [np.asarray([g, b]) for g, b in zip(grade, bumpiness)]
    return np.asarray(pairs)
def get_label(arr_feature):
    """Label each (grade, bumpiness) pair: 1 = drive slow, 0 = drive fast."""
    labels = []
    for feature in arr_feature:
        # slow whenever grade^4 + bumpiness^3 exceeds the 0.5 threshold
        slow = feature[0] ** 4 + feature[1] ** 3 > 0.5
        labels.append(1 if slow else 0)
    return np.asarray(labels)
# build independent train/test sets from the same synthetic distribution
feature_train, feature_test = get_feature(LEN_FEATURE), get_feature(LEN_FEATURE)
label_train, label_test = get_label(feature_train), get_label(feature_test)
#%% NAIVE BAYES
# Fitting the Classifier
clf = GaussianNB()
clf.fit(feature_train, label_train)
# Making a Prediction
pred = clf.predict([[0.25, 0.25]]) # correct ans == fast, since 0.25^2 < 0.25
print(pred)
# Evaluating the Classifier
score = clf.score(feature_test, label_test)
print('Score for the Classifier= {}'.format(score))
#%% SVM LINEAR
# refit with a linear-kernel SVM and visualize its decision boundary
clf = svm.SVC(kernel = 'linear')
clf.fit(feature_train, label_train)
ax = plot_decision_regions(feature_train, label_train, clf = clf, legend = 1)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, ['Slow','Fast'])
plt.xlabel('Grade')
plt.ylabel('Bumpiness')
plt.title('NB on Speed Evaluation')
plt.show()
# Prediction
pred = clf.predict([[0.25, 0.25]])
print(pred)
# Evaluating the Classifier
score = clf.score(feature_test, label_test)
print('Score for the Classifier= {}'.format(score))
"45102134+rikuh00@users.noreply.github.com"
] | 45102134+rikuh00@users.noreply.github.com |
3c550bb83e43021c81159ef5c4d5b7cd8e45bdba | 725fdde3f4ac9d4fd945003ffecb2d85e707930d | /Python_Practice3.py | a03ff7e09a2c642afd100074f824a5e65c58b3c5 | [] | no_license | jhoward19/Election_Analysis- | 5750c5d208b5c08f130fa01a7b15f7b1423f718f | 7bc5feb2b5073fa0930211313cae7dbf1266e6bd | refs/heads/main | 2023-08-31T17:39:41.974067 | 2021-10-31T14:48:29 | 2021-10-31T14:48:29 | 418,710,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | x = 0
while x <= 5:
    # emit the current counter value, then advance it
    print(x)
    x += 1
| [
"jessicahoward@Jessicas-MBP.fios-router.home"
] | jessicahoward@Jessicas-MBP.fios-router.home |
7e3545f6a93d8f29fc70f487369548fce40f7376 | e087df9e896b6260695805aade9f728d51c59575 | /adsb_utilities/airspace.py | 19961e3250c2f4845a215200b3a48e352c033174 | [] | no_license | IslePilot/py3_scripts | 45f453ad22d0bc2d56f554b00c0295bceae51250 | f45e3ac1ae4a88de46027f4801f4b46aacb21f5e | refs/heads/master | 2023-07-20T12:12:10.555811 | 2023-07-12T14:52:28 | 2023-07-12T14:52:28 | 134,488,813 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,884 | py | #!/usr/bin/env python3
"""
************************************************************************************
Copyright 2020 (C) AeroSys Engineering, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
************************************************************************************
Revision History:
May 25, 2020, ksb, created
"""
import cifp_functions as cf
import maptools as maptools
# define a version for this file (module-level constant)
VERSION = "1.0"
class AirspaceShape:
  """define a basic block of airspace, either controlled or restrictive"""
  def __init__(self):
    # ar: ordered list of AirspaceRecord boundary segments
    # name: full airspace name (taken from the first named record)
    # controlling_agency: filled in from a continuation record, if any
    self.ar = []
    self.name = None
    self.controlling_agency = None
    return
  def add_airspace_record(self, airspace_record):
    """Append a boundary record, or absorb a continuation record's agency."""
    if airspace_record.controlling_agency != None:
      # this must be a continuation record
      self.controlling_agency = airspace_record.controlling_agency
    else:
      # simply append this record to our list
      self.ar.append(airspace_record)
      # the first record in the set will have the full name
      if airspace_record.name != "":
        self.name = airspace_record.name
    return
  def build_airspace_shape(self):
    """build a list of (lat, lon) tuples defining and airspace shape"""
    # initialize our shape with the first cifp_point
    shape = [(self.ar[0].latitude, self.ar[0].longitude)]
    # work through the list of points
    for i in range(len(self.ar)):
      # boundary_via: first char = path type, second char 'E' marks the last segment
      via = self.ar[i].boundary_via[0]
      end = self.ar[i].boundary_via[1]
      if via == "A":
        # arc by edge
        print("AirspaceShape.build_airspace_shape: Arc by edge not yet supported")
      elif via == "C":
        # circle
        # NOTE(review): this discards any points accumulated so far --
        # presumably a circle record is always the sole record; confirm
        shape = maptools.circle((self.ar[0].arc_origin_latitude, self.ar[0].arc_origin_longitude), self.ar[0].arc_distance_nm)
      elif via == "G":
        # Great circle cifp_point
        # if this is an end, add the first cifp_point otherwise add the next cifp_point
        if end == "E":
          shape.append((self.ar[0].latitude, self.ar[0].longitude))
        else:
          shape.append((self.ar[i+1].latitude, self.ar[i+1].longitude))
      elif via == "H":
        # Rhumb line -- not really correct, but just treat as a great circle, error won't be large for normal distances
        if end == "E":
          shape.append((self.ar[0].latitude, self.ar[0].longitude))
        else:
          shape.append((self.ar[i+1].latitude, self.ar[i+1].longitude))
      elif via == "L" or via == "R":
        # arc segment: L = counterclockwise, R = clockwise
        # define the start
        arc_begin = (self.ar[i].latitude, self.ar[i].longitude)
        # define the end
        if end == "E":
          arc_end = (self.ar[0].latitude, self.ar[0].longitude)
        else:
          arc_end = (self.ar[i+1].latitude, self.ar[i+1].longitude)
        # define the center
        arc_center = (self.ar[i].arc_origin_latitude, self.ar[i].arc_origin_longitude)
        # get the radius
        radius_nm = self.ar[i].arc_distance_nm
        # get the direction
        if via == "R":
          clockwise = True
        else:
          clockwise = False
        # create a name
        if self.ar[i].airspace_classification != None:
          # UC Airspace
          name = "Class {} Section {}".format(self.ar[i].airspace_classification, self.ar[i].multiple_code)
        else:
          name = "{},{}".format(self.ar[i].airspace_designation, self.ar[i].multiple_code)
        # build the arc
        arc = maptools.arc_path(arc_begin, arc_end, arc_center, radius_nm, clockwise, name)
        for p in arc:
          shape.append(p)
      else:
        print("AirspaceShape.build_airspace_shape: Unrecognized boundary via")
    return shape
class AirspaceRecord:
  """One parsed CIFP airspace record (controlled UC or restrictive UR)."""
  # controlled-airspace (UC) type codes
  TYPE_CONTROLLED_CLASS_C = 'A'
  TYPE_CONTROLLED_CONTROL_AREA = 'C'
  TYPE_CONTROLLED_TERMINAL = 'M'
  TYPE_CONTROLLED_TRSA = 'R'
  TYPE_CONTROLLED_CLASS_B = 'T'
  TYPE_CONTROLLED_CLASS_D = 'Z'
  # restrictive-airspace (UR) type codes
  # NOTE(review): "RESSTRICTIVE" typo kept -- renaming would break external references
  TYPE_RESTRICTIVE_ALERT = 'A'
  TYPE_RESTRICTIVE_CAUTION = 'C'
  TYPE_RESSTRICTIVE_DANGER = 'D'
  TYPE_RESTRICTIVE_MOA = 'M'
  TYPE_RESTRICTIVE_PROHIBITED = 'P'
  TYPE_RESTRICTIVE_RESTRICTED = 'R'
  TYPE_RESTRICTIVE_TRAINING = 'T'
  TYPE_RESTRICTIVE_WARNING = 'W'
  TYPE_RESTRICTIVE_UNSPECIFIED = 'U'
  def __init__(self, record):
    """Parse a fixed-width CIFP record string into attributes."""
    # save the raw data
    self.record = record
    # parse the data
    self.section = record[4:6]
    self.continuation = False
    if self.section == "UC":
      self.parse_controlled_airspace(record)
    elif self.section == "UR":
      # column 24 is the continuation count; '0'/'1' marks a primary record
      if record[24] == '0' or record[24] == '1':
        self.parse_restrictive_airspace(record)
      else:
        self.continuation = True
        self.parse_continuation_record(record)
    else:
      print("AirspaceRecord.__init__: Unknown airspace type: {}".format(self.section))
    return
  def parse_controlled_airspace(self, record):
    """parse a controlled airport record (UC)"""
    # SUSAUCK2ZKBJC PAD  A00100     CE                   N39543200W1050702000050      GND  A07999MDENVER                        473321703
    # SUSAUCK2ZKEGE PAD  A00100     G N39342405W106564855                             GND  A09100MEAGLE                         473842004
    # SUSAUCK2ZKEGE PAD  A00200     R N39322480W106574152N39383390W10654574000651990                                            473852004
    # SUSAUCK2ZKEGE PAD  A00300     G N39392116W107031860                                                                       473862004
    # SUSAUCK2ZKEGE PAD  A00400     R N39390596W107003665N39383390W10654574000442770                                            473872004
    # SUSAUCK2ZKEGE PAD  A00500     G N39414066W106505555                                                                       473882004
    # SUSAUCK2ZKEGE PAD  A00600     R N39430974W106490000N39383390W10654574000650450                                            473892004
    # SUSAUCK2ZKEGE PAD  A00700     G N39370580W106464564                                                                       473902004
    # SUSAUCK2ZKEGE PAD  A00800     REN39373433W106492447N39383390W10654574000441030                                            473912004
    # SUSAUCK2ZKBKF PAD  A00100     R N39393650W104402500N39420630W10445071000441245 GND  A07499MAURORA                         473331703
    # SUSAUCK2ZKBKF PAD  A00200     G N39460130W104474270                                                                       473341703
    # SUSAUCK2ZKBKF PAD  A00300     G N39455530W104472450                                                                       473351703
    # SUSAUCK2ZKBKF PAD  A00400     G N39453560W104462610                                                                       473361703
    # SUSAUCK2ZKBKF PAD  A00500     G N39452270W104454850                                                                       473371703
    # SUSAUCK2ZKBKF PAD  A00600     G N39451240W104451840                                                                       473381703
    # SUSAUCK2ZKBKF PAD  A00700     G N39450370W104450200                                                                       473391703
    # SUSAUCK2ZKBKF PAD  A00800     G N39450000W104445630                                                                       473401703
    # SUSAUCK2ZKBKF PAD  A00900     G N39445390W104444630                                                                       473411703
    # SUSAUCK2ZKBKF PAD  A01000     G N39443000W104440480                                                                       473421703
    # SUSAUCK2ZKBKF PAD  A01100     GEN39442600W104435020                                                                       473431703
    # SUSAUCK2ZKBKF PAD  B01200     G N39442600W104435020                             GND  A06499MAURORA                        473441703
    # SUSAUCK2ZKBKF PAD  B01300     G N39442380W104434230                                                                       473451703
    # SUSAUCK2ZKBKF PAD  B01400     G N39442450W104425950                                                                       473461703
    # SUSAUCK2ZKBKF PAD  B01500     G N39442430W104414800                                                                       473471703
    # SUSAUCK2ZKBKF PAD  B01600     G N39442410W104402340                                                                       473481703
    # SUSAUCK2ZKBKF PAD  B01700     R N39442390W104401490N39420630W10445071000440586                                            473491703
    # SUSAUCK2ZKBKF PAD  B01800     GEN39393650W104402500                                                                       473501703
    # 123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012
    #          1         2         3         4         5         6         7         8         9         10        11        12        13
    self.airport = record[9:14].rstrip() # airport identifier
    self.airspace_center_section = record[14:16]
    self.airspace_classification = record[16] # A through G
    # unused members
    self.airspace_designation = None
    self.controlling_agency = None
    # same as UR
    self.airspace_type = record[8] # eg: self.TYPE_CONTROLLED_CLASS_C
    self.multiple_code = record[19] # designator defining airspace section (A-)...airspace with only one section will only have A
    self.sequence_number = int(record[20:24])
    self.continuation_count = cf.parse_int(record[24])
    self.boundary_via = record[30:32]
    self.latitude = cf.parse_lat(record[32:41])
    self.longitude = cf.parse_lon(record[41:51])
    self.arc_origin_latitude = cf.parse_lat(record[51:60])
    self.arc_origin_longitude = cf.parse_lon(record[60:70])
    self.arc_distance_nm = cf.parse_float(record[70:74], 10.0)
    self.arc_bearing = cf.parse_float(record[74:78], 10.0)
    self.name = record[93:123].rstrip()
    return
  def parse_restrictive_airspace(self, record):
    """parse a restrictive airport record (UR)"""
    # SUSAURK2MCOUGAR H  A00101L   G N38533000W103000000                             11000M17999MCOUGAR HIGH MOA               580281703
    # SUSAURK2MCOUGAR H  A00102C                                                                FAA DENVER ARTCC               580291703
    # SUSAURK2MCOUGAR H  A00200L   G N39071900W102144300                                                                       580301703
    # SUSAURK2MCOUGAR H  A00300L   G N39014000W101000000                                                                       580311703
    # SUSAURK2MCOUGAR H  A00400L   G N38381000W101000000                                                                       580321703
    # SUSAURK2MCOUGAR H  A00500L   H N38233000W102120400                                                                       580331703
    # SUSAURK2MCOUGAR H  A00600L   G N38233000W102443400                                                                       580341703
    # SUSAURK2MCOUGAR H  A00700L   GEN38344100W103000000                                                                       580351703
    # 123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012
    #          1         2         3         4         5         6         7         8         9         10        11        12        13
    # designation may start at column 8 or 9 depending on a leading digit
    if record[9].isdigit():
      self.airspace_designation = record[8:19].rstrip()
    else:
      self.airspace_designation = record[9:19].rstrip()
    # unused records
    self.airport = None
    self.airspace_center_section = None
    self.airspace_classification = None
    self.controlling_agency = None
    # same as UC
    self.airspace_type = record[8] # eg: self.TYPE_RESTRICTIVE_PROHIBITED
    self.multiple_code = record[19] # designator defining airspace section (A-)...airspace with only one section will only have A
    self.sequence_number = int(record[20:24])
    self.continuation_count = cf.parse_int(record[24])
    self.boundary_via = record[30:32]
    self.latitude = cf.parse_lat(record[32:41])
    self.longitude = cf.parse_lon(record[41:51])
    self.arc_origin_latitude = cf.parse_lat(record[51:60])
    self.arc_origin_longitude = cf.parse_lon(record[60:70])
    self.arc_distance_nm = cf.parse_float(record[70:74], 10.0)
    self.arc_bearing = cf.parse_float(record[74:78], 10.0)
    self.name = record[93:123].rstrip()
    return
  def parse_continuation_record(self, record):
    """Parse a UR continuation record (currently only type C is handled)."""
    # Continuation Types (5.91, p 105)
    # C - Call Sign/Controlling Agency
    if record[9].isdigit():
      self.airspace_designation = record[8:19].rstrip()
    else:
      self.airspace_designation = record[9:19].rstrip()
    # same as UC
    self.multiple_code = record[19] # designator defining airspace section (A-)...airspace with only one section will only have A
    # check the application type to know what to parse
    if record[25] == "C":
      self.controlling_agency = record[99:123].rstrip()
    else:
      self.controlling_agency = None
      print("AirpsaceRecord.parse_continuation_record: Unhandled Continuation Record {}".format(record))
    return
| [
"keith.barr@aerosys-eng.com"
] | keith.barr@aerosys-eng.com |
123ad9c3fcdd484b90d9adc07fcfa09fd77958c2 | c42ff25f85514afd85a528d6503816ebbded9257 | /venv/bin/audioidmon_monitor_fingerprint | a09bd6956144bae2637252c4ce59eaa3629c0906 | [] | no_license | NisalPriyanka/Lucy-Speech_Recognition_Assistance- | 3024430514ebe4ce19df20829e7cdb8b670e5ae8 | f703be2816935ad1a78643d3d69ed2380b69b227 | refs/heads/master | 2020-03-21T05:47:19.020512 | 2018-06-25T05:53:27 | 2018-06-25T05:53:27 | 138,180,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | #!/root/Documents/Lucy-Speech_Recognition_Assistance-/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from audioidmon.cli.audioidmon_monitor_fingerprint import main
if __name__ == '__main__':
    # console-script shim: strip setuptools' -script.py/.exe suffix from argv[0]
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"nisal.priyanka@my.sliit.lk"
] | nisal.priyanka@my.sliit.lk | |
4e8d76f4f75201c8c3593a7e90653b1d06a831d9 | 8a64bf83f8d9db5f3e2511596bb2abdd5119838a | /data_analysis/notebooks/model_custom.py | f7bd806459db34967811db6b325c2d6af6dba09b | [] | no_license | fougere44/e1 | b658596621e412ad9e931e06c3baf7a31bc121cb | dfa3000fd503e1eb402c6d00abe4df57eef9e10a | refs/heads/main | 2023-05-09T03:34:42.797897 | 2021-05-30T14:55:46 | 2021-05-30T14:55:46 | 364,997,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,158 | py | import numpy as np
import pandas as pd
import random
import cv2
import os
import sys
from os import listdir
from os.path import isfile, join
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from PIL import Image
import keras
import keras.backend as K
from keras.models import load_model
from keras.layers import Input, Dense, merge
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, Reshape, BatchNormalization, SeparableConv2D, Activation
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import metrics
import sklearn
from sklearn.metrics import mean_squared_error
import mlflow
import mlflow.tensorflow
# automatically log keras params, metrics and artifacts to MLflow
mlflow.tensorflow.autolog()
def load_photos(directory):
    '''
    Loads the photo from the directory and return arrays of images and labels
    '''
    images = []
    directions = []
    vitesses = []
    file_names = listdir(directory)
    random.shuffle(file_names)
    for name in file_names:
        # file name pattern: <prefix>_<speed>_<direction>.jpg
        img = load_img(directory + '/' + name, target_size=(120, 160))
        # is the resize necessary? load_img already targets 120x160
        images.append(img_to_array(img))
        parts = name.split('_')
        directions.append(float(parts[2].split('.jpg')[0]))
        vitesses.append(float(parts[1]))
    return images, directions, vitesses
# X = images, Y = steering directions, Z = speeds
X, Y, Z = load_photos("../data/images_data/images/circuits/entrainement")
print('Images chargées pour entraînement :',len(X))
X = np.array(X)
# scale pixel values to [0, 1]
X /= 255.0
def mirror_image(X, Y, Z):
    '''
    Do a horizontal flip on every images of the dataset
    '''
    X_mirror = []
    Y_mirror = []
    Z_mirror = []
    for i, image in enumerate(X):
        # 1 selects a horizontal flip in OpenCV
        X_mirror.append(cv2.flip(image, 1))
        # NOTE(review): np.flip on a scalar label returns it unchanged; a
        # horizontal flip usually negates the steering angle -- confirm intent
        Y_mirror.append(np.flip(Y[i]))
        Z_mirror.append(np.flip(Z[i]))
    return X_mirror, Y_mirror, Z_mirror
def random_brightness(X, Y, Z):
    """Return brightness-jittered copies of the images with their labels."""
    X_bright = []
    Y_bright = []
    Z_bright = []
    for i, image in enumerate(X):
        # HSV (Hue, Saturation, Value) is also called HSB ('B' for Brightness).
        hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        # scale the V channel by a random factor in [0.6, 1.4)
        ratio = 1 + 0.8 * (np.random.rand() - 0.5)
        hsv[:,:,2] = hsv[:,:,2] * ratio
        X_bright.append(cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB))
        Y_bright.append(Y[i])
        Z_bright.append(Z[i])
    return X_bright, Y_bright, Z_bright
#Here we augment the data with the previously declared functions, you should adapt if you don't want to use all the
# augmentation functions
# step 1: brightness jitter doubles the data set
X_bright, Y_bright, Z_bright = random_brightness(X, Y, Z)
X_tmp = np.concatenate((X, X_bright))
Y_tmp = np.concatenate((Y, Y_bright))
Z_tmp = np.concatenate((Z, Z_bright))
# step 2: horizontal mirroring doubles it again (4x the original size)
X_mirror, Y_mirror, Z_mirror = mirror_image(X_tmp,Y_tmp,Z_tmp)
X_final = np.concatenate((X_tmp, X_mirror))
Y_final = np.concatenate((Y_tmp, Y_mirror))
Z_final = np.concatenate((Z_tmp, Z_mirror))
print('Le dataset est desormais composé de', len(Z_final),'images')
# two-headed target: [steering directions, speeds]
y_final = [(Y_final), (Z_final)]
#define our model
def getModelCustom():
    """Build, train and return a custom CNN with angle and throttle outputs.

    Saves the best checkpoint (by val_loss) to disk and the training history
    to CSV.  Uses module-level X_final/y_final (falls back to X, Y if the
    augmented arrays are undefined).
    """
    model_name = "../models/output_model/test_custom_v3.h5"
    K.clear_session()
    #Building the model
    img_in = Input(shape=(120, 160, 3), name='img_in')
    x = img_in
    #Conv Layer 1
    x = Conv2D(64, kernel_size=(3, 3), activation='relu')(x)
    x = Conv2D(64, kernel_size=(3, 3), activation='relu')(x)
    x = Conv2D(64, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)
    #Conv Layer 2
    x = Conv2D(128, kernel_size=(3, 3), activation='relu')(x)
    x = Conv2D(128, kernel_size=(3, 3), activation='relu')(x)
    x = Conv2D(128, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    #Conv Layer 3
    x = Conv2D(128, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    #Conv Layer 4
    x = Conv2D(256, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(1, 1))(x)
    #Conv Layer 5
    x = Conv2D(256, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(1, 1))(x)
    #Conv Layer 6
    x = Conv2D(512, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(1, 1))(x)
    x = Flatten(name='flattened')(x)
    # Flatten to 1D (Fully connected)
    x = Dense(1024, activation='relu', use_bias=False)(x)
    x = Dropout(0.4)(x)
    x = Dense(512, activation='relu', use_bias=False)(x)
    x = Dropout(0.2)(x)
    angle_out = Dense(1, activation='linear', name='angle_out', use_bias=False)(x)
    throttle_out = Dense(1, activation='linear', name='throttle_out', use_bias=False)(x)
    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(loss='mse', optimizer="adam", metrics=[metrics.mean_squared_error, metrics.mean_absolute_error, metrics.categorical_accuracy])
    best_checkpoint = keras.callbacks.ModelCheckpoint(model_name, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    try:
        h_custom = model.fit(X_final, y_final, batch_size=64, validation_split=0.2, epochs=50, verbose=1, callbacks=[best_checkpoint])
    except NameError:
        print('Augmented data have not been found, original data will be used')
        # NOTE(review): fallback supplies a single target (Y) to a two-output
        # model -- confirm this path was ever exercised
        h_custom = model.fit(X, Y, batch_size=64, epochs=50, validation_split=0.2, verbose=1, callbacks=[best_checkpoint])
    history_custom = pd.DataFrame(h_custom.history, index=h_custom.epoch)
    history_custom.to_csv('../models/dataframe_model/test_custom_v3.csv', index = False)
    model.summary()
    return model
# train the custom model at import/run time
model_custom = getModelCustom()
def getModelDonkeyCar():
    """Fine-tune a pre-trained DonkeyCar pilot model on the same data.

    Loads mypilot.h5, recompiles it, trains with checkpointing on val_loss,
    and writes the training history to CSV.
    """
    model_name_donkey = "../models/output_model/test_donkey_v3.h5"
    model = keras.models.load_model("../models/model_entree/mypilot.h5")
    model.compile(optimizer='adam', loss = 'mse',
                  metrics=[metrics.mean_squared_error,
                          metrics.mean_absolute_error,
                          #metrics.mean_absolute_percentage_error,
                          metrics.categorical_accuracy])
    best_checkpoint_2 = keras.callbacks.ModelCheckpoint(model_name_donkey, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    try:
        h_donkey = model.fit(X_final, y_final, batch_size=64, validation_split=0.2, epochs=50, verbose=1, callbacks=[best_checkpoint_2])
    except NameError:
        print('Augmented data have not been found, original data will be used')
        h_donkey = model.fit(X, Y, batch_size=64, epochs=50, validation_split=0.2, verbose=1, callbacks=[best_checkpoint_2])
    history_donkey = pd.DataFrame(h_donkey.history, index=h_donkey.epoch)
    history_donkey.to_csv('../models/dataframe_model/test_donkey_v3.csv', index = False)
    model.summary()
    return model
# fine-tune the pre-trained DonkeyCar model as a comparison baseline
model_donkey = getModelDonkeyCar()
| [
"afougere@sigma.fr"
] | afougere@sigma.fr |
3be20940bd4db0860f3e1d83ea84e1c20e51a059 | 62307a5958538a884e8c25790af05c758e78deb5 | /urls.py | 8d465d370696d944e55b8244ed15f26dbe4e9996 | [] | no_license | sdb/sdb | 0e22d1309ff1ef8b6e940c796854d403728910af | 33c0d17160d47b02d243743b003ab0b978993a8d | refs/heads/master | 2016-09-15T14:30:35.080053 | 2012-05-04T13:45:47 | 2012-05-04T13:45:47 | 868,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
from django.contrib.sitemaps import FlatPageSitemap, GenericSitemap
from sdb.sitemap import MainSitemap
# register every installed app's admin module
admin.autodiscover()
# sitemap sections exposed through /sitemap.xml
sitemaps = {
    'flatpages': FlatPageSitemap,
    'main' : MainSitemap,
}
urlpatterns = patterns('',
    (r'^home/', include('sdb.home.urls')),
    (r'^contact/', include('sdb.contact.urls')),
    (r'^stream/', include('sdb.social.urls')),
    (r'^photos/', include('sdb.photos.urls')),
    (r'^admin/', include(admin.site.urls)),
    (r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
)
# serve uploaded media through Django only in development
if settings.DEBUG:
    urlpatterns += patterns('django.views.static',
        (r'^media/(?P<path>.*)$',
         'serve', {
          'document_root': settings.MEDIA_ROOT,
          'show_indexes': True }),)
# catch-all: everything else is routed to the core app
urlpatterns += patterns('',
    (r'^', include('sdb.core.urls')),)
| [
"stefan@ellefant.be"
] | stefan@ellefant.be |
de5174ed049e9fbe60804f4ca25fd8cfed046983 | c25b5ccd210eb3223695a1c3b6aba6beba9095f1 | /urls.py | 1aea993d3b00a35760d66db7da08996025bb9db8 | [] | no_license | evadunn/PythonClub | 784a211d818a09878b30cdb89f66ae0329098cff | 53355f75d25463e84bd5cab48af1aeea7e81b2fe | refs/heads/main | 2023-06-01T22:44:46.768116 | 2021-06-15T18:31:13 | 2021-06-15T18:31:13 | 357,615,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | """PythonClub URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
from django.contrib import admin
urlpatterns = [
path('admin/', admin.site.urls),
path('club/', include('club.urls')),
path('accounts/', include('django.contrib.auth.urls')),
]
| [
"noreply@github.com"
] | evadunn.noreply@github.com |
d4dff6a9a6c2a14949864b231b50b23fac31547f | 04dfecbd42edcfd9ee8c8f6219a2c8638d0c0acd | /LeetCode/136.只出现一次的数字.py | 8f8f8ce182343703acc0e84344dec1aeafcdae0d | [] | no_license | weepwood/PythonDemo | 18a88d54855b722296d7f5823d122db99b2da5a5 | 7b918c970073535a3680c9296ba6dab73014c32d | refs/heads/master | 2023-01-27T20:46:24.284072 | 2020-12-02T07:53:04 | 2020-12-02T07:53:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | # -*- coding: utf-8 -*-
"""
@Date : 2019/12/24 下午 9:29
@Author : long
@File : 136.只出现一次的数字.py
"""
"""
给定一个非空整数数组,除了某个元素只出现一次以外,其余每个元素均出现两次。找出那个只出现了一次的元素。
说明:你的算法应该具有线性时间复杂度。你可以不使用额外空间来实现吗?
示例 1: 示例 2:
输入: [2,2,1] 输入: [4,1,2,1,2]
输出: 1 输出: 4
"""
"""
1.遍历nums中的每一个元素
2.如果某个nums中的数字是新出现的,则将它添加到列表中
3.如果某个数字已经在列表中,删除它
"""
class Solution:
@staticmethod
def singleNumber1(nums: "List[int]") -> "int": # 使用列表操作
no_duplicate_list = []
for i in nums:
if i not in no_duplicate_list:
no_duplicate_list.append(i)
else:
no_duplicate_list.remove(i)
return no_duplicate_list.pop()
@staticmethod
def singleNumber2(nums: "List[int]") -> "int": # 哈希表
hash_table = {}
for i in nums:
try:
hash_table.pop(i)
except:
hash_table[i] = 1
return hash_table.popitem()[0]
if __name__ == '__main__':
a = Solution.singleNumber1([2, 2, 1, 1, 3, 3, 5, 4, 4])
b = Solution.singleNumber2([2, 2, 1, 1, 3, 3, 5, 4, 4])
print(a)
print(b)
| [
"885240677@qq.com"
] | 885240677@qq.com |
27e9139ce7c4cb54a15ea6951cf71b700ac27663 | a36c23f6ab5603a779bfbbf9adf92a8e8f3354e7 | /ngz-geoviz/datahub/migrations/0007_train_traincategory.py | 2e2f807e09be647b7af8d23232f819b2f1ded5d3 | [
"MIT",
"CC-BY-4.0"
] | permissive | cuulee/ngz-geoviz | 32263f4472cc2484ea67a900170908c875dbca93 | 9407c1d3003c43b63606c3209837a7a3cfb1fa05 | refs/heads/master | 2022-09-08T14:56:11.009081 | 2020-06-01T09:48:07 | 2020-06-01T09:48:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # Generated by Django 3.0.3 on 2020-02-25 07:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('datahub', '0006_auto_20200224_1212'),
]
operations = [
migrations.AddField(
model_name='train',
name='trainCategory',
field=models.CharField(default='Cargo', max_length=20),
preserve_default=False,
),
]
| [
"joona@gispo.fi"
] | joona@gispo.fi |
85179a7df23d93d41204a91196ec6860f15abf57 | a25f379bf4ab36547e2a6fa59c4fbea78d0bd729 | /pev_photons/systematics/sens_fit.py | 1fd3b2b5a0287374deddb26b86749b4790cefd59 | [] | no_license | zdgriffith/pev-photons | 1275464b81aef6f0e7a8c8440dadb6c24581ee7c | 01c68fdafe398c1855329c25ccfba07b08a9a9e0 | refs/heads/master | 2021-07-26T00:56:19.689628 | 2018-11-17T17:04:06 | 2018-11-17T17:04:06 | 100,307,872 | 0 | 0 | null | 2018-08-15T19:12:19 | 2017-08-14T20:43:03 | Python | UTF-8 | Python | false | false | 3,070 | py | #!/usr/bin/env python
########################################################################
# Perform a sensitivity calculation fit on injected trials distributions
########################################################################
import numpy as np
from glob import glob
import argparse as argparse
from skylab.sensitivity_utils import fit
from skylab.template_injector import TemplateInjector
from pev_photons.utils.load_datasets import load_systematic_dataset
from pev_photons.utils.support import prefix, fig_dir
def sensitivity(args):
exp, mc, livetime, template_llh, template = load_systematic_dataset('galactic_plane', args.name, ncpu=args.ncpu,
seed=args.seed, year='2012')
inj = TemplateInjector(template=template,
gamma=args.alpha,
E0=args.E0,
Ecut=None,
seed=1)
inj.fill(exp, mc, livetime)
files = glob(prefix+'systematics/template_sens/2012/'+args.name+'_*.npy')
if 'Laputop' in args.name:
inj_list = range(0,171,17)
else:
inj_list = range(0,551,50)
frac = np.zeros(len(inj_list))
tot = np.zeros(len(inj_list))
for fname in files:
a = np.load(fname)
index = inj_list.index(a[0])
frac[index] += a[1]
tot[index] += a[2]
ni, ni_err, images = fit(inj_list, frac, tot,
0.9, ylabel="fraction TS > 0.90",
npar = 2, par = None,
image_base=fig_dir+'template/'+args.name+'_sens')
flux = inj.mu2flux(ni)
flux_err = flux * ni_err / ni
print (" ni ------------ %.2f +/- %.2f (p %.2f)" % (ni, ni_err, 0.9))
print (" flux ---------- %.2e +/- %.2e GeV^-1cm^-2s^-1\n" % (flux, flux_err))
sens_result = np.empty((1,),
dtype=[('ni', np.float), ('ni_err', np.float),
('flux', np.float), ('flux_err', np.float),
])
sens_result['ni'] = ni
sens_result['ni_err'] = ni_err
sens_result['flux'] = flux
sens_result['flux_err'] = flux_err
np.save(prefix+'/systematics/template/'+args.name+'_sens.npy', sens_result)
if __name__ == "__main__":
p = argparse.ArgumentParser(description='Perform a sensitivity calculation fit',
formatter_class=argparse.RawDescriptionHelpFormatter)
p.add_argument('--name', type=str, default='fermi_pi0',
help='The name of the template.')
p.add_argument("--alpha", type=float, default=3.0,
help='Spectral index of signal.')
p.add_argument("--E0", type=float, default=2e6,
help='Energy to normalize.')
p.add_argument("--ncpu", type=int, default=1,
help="Number of cores to run on.")
p.add_argument("--seed", type=int, default=1,
help='rng seed')
args = p.parse_args()
sensitivity(args)
| [
"zachary.griffith@icecube.wisc.edu"
] | zachary.griffith@icecube.wisc.edu |
36b89008aa63d7babf7a2cf5176fff5ca6a4c9c6 | 3ded38311016cc77546ab5da60260206e70a8b1e | /FunMooc/exos_UpyLaB/UpyLaB 6.13.py | 83ba6c93544cd8d69b050c6aaf8f9aa3421ba243 | [] | no_license | jfrancois974/Python | f414c0b983f3731eb3ffae85f520067ea6d31338 | c48bb451c560ea9c50f13863364a702ec8a66883 | refs/heads/main | 2023-01-07T14:40:12.553446 | 2020-11-06T10:11:07 | 2020-11-06T10:11:07 | 310,559,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | """
Auteur: Jean-François BATAILLE
Date : AVRIL 2020
Projet : Apprentissage Python 3
Objectif:
Voici le début d’une suite logique inventée par John Horton Conway (et connue donc sous le nom de suite de Conway).
1
1 1
2 1
1 2 1 1
1 1 1 2 2 1
3 1 2 2 1 1
Chaque ligne, à partir de la deuxième, décrit la précédente :
la première ligne, 1, est formée de un “1”, d’où la deuxième ligne : 1 1 ;
la troisième ligne décrit la deuxième ligne, où l’on voit deux “1”, d’où 2 1 ;
la quatrième ligne décrit la troisième ligne, où l’on voit un “2” et un “1”, d’où 1 2 1 1 ;
et ainsi de suite.
Écrire une fonction next_line(line) qui reçoit une lst d’entiers décrivant une ligne de cette suite,
et qui retourne la lst correspondant à la ligne suivante.
Exemples:
next_line([1, 2, 1, 1]) doit retourner: [1, 1, 1, 2, 2, 1]
next_line([1]) doit retourner: [1, 1]
next_line([]) doit retourner: [1]
"""
def next_line(line):
if line == []:
return [1]
count = 0
val = line[0]
lst = []
for n in line:
if n == val:
count += 1
else:
lst.append(count)
lst.append(val)
count = 1
val = n
lst.append(count)
lst.append(val)
return lst
print(next_line([2, 1]))
print(next_line([1, 3, 1, 1, 2, 2, 2, 1]))
print(next_line([3, 1, 1, 3, 1, 1, 2, 2, 2, 1, 2, 3, 2, 1, 1,
2, 1, 3, 3, 3, 1, 2, 2, 1, 1, 3, 1, 2, 1, 1, 3, 2, 1, 1]))
| [
"noreply@github.com"
] | jfrancois974.noreply@github.com |
4718642f952cbc04c2cf210cc8eabb5076c4614f | c87ae09a0229b4b4620c511b0c51eb685ec22b99 | /Python全栈学习/第三模块/LuffyFTP 功能重写/client/ftp_client.py | 0815c26ed99fd1dc3537004fd81cf58014bbae89 | [] | no_license | caideyang/python2018 | 050f4c29c37b5bec963e77e0724cd05a9350deed | b7a3a728ef36b43448dc5ff594fdba500b67ad53 | refs/heads/master | 2020-03-25T06:02:54.699941 | 2018-11-01T23:04:29 | 2018-11-01T23:04:29 | 143,480,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,685 | py | #!/usr/bin/python3
#@Author:CaiDeyang
#@Time: 2018/9/22 9:38
import optparse
import pickle
from socket import *
class FTPClient(object):
"""ftp客户端"""
def __init__(self):
parser = optparse.OptionParser()
parser.add_option("-s", "--server", dest="server", help="ftp server ip_addr")
parser.add_option("-P", "--prot", type="int", dest="port", help="ftp server port")
parser.add_option("-u", "--username", dest="username", help="username")
parser.add_option("-p", "--password", dest="password", help="password")
self.options, self.args = parser.parse_args()
print(self.options, self.args)
self.client = socket(AF_INET, SOCK_STREAM)
self.verify_data()
self.interactive()
def verify_data(self):
"""查看传入的参数是否正确"""
if not self.options.server or not self.options.port:
exit("Invalid options...")
def useage(self):
print("Usage: python %s -h" % (__file__))
def interactive(self):
# if self.options['server'] and self.options['port']:
try:
self.client.connect((self.options.server, self.options.port))
self.auth()
except:
self.useage()
def auth(self):
count = 0
while count < 3:
username = self.options.username or input("username:").strip()
password = self.options.password or input("password: ").strip()
count += 1
data = {'username': username, 'password': password, 'action_type': 'auth'}
self.client.send(pickle.dumps(data))
if __name__ == "__main__":
client = FTPClient() | [
"deyangcai@163.com"
] | deyangcai@163.com |
1394f29de96d1460e6d7afca6e5e7e0e58da3333 | 96c378e3b2f92df852782f0c662b10c142413575 | /user/migrations/0011_user_desc.py | 984e1a91f2b454c68eceac4c5c82ff8a74b1a861 | [] | no_license | Sndav/OTS | 0e6e1dc3f4705839f52b72cf973765945804a89e | 4fdc3c7a71e8c74115285c15e99cf4a5b7a8fea0 | refs/heads/master | 2020-04-11T14:52:03.923197 | 2019-03-07T08:22:28 | 2019-03-07T08:22:28 | 161,871,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | # Generated by Django 2.1.4 on 2019-03-04 13:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0010_auto_20190303_2048'),
]
operations = [
migrations.AddField(
model_name='user',
name='desc',
field=models.TextField(default='暂时没有描述信息', verbose_name='描述信息'),
),
]
| [
"bossstyle@126.com"
] | bossstyle@126.com |
991875aa1b15d197046ecda9d4eab939362b01ef | bf3fecb4064c99f398042846dabf990975dbaf9b | /Air03/PM25.py | 21b687d28a01b3a3d44562da9f82734e037fb821 | [] | no_license | sswxl/Air_Quality_Analysis | 15eba41f142ec4c1d28e2853a5f3c73c71ba6e29 | 5e290be4eb8e57e3053e9d0f9e3cf23f754d2152 | refs/heads/master | 2023-06-27T22:15:12.268086 | 2021-01-05T09:20:57 | 2021-01-05T09:20:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,128 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
dd=pd.read_csv(open('./PM25city.csv',encoding='utf-8'))
data=dd[dd.AQI>0]
data=dd[dd.PM25>0]
citydict={}
aqidict={}
aqilist=[]
for index, row in data.iterrows():
AQI=row['AQI']
year=row['year']
month=row['month']
if(month<10):
month='0'+str(month)
day=row['day']
if (day < 10):
day = '0' + str(day)
date=str(year) + str(month) + str(day)
city=row['city']
if (citydict.get(city)):
aqidict=citydict.get(city)
if (aqidict.get(date)):
aqilist = aqidict.get(date)
aqilist.append(AQI)
else:
aqilist = []
aqilist.append(AQI)
aqidict[date] = aqilist
else:
aqidict={}
aqilist = []
aqilist.append(AQI)
aqidict[date] = aqilist
citydict[city] = aqidict
for (city,aqidict) in citydict.items():
for (date,aqilist) in aqidict.items():
sum=0
count=0
for i in range(0,len(aqilist)):
count=count+1
sum+=aqilist[i]
avg=sum / count
aqilist.append(sum/count)
write_clo = [city, date, avg, ]
df = pd.DataFrame(columns=write_clo)
df.to_csv('result.csv', line_terminator="\n", index=False, mode='a', encoding='utf8')
# result=pd.read_csv(open('F:/研二上/大数据hadoop/PM25city/result.csv',encoding='utf-8'))
# citydict2={}
# for index, row in result.iterrows():
# city = row['city']
# month=str(row['date'])[:6]
# aqi = row['aqi']
# if (citydict2.get(city)):
# aqidict=citydict2.get(city)
# if (aqidict.get(month)):
# aqilist = aqidict.get(month)
# if aqi <= 50:
# aqilist[0] = aqilist[0] + 1
# elif aqi <= 100:
# aqilist[1] = aqilist[1] + 1
# elif aqi <= 100:
# aqilist[2] = aqilist[2] + 1
# elif aqi <= 100:
# aqilist[3] = aqilist[3] + 1
# elif aqi <= 100:
# aqilist[4] = aqilist[4] + 1
# else:
# aqilist[5] = aqilist[5] + 1
# else:
# aqilist = aqilist = [0,0,0,0,0,0]
# if aqi <= 50:
# aqilist[0] = aqilist[0] + 1
# elif aqi <= 100:
# aqilist[1] = aqilist[1] + 1
# elif aqi <= 100:
# aqilist[2] = aqilist[2] + 1
# elif aqi <= 100:
# aqilist[3] = aqilist[3] + 1
# elif aqi <= 100:
# aqilist[4] = aqilist[4] + 1
# else:
# aqilist[5] = aqilist[5] + 1
# aqidict[month] = aqilist
# else:
# aqidict={}
# aqilist = [0,0,0,0,0,0]
# if aqi<=50:
# aqilist[0]=aqilist[0]+1
# elif aqi<=100:
# aqilist[1]=aqilist[1]+1
# elif aqi<=100:
# aqilist[2]=aqilist[2]+1
# elif aqi<=100:
# aqilist[3]=aqilist[3]+1
# elif aqi<=100:
# aqilist[4]=aqilist[4]+1
# else:
# aqilist[5] = aqilist[5] + 1
# aqidict[month] = aqilist
# citydict2[city] = aqidict
#
# for (city,aqidict) in citydict2.items():
# for (month, aqilist) in aqidict.items():
# write_clo = [city, month, aqilist ]
# df = pd.DataFrame(columns=write_clo)
# df.to_csv('handle.csv', line_terminator="\n", index=False, mode='a', encoding='utf8')
# result=pd.read_csv(open('F:/研二上/大数据hadoop/PM25city/result.csv',encoding='utf-8'))
# citydict3={}
#
# for index, row in result.iterrows():
# city = row['city']
# month=str(row['date'])[:6]
# aqi = row['aqi']
# if (citydict3.get(city)):
# aqilist = citydict3.get(city)
# if aqi<=50:
# aqilist[0]=aqilist[0]+1
# elif aqi<=100:
# aqilist[1]=aqilist[1]+1
# elif aqi<=100:
# aqilist[2]=aqilist[2]+1
# elif aqi<=100:
# aqilist[3]=aqilist[3]+1
# elif aqi<=100:
# aqilist[4]=aqilist[4]+1
# else:
# aqilist[5] = aqilist[5] + 1
# else:
# aqilist = [0,0,0,0,0,0]
# if aqi <= 50:
# aqilist[0] = aqilist[0] + 1
# elif aqi <= 100:
# aqilist[1] = aqilist[1] + 1
# elif aqi <= 100:
# aqilist[2] = aqilist[2] + 1
# elif aqi <= 100:
# aqilist[3] = aqilist[3] + 1
# elif aqi <= 100:
# aqilist[4] = aqilist[4] + 1
# else:
# aqilist[5] = aqilist[5] + 1
# citydict3[city] = aqilist
#
# for (city,aqi) in citydict3.items():
# write_clo = [city, aqi[0], aqi[1],aqi[2],aqi[3],aqi[4],aqi[5]]
# df = pd.DataFrame(columns=write_clo)
# df.to_csv('pie.csv', line_terminator="\n", index=False, mode='a', encoding='utf8') | [
"1144349266@qq.com"
] | 1144349266@qq.com |
c2f94c851ad8e7acf9199934c932789fcc57765a | bd5a446017eddbb927e12ac04c82287e46dc6088 | /openaction/base/context.py | c96402adb0e66fafcfeec5042dbdb00ff6f913c1 | [] | no_license | idlweb/open-action | 40395661bc0cc51b10d95ecfe7832a6d3293194f | c46e34c2fbc19c5bf88f1cda9555b475d6011d22 | refs/heads/master | 2021-01-17T05:17:15.561983 | 2013-01-11T16:04:39 | 2013-01-11T16:04:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | from action.models import Geoname, ActionCategory
from django.conf import settings
from users.forms import UserRegistrationForm
def global_context(request):
context = {}
context['main_arguments'] = ActionCategory.objects.all()
context['main_locations'] = Geoname.objects.all()
context['LESS_DEBUG'] = settings.LESS_DEBUG
context['LOGIN_URL'] = settings.LOGIN_URL
context['LOGOUT_URL'] = settings.LOGOUT_URL
if not request.user.is_authenticated():
context['registration_form'] = UserRegistrationForm()
return context | [
"joke2k@gmail.com"
] | joke2k@gmail.com |
fa07c03e34407de62bf96dedd9aba32ec4629fde | 5aa19a95b9c128009dc2c8fee2e40233fb60e89a | /DB/AdaptorDB.py | 2187acf39e94102074e033ca4ce79e2e3d6d9fb2 | [] | no_license | greenrain78/ERBS-DB-Client | f269d79f6be91af68d25acece3aaa02dad04b53a | b9d30d491e2174b83623c70d6839a4f069aaec16 | refs/heads/master | 2023-02-26T04:49:49.428264 | 2021-02-02T22:19:55 | 2021-02-02T22:19:55 | 330,716,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,932 | py | from datetime import datetime
from typing import List
from DB.SQL import initGamesSQL, initUserSQL, userTableName, gamesTableName, gamesTable
from DB.mariaDB import runSQL, getSQL
from DB.DBLog import getLogger
log = getLogger()
def initDB():
# init DB
runSQL(initGamesSQL)
runSQL(initUserSQL)
log.info('init DB in [%s]', __name__)
def insert(name: str, userNum: int):
sql = f'insert into {userTableName} ' \
f'(name, userNum) values(' \
f'"{name}", {userNum})'
runSQL(sql)
log.debug(f"{name} insert userNum{userNum}")
def getUserList() -> List[tuple]:
sql = f"select name, userNum from {userTableName}"
result = getSQL(sql)
return result
def insertGames(data: dict):
# games
sql = f'insert into {gamesTableName}('
for key in data:
sql += f'{key}, '
sql = sql[:-2] + ') values( '
for key, val in data.items():
if key == 'startDtm':
val = val.translate({ord('T'): ' '})
val = val[:-5]
print(val)
val = datetime.strptime(val, '%Y-%m-%d %H:%M:%S.%f')
log.debug(f"test: {val}")
sql += f'"{val}", '
elif type(val) is str:
sql += f'"{val}", '
elif type(val) is dict:
sql += f'"{val}", '
else:
sql += f'{val}, '
sql = sql[:-2] + ')'
runSQL(sql)
log.debug(f"insertGames user({data['nickname']})game:{data['gameId']}")
def getGameID() -> int:
sql = f"select gameId from {gamesTableName} ORDER BY gameId DESC LIMIT 1"
result = getSQL(sql)
if not result:
return None
else:
return result[0][0]
def checkGameID(gameID: int, userNum: int) -> bool:
sql = f"select gameId from {gamesTableName} WHERE gameId = {gameID} AND userNum = {userNum} "
result = getSQL(sql)
print(result)
if not result:
return False
else:
return True
| [
"kimdw010130@daum.net"
] | kimdw010130@daum.net |
1543c88cfd5d500fde13adf822144f8e4493ff15 | fb53ca8b57bd84e5506bd958305e95f052f8e1ba | /CorpusBuilder/getLinksOnline.py | c4e3c260646abcc24ac14d51aefb497b87d89986 | [] | no_license | mikhaylova-daria/NER | bbbfe6f2cac4135a66e6a9d4f7ac6446fc76f7b4 | ec87664a9b247428226a5d2e49cd6dd8d3937043 | refs/heads/master | 2021-01-19T03:43:47.869069 | 2016-06-13T19:22:47 | 2016-06-13T19:22:47 | 44,476,041 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,950 | py | import multiprocessing
__author__ = 'daria'
import Levenshtein
import requests
from bs4 import BeautifulSoup
from collections import defaultdict
trash = [u'File:', u'Portal:', u'Main_Page', u'Wikipedia:', u'Help:', u'Special:', u'Talk:', u'Category:', u'Template:',
u'Template_talk:']
# take links with tag "href"
def is_wiki_links(tag):
if tag.has_attr('href') and tag['href'][:6] == '/wiki/':
for name in trash:
if tag['href'].find(name) != -1:
return False
return True
else:
return False
# estimate links: takes only one links for mention
def mention_estimate(mention, mentions):
best_href = ''
if mention != '':
#print mention
max = 0.
for href in mentions[mention]:
l = Levenshtein.jaro(href[6:], mention)
if l >= max:
max = l
best_href = href
return best_href
import re
# get links using url of article
def get_links_online(url):
url = url.strip()
text = requests.get(url).text
soup = BeautifulSoup(text)
title = soup.title.text.encode('utf-8')
dirName = "pioNER_Wiki_Links/different"
if re.match(r'[A-Z]', url[len("https://en.wikipedia.org/wiki/"):]) is not None:
dirName = "pioNER_Wiki_Links/" + url[len("https://en.wikipedia.org/wiki/")]
else:
return
links = open(dirName + '/'+url[len("https://en.wikipedia.org/wiki/"):].replace('_', ' '), 'w')
links.write(url[len("https://en.wikipedia.org/")-1:]+'\t'
+title[:-len(' - Wikipedia, the free encyclopedia')] + '\n')
hrefs = defaultdict(set)
mentions = defaultdict(set)
wiki_links = soup.find_all(is_wiki_links)
for tag in wiki_links:
if tag.has_attr('title'):
hrefs[tag['href'].encode('utf-8')].add(tag['title'].encode('utf-8'))
hrefs[tag['href'].encode('utf-8')].add(tag.text.encode('utf-8'))
mentions[tag.text.encode('utf-8')].add(tag['href'].encode('utf-8'))
for mention in mentions:
#links.write(href)
#for mention in hrefs[href]:
#if Levenshtein.distance(mention_estimate(mention)[6:], mention)
# *2./(1+len(mention_estimate(mention)[6:] + mention)) < 0.8:
if Levenshtein.setratio(mention_estimate(mention, mentions)[6:].split('_'),
mention.replace('\xc2\xa0', ' ').split()) >= 0.5:
#print mention_estimate(mention, mentions), mention
links.write(mention_estimate(mention, mentions) + '\t'+mention)
links.write('\n')
links.close()
import sys
import os
def get_links_online_for_corpus(processes=multiprocessing.cpu_count()):
if __name__ == '__main__':
try:
os.mkdir("pioNER_Wiki_Links")
except OSError, e:
if e.errno != 17:
raise
pass
listdir = os.listdir("pioNER_Wiki_Links/")
dirName = 'different'
if listdir.count(dirName) == 0:
os.mkdir("pioNER_Wiki_Links/" + dirName)
for dirName in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
'T', 'U', 'V', 'W', 'X', 'Y', 'Z']:
if listdir.count(dirName) == 0:
os.mkdir("pioNER_Wiki_Links/" + dirName)
if not os.path.exists(sys.argv[1] + 'contents'):
corpus_contents_file = open("contents", 'w')
home = sys.argv[1]
for root, dirs, files in os.walk(home):
for name in files:
corpus_contents_file.write(sys.argv[2] + name.replace(' ', '_')+'\n')
corpus_contents_file.close()
pool = multiprocessing.Pool(processes)
corpus_contents_file = open("contents", 'r')
pool.map(get_links_online, corpus_contents_file)
pool.terminate()
get_links_online_for_corpus()
| [
"dasham94@yandex.ru"
] | dasham94@yandex.ru |
32de168ddd59c44746217ded4fbc47aefd0220b8 | 9fff6cf2f8a777c2cbe84ff232f65dcf6def0ec4 | /lcs/views.py | 86bf58a29a91dad0bf41c5a41e7a3ecd5c29b442 | [] | no_license | dougintexas/dougintexas.github.io | 71893d2e42e9679eefddeb7175e0ee01687b6187 | 619e2f43837121b29b725f0143ef497552742c79 | refs/heads/master | 2021-07-02T03:06:06.700732 | 2020-01-31T20:52:54 | 2020-01-31T20:52:54 | 62,011,280 | 0 | 0 | null | 2021-06-10T19:14:37 | 2016-06-26T22:34:25 | HTML | UTF-8 | Python | false | false | 166 | py | from django.http import HttpResponse
from django.views.decorators.clickjacking import xframe_options_exempt
@xframe_options_exempt
def home(request):
return ""
| [
"atadams@gmail.com"
] | atadams@gmail.com |
106e275868007d2c55256b1422e7451549b6f3e8 | e3fc83e77e218f7b8df4b14b0753fd65afd4b923 | /downloaded_kernels/loan_data/converted_notebooks/kernel_39.py | 1359e1b6237eaebdd7a85313245855caf0629b40 | [
"MIT"
] | permissive | jupste/wranglesearch | 982684fdaa7914af59758880fdc3a4ff3346477f | a6978fae73eee8ece6f1db09f2f38cf92f03b3ad | refs/heads/master | 2023-06-18T04:46:34.474046 | 2021-07-15T23:43:24 | 2021-07-15T23:43:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,558 | py |
# coding: utf-8
# ***Visualization(Exploratory data analysis) - Phase 1 ***
# * ***Major questions to answer(A/B Testing):***
# 1. Does the installment amount affect loan status ?
# 2. Does the installment grade affect loan status ?
# 3. Which grade has highest default rate ?
# 4. Does annual income/home-ownership affect default rate ?
# 5. Which state has highest default rate ?
# * ***Text Analysis - Phase 2 ***
# 6. Is it that a people with a certain empoyee title are taking up more loans as compared to others ?
# 7. Does a specific purpose affect loan status ?
# * ***Model Building - Phase 3***
# 8. Trying various models and comparing them
# ***Visualization(Exploratory data analysis) - Phase 1 ***
# In[50]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
# Importing the libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
# Reading the dataset
data = pd.read_csv("../input/loan.csv")
data_1 = pd.DataFrame(data) # Creating a copy
# Checking the dataset
data.head()
data.tail()
data.describe()
data = data.iloc[:,2:-30].values
# In[51]:
# Setting the target vector
status = data[:,14]
unique_labels = np.unique(status, return_counts = True)
# print(unique_labels)
plt.figure()
plt.bar(unique_labels[0],unique_labels[1])
plt.xlabel('Type of label')
plt.ylabel('Frequency')
plt.title('Status categories')
plt.show()
category = unique_labels[0]
frequency = unique_labels[1]
category_count = np.vstack((category,frequency))
category_list = np.array(category_count.T).tolist()
category_list_1 = pd.DataFrame(category_list)
print(category_list_1)
# Let us consider only 2 major categories "Charged off" and "Fully Paid". A few reasons to do this:
# 1. To convert it into a binary cassification problem, and to analyze in detail the effect of important variables on the loan status.
# 2. A lot of observations show status "Current", so we do not know whether it will be "Charged Off", "Fully Paid" or "Default".
# 3. The observations for "Default" are too less as compared to "Fully Paid" or "Charged Off", to thoughroly investigate those observations with loan status as "Default".
# 4. The remaining categories of "loan status" are not of prime importance for this analysis.
#
# In[52]:
category_one_data = data_1[data_1.loan_status == "Fully Paid"]
category_two_data = data_1[data_1.loan_status == "Charged Off"]
new_data = np.vstack((category_one_data,category_two_data))
# new_data_copy = pd.DataFrame(new_data)
new_data = new_data[:,2:-30]
new_data_df = pd.DataFrame(new_data)
# **Exploratory Data Analysis**
# 1. Variable under inspection:Installment amount
# Whether there is any trend with respect to Installment amount.
# For eg: Higher the installment amount higher the number of "Charged Off" observations ?
#
# In[53]:
# Creating bins for various installment amounts.
# NOTE(review): np.linspace(..., 10) yields 10 edges and np.digitize labels
# bins from 1, so group labels run 1..10, where group 10 presumably holds
# only the row(s) equal to the maximum installment — confirm against data.
installment_amt = new_data[:,5]
bins = np.linspace(installment_amt.min(), installment_amt.max(), 10)
installment_amt = installment_amt.astype(float).reshape(installment_amt.size,1)
binned_installment_amt = pd.DataFrame(np.digitize(installment_amt, bins))
# Rows of installment_groups are (group label, frequency) pairs.
installment_groups = (np.array(np.unique(binned_installment_amt, return_counts = True))).T
# A bar plot to figure out the distribution of installment amount
plt.figure()
plt.bar(installment_groups[:,0],installment_groups[:,1])
plt.xlabel('Installment_amt_grp')
plt.ylabel('Frequency')
plt.title('Distribution of Installment amount categories')
plt.show()
# Pair each row's installment group with a factorized loan status.
# pd.factorize numbers labels in order of first appearance; "Fully Paid"
# rows were stacked first above, hence 0 = Fully Paid, 1 = Charged Off.
status_new = new_data_df[14]
factored_status = np.array(pd.factorize(status_new)) # 0's = Fully Paid, 1's = Charged Off
status_labels = pd.DataFrame(factored_status[0])
status_installment_groups = pd.DataFrame(np.hstack((binned_installment_amt,status_labels)))
status_installment_groups.columns = ['Installment_amt_grp','status_labels']
# Looking for a trend in the defaulted (charged-off) observations
Charged_off = status_installment_groups[status_installment_groups.status_labels == 1]
temp_1 = Charged_off.iloc[:,0].values
plot_var_1 = np.array(np.unique(temp_1, return_counts = True))
# Drop the last (10th) group here, since it has only one reading.
plot_var_1 = plot_var_1[:,:-1]
plot_var_11 = plot_var_1.T
# Looking for a trend in the successful (fully-paid) observations
Fully_paid = status_installment_groups[status_installment_groups.status_labels == 0]
temp_2 = Fully_paid.iloc[:,0].values
plot_var_2 = np.array(np.unique(temp_2, return_counts = True))
plot_var_22 = plot_var_2.T
# Concatenating the two tables column-wise.
# NOTE(review): np.hstack requires both sides to have the same number of
# rows, i.e. the same set of group labels after the drop above — this is
# data-dependent and worth verifying.
plot_var_stack = np.hstack((plot_var_11,plot_var_22))
plot_var_stack = pd.DataFrame(plot_var_stack)
# Column 2 is the duplicated group label coming from the fully-paid side.
plot_var_stack = plot_var_stack.drop(plot_var_stack.columns[2], axis=1)
plot_var_stack.columns = ['Installment_amt_grp','Charged Off','Fully Paid']
# Percent-stacked bar chart: convert raw counts to per-group percentages.
totals = [i+j for i,j in zip(plot_var_stack['Charged Off'], plot_var_stack['Fully Paid'])]
C_Off = [i / j * 100 for i,j in zip(plot_var_stack['Charged Off'], totals)]
mean_C_Off = np.mean(C_Off)  # currently unused (see commented-out axhline below)
F_Paid = [i / j * 100 for i,j in zip(plot_var_stack['Fully Paid'], totals)]
plot_var_stack = np.array(plot_var_stack)
group_number = plot_var_stack[:,0]
p1 = plt.bar(group_number, C_Off, color='#7f6d5f', edgecolor='white', width=0.5)
p2 = plt.bar(group_number, F_Paid, bottom=C_Off, color='#557f2d', edgecolor='white', width=0.5)
#Axes.axhline(y=mean_C_Off)
plt.xlabel('Installment_amt_grp')
plt.ylabel('Percent loan status')
plt.title('Installment amount categories')
plt.legend((p1, p2), ('Charged Off', 'Fully Paid'), loc = 'upper right')
plt.show()
# Though we can observe a slight variation in the "% Charged Off" values, overall we can say that the installment amount does not seem to affect the loan status.
#
# 2) Variable under inspection: Grade.
# Whether the installment grade affects the loan status ?
# In[54]:
# Grade analysis: column 6 of the stacked array holds the loan grade.
installment_grade = new_data[:,6]
# print(np.unique(installment_grade, return_counts = True))
installment_grade_list = np.array(np.unique(installment_grade, return_counts = True))
installment_grade_df = pd.DataFrame(installment_grade_list.T)
print(installment_grade_df)
# Distribution of Installment grade
plt.figure()
plt.bar(installment_grade_df[0],installment_grade_df[1])
plt.xlabel('Installment_grade')
plt.ylabel('Frequency')
plt.title('Distribution of Installment grade categories')
plt.show()
# Pair each row's grade with the factorized loan status computed earlier
# (status_labels: 0 = Fully Paid, 1 = Charged Off).
installment_grade = pd.DataFrame(installment_grade)
status_installment_grade = pd.DataFrame(np.hstack((installment_grade,status_labels)))
status_installment_grade.columns = ['Installment_grade','status_labels']
# Looking for a trend in the defaulted (charged-off) observations
Charged_off_grade = status_installment_grade[status_installment_grade.status_labels == 1]
temp_11 = Charged_off_grade.iloc[:,0].values
plot_var_grade = np.array(np.unique(temp_11, return_counts = True))
plot_var_grade_11 = plot_var_grade.T
# Looking for a trend in the successful (fully-paid) observations
Fully_Paid_grade = status_installment_grade[status_installment_grade.status_labels == 0]
temp_22 = Fully_Paid_grade.iloc[:,0].values
plot_var_grade_2 = np.array(np.unique(temp_22, return_counts = True))
plot_var_grade_22 = plot_var_grade_2.T  # (unlike the amount analysis, nothing is dropped here)
# Concatenating the two tables column-wise.
# NOTE(review): np.hstack assumes both sides list the same set of grades in
# the same order — data-dependent; verify.
plot_var_stack_1 = np.hstack((plot_var_grade_11,plot_var_grade_22))
plot_var_stack_1 = pd.DataFrame(plot_var_stack_1)
# Column 2 is the duplicated grade label coming from the fully-paid side.
plot_var_stack_1 = plot_var_stack_1.drop(plot_var_stack_1.columns[2], axis=1)
plot_var_stack_1.columns = ['Installment_grade_grp','Charged Off','Fully Paid']
# Percent-stacked bar chart: convert raw counts to per-grade percentages.
totals = [i+j for i,j in zip(plot_var_stack_1['Charged Off'], plot_var_stack_1['Fully Paid'])]
C_Off = [i / j * 100 for i,j in zip(plot_var_stack_1['Charged Off'], totals)]
mean_C_Off = np.mean(C_Off)  # currently unused (see commented-out axhline below)
F_Paid = [i / j * 100 for i,j in zip(plot_var_stack_1['Fully Paid'], totals)]
# plot_var_stack_1 = np.array(plot_var_stack_1)
group_number = plot_var_stack_1['Installment_grade_grp']
p1 = plt.bar(group_number, C_Off, color='#7f6d5f', edgecolor='white', width=0.5)
p2 = plt.bar(group_number, F_Paid, bottom=C_Off, color='#557f2d', edgecolor='white', width=0.5)
#Axes.axhline(y=mean_C_Off)
plt.xlabel('Installment_grade')
plt.ylabel('Percent loan status')
plt.title('Installment grade categories')
plt.legend((p1, p2), ('Charged Off', 'Fully Paid'), loc = 'upper right')
plt.show()
# 1. The grade does seem to affect the default rate: Higher the grade higher the percentage of "Charged Off" loans.
# 2. Also from the plot we can conclude that Grade G has the highest "% Charged Off"
# 3. To further investigate this we need to know what the Grade refers to, does it represent risk factor in lending the money ?
# If yes, then the results make sense: Higher the grade higher the risk factor.
# 4. Also, from the distribution plot we can see that they are already lending only a handful amount of loans to people classified in "Grade G". They should be more precautious in their approach to lending money to customers who are classified to be in higher grades.
#
# ---------------------------------------------------------------------------
# 3) Variable under inspection: Home Status
# Same recipe as the other categorical features: plot the raw distribution,
# then a percent-stacked bar of Charged Off vs Fully Paid per category.
# ---------------------------------------------------------------------------
# In[55]:
home_status = new_data_df[10]
# print(np.unique(home_status, return_counts = True))
home_status_list = np.array(np.unique(home_status, return_counts = True))
home_status_df = pd.DataFrame(home_status_list.T)
print(home_status_df)
# Distribution of Home Status (an earlier comment said "Emp_length" — copy/paste slip)
plt.figure()
plt.bar(home_status_df[0],home_status_df[1])
plt.xlabel('Home Status')
plt.ylabel('Frequency')
plt.title('Home Status categories')
plt.show()
# Pair home status with the loan-status labels.
home_status = pd.DataFrame(home_status)
status_home_status = pd.DataFrame(np.hstack((home_status,status_labels)))
status_home_status.columns = ['Home Status','status_labels']
# Per-category counts among the defaulted observations
Charged_off_home_status = status_home_status[status_home_status.status_labels == 1]
temp_41 = Charged_off_home_status.iloc[:,0].values
plot_var_home_status = np.array(np.unique(temp_41, return_counts = True))
plot_var_home_status_44 = pd.DataFrame(plot_var_home_status.T)
# Per-category counts among the successful observations
Fully_Paid_home_status = status_home_status[status_home_status.status_labels == 0]
temp_42 = Fully_Paid_home_status.iloc[:,0].values
plot_var_home_status_2 = np.array(np.unique(temp_42, return_counts = True))
plot_var_home_status_55 = pd.DataFrame(plot_var_home_status_2.T)
plot_var_home_status_55 = plot_var_home_status_55.drop(0) # Eliminating home status = "any": it has only one reading and no Charged Off counterpart
# Concatenate the two count tables; index 2 is the duplicated category column.
plot_var_stack_3 = np.hstack((plot_var_home_status_44,plot_var_home_status_55))
plot_var_stack_3 = pd.DataFrame(plot_var_stack_3)
plot_var_stack_3 = plot_var_stack_3.drop(plot_var_stack_3.columns[2], axis=1)
plot_var_stack_3.columns = ['Home Status','Charged Off','Fully Paid']
# Percent-stacked bar chart: raw counts -> per-category percentages.
totals = [i+j for i,j in zip(plot_var_stack_3['Charged Off'], plot_var_stack_3['Fully Paid'])]
C_Off = [i / j * 100 for i,j in zip(plot_var_stack_3['Charged Off'], totals)]
mean_C_Off = np.mean(C_Off)
F_Paid = [i / j * 100 for i,j in zip(plot_var_stack_3['Fully Paid'], totals)]
#plot_var_stack_3 = np.array(plot_var_stack_3)
group_number = plot_var_stack_3['Home Status']
p1 = plt.bar(group_number, C_Off, color='#7f6d5f', edgecolor='white', width=0.5)
p2 = plt.bar(group_number, F_Paid, bottom=C_Off, color='#557f2d', edgecolor='white', width=0.5)
#Axes.axhline(y=mean_C_Off)
plt.xlabel('Home Status')
plt.ylabel('Percent loan status')
plt.title('Home Status categories')
plt.legend((p1, p2), ('Charged Off', 'Fully Paid'), loc = 'upper right')
plt.show()
# From the stacked percentage plot, we can observe that the feature "Home Status" has no potential effect on our target variable "loan status"
# ---------------------------------------------------------------------------
# 4) Variable under inspection: Annual Income
#
# np.digitize with the four bin edges [40000, 70000, 100000, 150000] actually
# produces FIVE groups (indices 0-4):
# 0. People earning less than USD 40,000.
# 1. People earning between USD 40,000 and USD 70,000.
# 2. People earning between USD 70,000 and USD 100,000.
# 3. People earning between USD 100,000 and USD 150,000.
# 4. People earning more than USD 150,000.
# (The original comment described only four bins.)
# ---------------------------------------------------------------------------
# In[56]:
## Now checking the effect of annual income on loan status
# Creating bins for various income amounts
annual_income = new_data[:,11]
#bins_2 = np.linspace(annual_income.min(), annual_income.max(), 3)
bins_2 = np.array([40000,70000,100000,150000])
# Reshape to a column vector so hstack with the label column works below.
annual_income = annual_income.astype(float).reshape(annual_income.size,1)
binned_annual_income = pd.DataFrame(np.digitize(annual_income, bins_2))
annual_groups = (np.array(np.unique(binned_annual_income, return_counts = True))).T
# A bar plot to figure out the distribution of income amount
plt.figure()
plt.bar(annual_groups[:,0],annual_groups[:,1])
plt.xlabel('Annual income amount group')
plt.ylabel('Frequency')
plt.title('Annual income amount categories')
plt.legend(loc="upper right")
plt.show()
# Appending the income groups to the loan-status labels
status_annual_groups = pd.DataFrame(np.hstack((binned_annual_income,status_labels)))
status_annual_groups.columns = ['Annual_income_grp','status_labels']
# Per-group counts among the defaulted observations
Charged_off_annual_income = status_annual_groups[status_annual_groups.status_labels == 1]
temp_51 = Charged_off_annual_income.iloc[:,0].values
plot_var_annual_income = np.array(np.unique(temp_51, return_counts = True))
plot_var_annual_income_66 = pd.DataFrame(plot_var_annual_income.T)
# Per-group counts among the successful observations
Fully_Paid_annual_income = status_annual_groups[status_annual_groups.status_labels == 0]
temp_52 = Fully_Paid_annual_income.iloc[:,0].values
plot_var_annual_income_2 = np.array(np.unique(temp_52, return_counts = True))
plot_var_annual_income_77 = pd.DataFrame(plot_var_annual_income_2.T)
#plot_var_annual_income_55 = plot_var_home_status_55.drop(0) # Eliminating the home status = "any", since as only one reading
# Concatenate the two count tables; index 2 is the duplicated group column.
plot_var_stack_4 = np.hstack((plot_var_annual_income_66,plot_var_annual_income_77))
plot_var_stack_4 = pd.DataFrame(plot_var_stack_4)
plot_var_stack_4 = plot_var_stack_4.drop(plot_var_stack_4.columns[2], axis=1)
plot_var_stack_4.columns = ['Annual Income Group','Charged Off','Fully Paid']
# Percent-stacked bar chart: raw counts -> per-group percentages.
totals = [i+j for i,j in zip(plot_var_stack_4['Charged Off'], plot_var_stack_4['Fully Paid'])]
C_Off = [i / j * 100 for i,j in zip(plot_var_stack_4['Charged Off'], totals)]
mean_C_Off = np.mean(C_Off)
F_Paid = [i / j * 100 for i,j in zip(plot_var_stack_4['Fully Paid'], totals)]
#plot_var_stack_4 = np.array(plot_var_stack_4)
group_number = plot_var_stack_4['Annual Income Group']
p1 = plt.bar(group_number, C_Off, color='#7f6d5f', edgecolor='white', width=0.5)
p2 = plt.bar(group_number, F_Paid, bottom=C_Off, color='#557f2d', edgecolor='white', width=0.5)
#Axes.axhline(y=mean_C_Off)
plt.xlabel('Annual income amount group')
plt.ylabel('Percent loan status')
plt.title('Annual income amount categories')
plt.legend((p1, p2), ('Charged Off', 'Fully Paid'), loc = 'upper right')
plt.show()
# We can observe a slight downward trend, which suggests that people with higher income are less likely to get "Charged Off".
#
# 5) Variable under inspection:State
# An important question here would be to check whether the state affects the loan status. Also, to find out which state has highest "% Charged Off".
# In[57]:
# Separating the variable under investigation
state = new_data_df[21]
#print(np.unique(state, return_counts = True))
state_list = np.array(np.unique(state, return_counts = True))
state_df = pd.DataFrame(state_list.T)
print(state_df)
# Distribution of Emp_length
plt.figure()
plt.bar(state_df[0],state_df[1])
plt.xlabel('State')
plt.ylabel('Frequency')
plt.title('State')
plt.show()
state = pd.DataFrame(state)
status_state = pd.DataFrame(np.hstack((state,status_labels)))
status_state.columns = ['State','status_labels']
# Looking for a trend in the defaulted observations
Charged_off_state = status_state[status_state.status_labels == 1]
temp_61 = Charged_off_state.iloc[:,0].values
plot_var_state = np.array(np.unique(temp_61, return_counts = True))
plot_var_state_88 = pd.DataFrame(plot_var_state.T)
# Looking for a trend in the successful observations
Fully_Paid_state = status_state[status_state.status_labels == 0]
temp_62 = Fully_Paid_state.iloc[:,0].values
plot_var_state_2 = np.array(np.unique(temp_62, return_counts = True))
plot_var_state_99 = pd.DataFrame(plot_var_state_2.T)
# * We know US has only 50 States, but we have a list of 51 states. On investigation we can see that DC is added as a state even when it isn't a state.
# * We also notice that its present in both the cases, charged off as well as in fully paid observations.
# * So I decide on just eliminating DC from the list (Keep this in mind) .
# * Also, states like ME and ND have no people with "Charged Off" observations, so we will just take them off the list as well and check for any trends in the state variable.
#
# In[58]:
plot_var_state_88 = plot_var_state_88.drop(7)
plot_var_state_99 = plot_var_state_99.drop([7,21,28]) # Eliminating the home status = "any", since as only one reading
# Concatenating the two variables
plot_var_stack_5 = np.hstack((plot_var_state_88,plot_var_state_99))
plot_var_stack_5 = pd.DataFrame(plot_var_stack_5)
plot_var_stack_5 = plot_var_stack_5.drop(plot_var_stack_5.columns[2], axis=1)
plot_var_stack_5.columns = ['state','Charged Off','Fully Paid']
# Percent stacked
# From raw value to percentage
totals = [i+j for i,j in zip(plot_var_stack_5['Charged Off'], plot_var_stack_5['Fully Paid'])]
C_Off = [i / j * 100 for i,j in zip(plot_var_stack_5['Charged Off'], totals)]
mean_C_Off = np.mean(C_Off)
F_Paid = [i / j * 100 for i,j in zip(plot_var_stack_5['Fully Paid'], totals)]
#plot_var_stack_5 = np.array(plot_var_stack_5)
group_number = plot_var_stack_5['state']
p1 = plt.bar(group_number, C_Off, color='#7f6d5f', edgecolor='white', width=0.5)
p2 = plt.bar(group_number, F_Paid, bottom=C_Off, color='#557f2d', edgecolor='white', width=0.5)
plt.xlabel('State')
plt.ylabel('Percent loan status')
plt.title('State')
plt.legend((p1, p2), ('Charged Off', 'Fully Paid'), loc = 'upper right')
plt.show()
###### Sort in order and print top 5 states with max default % ########
# Concatenating C_Off and state
C_Off = pd.DataFrame(C_Off)
temp_plot = np.hstack((plot_var_stack_5, C_Off))
temp_plot = pd.DataFrame(temp_plot)
temp_plot.columns = ['state','Charged Off','Fully Paid','% Charged Off']
temp_plot = np.array(temp_plot.sort_values(by = '% Charged Off',ascending = False))
print(temp_plot[0:5,(0,3)])
temp_plot = pd.DataFrame(temp_plot)
temp_plot.columns = ['state','Charged Off','Fully Paid','% Charged Off']
temp_plot = temp_plot.drop(['Charged Off', 'Fully Paid'], axis = 1)
# * We can observe that there is variation of "% Chharged Off" in the percent stacked plot.
# * Though we cannot draw any strong conclusions of whether the "% Charged Off" is affected by the "State" variable, we can answer our question of which state has the highest "% Charged Off"?
# * We could see state of Tennessee has the highest "% Charged Off" of 23.21%
# In[59]:
# Choropleth map of per-state default rate for better visualization.
# Requires temp_plot (state, % Charged Off) built in the previous cell.
import plotly
# NOTE(review): `plotly.plotly` was split out into the chart_studio package in
# plotly >= 4; `py` is only referenced in commented-out code below, so this
# import could likely be removed — confirm against the installed plotly version.
import plotly.plotly as py
import plotly.graph_objs as go
from plotly.offline import init_notebook_mode,iplot
init_notebook_mode(connected=True)
# Plotly expects string-typed columns here; z is cast back to float below.
for col in temp_plot.columns:
    temp_plot[col] = temp_plot[col].astype(str)
# Purple color scale, light (low default rate) to dark (high default rate).
scl = [[0.0, 'rgb(242,240,247)'],[0.2, 'rgb(218,218,235)'],[0.4, 'rgb(188,189,220)'], [0.6, 'rgb(158,154,200)'],[0.8, 'rgb(117,107,177)'],[1.0, 'rgb(84,39,143)']]
#temp_plot['text'] = temp_plot['state'] + '<br>' +\
#    'Default rate '+ temp_plot['% Charged Off']
data_chloropleth = [ dict(
    type ='choropleth',
    colorscale = scl,
    autocolorscale = False,
    locations = temp_plot['state'],
    z = temp_plot['% Charged Off'].astype(float),
    locationmode = 'USA-states',
    #text = temp_plot['text'],
    marker = dict(
        line = dict (
            color = 'rgb(255,255,255)',
            width = 2
        ) ),
    colorbar = dict(
        title = "Default rate")
    ) ]
layout = dict(
    title = 'State-wise % Charged Off<br>(Hover for breakdown)',
    geo = dict(
        scope='usa',
        projection=dict(type='albers usa'),
        showlakes = True,
        lakecolor = 'rgb(255, 255, 255)'),
    )
#fig = dict(data=data_chloropleth, layout=layout)
#py.iplot(fig, image = 'png', filename = 'test-image-2', show_link = False)
# Rendered offline in the notebook instead of via the plotly cloud (py.iplot).
chloromap = go.Figure(data = data_chloropleth, layout = layout)
iplot(chloromap, validate=False)
# ---------------------------------------------------------------------------
# 6) Variable under inspection: Verification Status
# Same percent-stacked-bar recipe as the other categorical features.
# ---------------------------------------------------------------------------
# In[60]:
# Separating the variable under investigation
ver_stat = new_data_df[12]
#print(np.unique(ver_stat, return_counts = True))
ver_stat_list = np.array(np.unique(ver_stat, return_counts = True))
ver_stat_df = pd.DataFrame(ver_stat_list.T)
print(ver_stat_df)
# Distribution of Verification Status (an earlier comment said "Emp_length" — copy/paste slip)
plt.figure()
plt.bar(ver_stat_df[0],ver_stat_df[1])
plt.xlabel('Verification Status')
plt.ylabel('Frequency')
plt.title('Verification Status')
plt.show()
# Pair verification status with the loan-status labels.
ver_stat = pd.DataFrame(ver_stat)
status_ver_stat = pd.DataFrame(np.hstack((ver_stat,status_labels)))
status_ver_stat.columns = ['Verification Status','status_labels']
# Per-category counts among the defaulted observations
Charged_off_ver_stat = status_ver_stat[status_ver_stat.status_labels == 1]
temp_71 = Charged_off_ver_stat.iloc[:,0].values
plot_var_ver_stat = np.array(np.unique(temp_71, return_counts = True))
plot_var_ver_stat_101 = pd.DataFrame(plot_var_ver_stat.T)
# Per-category counts among the successful observations
Fully_Paid_ver_stat = status_ver_stat[status_ver_stat.status_labels == 0]
temp_72 = Fully_Paid_ver_stat.iloc[:,0].values
plot_var_ver_stat_2 = np.array(np.unique(temp_72, return_counts = True))
plot_var_ver_stat_111 = pd.DataFrame(plot_var_ver_stat_2.T)
# Concatenate the two count tables; index 2 is the duplicated category column.
plot_var_stack_6 = np.hstack((plot_var_ver_stat_101,plot_var_ver_stat_111))
plot_var_stack_6 = pd.DataFrame(plot_var_stack_6)
plot_var_stack_6 = plot_var_stack_6.drop(plot_var_stack_6.columns[2], axis=1)
plot_var_stack_6.columns = ['Verification Status','Charged Off','Fully Paid']
# Percent-stacked bar chart: raw counts -> per-category percentages.
totals = [i+j for i,j in zip(plot_var_stack_6['Charged Off'], plot_var_stack_6['Fully Paid'])]
C_Off = [i / j * 100 for i,j in zip(plot_var_stack_6['Charged Off'], totals)]
mean_C_Off = np.mean(C_Off)
F_Paid = [i / j * 100 for i,j in zip(plot_var_stack_6['Fully Paid'], totals)]
#plot_var_stack_5 = np.array(plot_var_stack_5)
group_number = plot_var_stack_6['Verification Status']
p1 = plt.bar(group_number, C_Off, color='#7f6d5f', edgecolor='white', width=0.5)
p2 = plt.bar(group_number, F_Paid, bottom=C_Off, color='#557f2d', edgecolor='white', width=0.5)
plt.xlabel('Verification Status')
plt.ylabel('Percent loan status')
plt.title('Verification Status')
plt.legend((p1, p2), ('Charged Off', 'Fully Paid'), loc = 'upper right')
plt.show()
# The result is slightly unexpected, as we would think that loan given after thorough verification would result in lesser percentage of "Charged Off" loans, but turns out that loans given off to people without verification show a lesser "% Charged Off" loans.
# * ***Text Analysis - Phase 2 ***
# In[62]:
# Text analysis: word clouds of the free-text columns. Rows with missing text
# are dropped before joining everything into one big string for WordCloud.
from wordcloud import WordCloud
# Employee Title (column 8)
emp_title = new_data_df[8]
emp_title = pd.DataFrame(emp_title)
emp_title.columns = ['Employee Title']
emp_title = emp_title.dropna(axis=0, how='all')
wordcloud = WordCloud().generate(' '.join(emp_title['Employee Title']))
# Render the word cloud as an image (axes hidden).
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
# In[63]:
# Loan Title (column 19)
title = new_data_df[19]
title = pd.DataFrame(title)
title.columns = ['Title']
title = title.dropna(axis=0, how='all')
wordcloud3 = WordCloud().generate(' '.join(title['Title']))
# Render the word cloud as an image (axes hidden).
plt.imshow(wordcloud3)
plt.axis("off")
plt.show()
# From the word-cloud we can notice that majority of the people took a loan for debt consolidation, refinancing debt, credit card payment, or home improvement.
# In[64]:
# Description free-text field (column 17): strip the Lending Club boiler-plate
# prefix and HTML line breaks, then visualize the remaining words.
Description = new_data_df[17]
Description = Description.dropna(axis=0, how='all')
Description = list(Description)
# Clean every description in one pass. The original loop carried a redundant
# manual counter (`i = 0` before the loop and `i = i+1` inside it) that the
# `for` statement already manages; a comprehension expresses the same cleanup
# without the dead bookkeeping.
Description_1 = [
    d.replace("Borrower added on ", "").replace("<br>", "")
    for d in Description
]
Description_1 = pd.DataFrame(Description_1)
Description_1.columns = ['Description']
wordcloud4 = WordCloud().generate(' '.join(Description_1['Description']))
# Render the word cloud as an image (axes hidden).
plt.imshow(wordcloud4)
plt.axis("off")
plt.show()
# Again from the word-cloud we can notice that majority of the people described their reason for taking loan to pay off high interest credit card loan pay.
# ***Model Building - Phase 3***
# * Data pre-processing:
# * Cleaning the data ---
# 1) Selecting necessary features
# 2) Taking care of nan values
# In[65]:
# Data pre-processing
# Rebuild the working frame from the two status categories and drop columns
# that are mostly missing.
new_data_copy = np.vstack((category_one_data,category_two_data))
new_data_copy = pd.DataFrame(new_data_copy)
#print(np.shape(new_data_copy)) # Dimensions of the dataset
#print(new_data_copy.isnull().sum()) # Printing number of na values in each column
#data_2 = new_data_copy.dropna(axis = 1, how = 'all') # Dropping columns where all values are na
data_2 = new_data_copy
#print(np.shape(data_2)) # Dimensions of new dataset
# We can observe that one of the column was removed since it was completely empty
data_dim = np.shape(data_2)
# A lot of columns contain ~70% na values, which is no good for us.
# Columns having more than 20% na values would not be of much help, thus eliminating them.
col_nos = []
i = 0
for i in range (0,data_dim[1]):
    num_na_val = data_2[i].isnull().sum()
    if (num_na_val/len(data_2)) > 0.2:
        col_nos.append(i)
    # The trailing `i = i+1` is redundant: the for statement already advances i.
    i = i+1
data_2 = data_2.drop(data_2.columns[col_nos], axis = 1)
#print(data_2.isnull().sum())
np.shape(data_2)
# Now let's drop the columns like id, employee title, description, etc. which cannot be taken into consideration while modelling.
# Columns are renumbered 0..N-1 after each drop so the positional index lists
# below stay meaningful.
rename_var_1 = range(0,49)
data_2.columns = rename_var_1
cols_remove = [0,10,11,17,18,19,20,21]
data_2 = data_2.drop(data_2.columns[cols_remove], axis = 1)
np.shape(data_2)
rename_var_2 = range(0,41)
data_2.columns = rename_var_2
# Positional column groups (after the renumbering above):
time_series_var = [12,17,34,36]
cat_var_cols = [4,7,8,9,11,14,16,18,19,20,24,25,32,33,37,38,39]
cat_plus_time_cols = [4,7,8,9,11,12,14,16,17,18,19,20,24,25,32,33,34,36,37,38,39]
cat_var_df = data_2.iloc[:,cat_var_cols].values
cat_var_df = pd.DataFrame(cat_var_df)
#cat_var_df.describe(include=['category'])
# Collect the distinct values of every categorical column for inspection.
i = 0
unique_categories = []
for i in cat_var_df:
    un_cat = np.unique(cat_var_df[i])
    unique_categories.append(un_cat)
    # Redundant: the for statement rebinds i on every iteration anyway.
    i = i+1
# Removing more columns based on the above result
#print(unique_categories)
c = [11,12,13,15]
cat_var_df = cat_var_df.drop(cat_var_df.columns[c], axis = 1)
np.shape(cat_var_df)
r_var = range(0,13)
# We can observe that column 16 has 56 null values, let us replace them with the most frequent value of the column
cat_var_df.columns = r_var
#print(cat_var_df.isnull().sum())
# Taking care of missing values for categorical features.
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn 0.22;
# the modern equivalent is sklearn.impute.SimpleImputer(strategy='most_frequent').
# Left as-is here because this script targets the older API — confirm the
# installed scikit-learn version before running.
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'most_frequent', axis = 0)
imputer = imputer.fit(cat_var_df[[11]])
cat_var_df[11] = imputer.transform(cat_var_df[[11]])
#print(cat_var_df[11].isnull().sum())
#print(cat_var_df.isnull().sum())
renaming_df = range(0,13)
cat_var_df.columns = renaming_df
# We can clearly see that now there are no more na values
# Encoding categorical data: label-encode each of the 13 categorical columns.
from sklearn.preprocessing import LabelEncoder
labelencoder_X = LabelEncoder()
i = 0
for i in range(0,13):
    cat_var_df[i] = labelencoder_X.fit_transform(cat_var_df[i])
    #onehotencoder = OneHotEncoder(categorical_features = [i])
    # Redundant: the for statement rebinds i on every iteration anyway.
    i = i+1
# Taking care of missing values for the remaining (non-categorical) features
#print(data_2.isnull().sum())
# NOTE: this is an alias, not a copy — data_2_copy and data_2 refer to the
# same DataFrame object; the drop below returns a new frame, so data_2 itself
# is not modified.
data_2_copy = data_2
non_cat_var = data_2_copy.drop(data_2_copy.columns[cat_plus_time_cols], axis = 1)
rename_var = range(0,20)
non_cat_var.columns = rename_var
# Separate the target variable (loan status) before imputing the features.
Y = non_cat_var[[7]]
non_cat_var = non_cat_var.drop(non_cat_var.columns[7], axis = 1)
#non_cat_var = non_cat_var.drop(non_cat_var.columns[0], axis = 1) # Dropping the variable id
renaming_df = range(0,19)
non_cat_var.columns = renaming_df
# Mean-impute every remaining numeric column (uses the legacy Imputer API
# imported earlier in this script).
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
i = 0
for i in non_cat_var:
    imputer = imputer.fit(non_cat_var[[i]])
    non_cat_var[i] = imputer.transform(non_cat_var[[i]])
    # Redundant: the for statement rebinds i on every iteration anyway.
    i = i+1
#print(non_cat_var.isnull().sum())
# We have no nan values in our non_cat_var now.
# Concatenate the numeric and encoded categorical variables into the feature matrix.
# Checking the dimensions
print(np.shape(non_cat_var))
print(np.shape(cat_var_df))
print(np.shape(Y))
X = np.hstack((non_cat_var,cat_var_df)) # Final feature matrix
# Awesome ! The boring task of cleaning the data is successfully completed. Now,
# 1. Label Encoding the target variable "Loan status".
# 2. Splitting the dataset.
# 3. Feature scaling
# In[66]:
# Label encoding the target variable
labelencoder_Y = LabelEncoder()
Y = labelencoder_Y.fit_transform(Y)
# Splitting the dataset into training and testing sets.
# BUGFIX: sklearn.cross_validation was deprecated in scikit-learn 0.18 and
# removed in 0.20; train_test_split lives in sklearn.model_selection, with
# identical behavior for this call.
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)
# Feature scaling: fit the scaler on the training set only, then apply the
# same transform to the test set (avoids test-set leakage).
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# So for model comparison, I have selected 6 Models for this analysis, and they are as follows:
# * Model 1 - XGBoostClassifier
# * Model 2 - Support Vector Classifier (SVC)
# * Model 3 - RandomForestClassifier
# * Model 4 - Logistic Regression
# * Model 5 - BalancedBaggingClassifier
# * Model 6 - Decision Tree
#
# In[49]:
# Fitting XGBClassifier to the training data: Model 1
from xgboost import XGBClassifier
classifier_1 = XGBClassifier()
classifier_1.fit(X_train,Y_train)
# Fitting SVM to the training data: Model 2 (alternative kernels: poly, sigmoid)
from sklearn.svm import SVC
classifier_2 = SVC(kernel = 'linear', C = 1, probability = True, random_state = 0)
classifier_2.fit(X_train,Y_train)
# Fitting Random Forest Classifier to the training data: Model 3
from sklearn.ensemble import RandomForestClassifier
classifier_3 = RandomForestClassifier(n_estimators = 5, criterion = 'entropy')
classifier_3.fit(X_train,Y_train)
# Fitting Logistic Regression to the training data: Model 4
from sklearn.linear_model import LogisticRegression
# BUGFIX: penalty='l1' requires a solver that supports L1 regularization.
# Older scikit-learn defaulted to 'liblinear' (which does); newer versions
# default to 'lbfgs' (which does not) and raise an error. Pinning
# solver='liblinear' preserves the original behavior on all versions.
classifier_4 = LogisticRegression(penalty = 'l1', solver = 'liblinear', random_state = 0)
classifier_4.fit(X_train,Y_train)
# Fitting Balanced Bagging Classifier to the training data: Model 5
# (RandomForestClassifier was already imported above; the duplicate import
# has been removed.)
from imblearn.ensemble import BalancedBaggingClassifier
classifier_5 = BalancedBaggingClassifier(base_estimator = RandomForestClassifier(criterion='entropy'),
                                         n_estimators = 5, bootstrap = True)
classifier_5.fit(X_train,Y_train)
# Fitting Decision Tree to the training data: Model 6
from sklearn.tree import DecisionTreeClassifier
classifier_6 = DecisionTreeClassifier()
classifier_6.fit(X_train,Y_train)
# In[ ]:
# Predict the test set with each of the six fitted classifiers.
y_pred_1 = classifier_1.predict(X_test)
y_pred_2 = classifier_2.predict(X_test)
y_pred_3 = classifier_3.predict(X_test)
y_pred_4 = classifier_4.predict(X_test)
y_pred_5 = classifier_5.predict(X_test)
y_pred_6 = classifier_6.predict(X_test)
# Confusion matrices; accuracy = (true negatives + true positives) / total,
# i.e. the trace of the 2x2 matrix divided by the number of test samples.
from sklearn.metrics import confusion_matrix
cm_1 = confusion_matrix(Y_test,y_pred_1)
accuracy_1 = (cm_1[0,0]+cm_1[1,1])/len(Y_test)
cm_2 = confusion_matrix(Y_test,y_pred_2)
accuracy_2 = (cm_2[0,0]+cm_2[1,1])/len(Y_test)
cm_3 = confusion_matrix(Y_test,y_pred_3)
accuracy_3 = (cm_3[0,0]+cm_3[1,1])/len(Y_test)
cm_4 = confusion_matrix(Y_test,y_pred_4)
accuracy_4 = (cm_4[0,0]+cm_4[1,1])/len(Y_test)
cm_5 = confusion_matrix(Y_test,y_pred_5)
accuracy_5 = (cm_5[0,0]+cm_5[1,1])/len(Y_test)
cm_6 = confusion_matrix(Y_test,y_pred_6)
accuracy_6 = (cm_6[0,0]+cm_6[1,1])/len(Y_test)
print("Accuracy_XGBoost:",accuracy_1*100,'%',"\nAccuracy_SVC:",accuracy_2*100,'%',"\nAccuracy_RF:",accuracy_3*100,'%',"\nAccuracy_Logistic:",accuracy_4*100,'%',
      "\nAccuracy_BalancedBagging:",accuracy_5*100,'%',"\nAccuracy_DecisionTree:",accuracy_6*100,'%')
# We can see that the accuracy for all the models is very high and pretty much the same. Thus we could preferably use a simpler model like Decision Tree to classify our data.
#
# For future work, since as the dataset is huge, one can try classify this dataset using Artificial Neural Networks. Also instead of binary classification one can try multi-class classification.
#
| [
"jcamsan@mit.edu"
] | jcamsan@mit.edu |
29bcb620ec75a93f13d88254737db6a6a998f321 | 5ff28a9f839a9c3cc416f1f4a2f6e8a656809c27 | /snakePlay.py | aa381137a352ea3d60f29c1a5c6d262a3c783096 | [] | no_license | Kripperoo/Genetic-Programming-for--Snake- | 2b655af958f263abe6f82c7f7abc48fde4d8a881 | 820715a2b43ae4c1d5482094f52db19f21e6dcc7 | refs/heads/master | 2020-04-16T08:41:29.456018 | 2019-01-13T20:24:58 | 2019-01-13T20:24:58 | 165,433,963 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,575 | py | # This version of the snake game allows you to play the same yourself using the arrow keys.
# Be sure to run the game from a terminal, and not within a text editor!
import curses
from curses import KEY_RIGHT, KEY_LEFT, KEY_UP, KEY_DOWN
import random
# Initialize the curses screen and the game window.
curses.initscr()
# Board dimensions (including the border) and number of food items on screen.
XSIZE,YSIZE = 18,18
NFOOD = 1
win = curses.newwin(YSIZE, XSIZE, 0, 0)
win.keypad(1)          # let getch() return KEY_* constants for arrow keys
curses.noecho()        # don't echo pressed keys
curses.curs_set(0)     # hide the terminal cursor
win.border(0)
win.nodelay(1)         # make getch() non-blocking (-1 when no key pressed)
def placeFood(snake, food):
    """Erase the current food from the window and spawn NFOOD fresh items.

    Returns the new list of [y, x] food coordinates.  Cells occupied by
    the snake body or by an already-placed food item are never chosen.
    """
    # Blank out every existing food cell before re-spawning.
    for cell in food:
        win.addch(cell[0], cell[1], ' ')
    placed = []
    while len(placed) < NFOOD:
        # Random interior cell (the border rows/columns are excluded).
        candidate = [random.randint(1, (YSIZE-2)), random.randint(1, (XSIZE-2))]
        if candidate not in snake and candidate not in placed:
            placed.append(candidate)
            win.addch(candidate[0], candidate[1], '*')
    return placed
def playGame():
    """Run one interactive snake game; returns when the snake dies."""
    score = 0
    key = KEY_RIGHT
    snake = [[4,10], [4,9], [4,8], [4,7], [4,6], [4,5], [4,4], [4,3], [4,2], [4,1],[4,0] ] # Initial snake co-ordinates ([y, x]; head first)
    food = []
    food = placeFood(snake,food)
    win.timeout(150)  # getch() waits at most 150 ms => game tick rate
    # wasAhead is never appended to anywhere below, so the final print shows [].
    wasAhead = []
    ahead = []
    A = "NO"  # set to "YES" if the cell ahead of the head ever lies on the snake
    while True:
        win.border(0)
        prevKey = key # Previous key pressed
        event = win.getch()
        # Keep the current direction when no key was pressed this tick.
        key = key if event == -1 else event
        if key not in [KEY_LEFT, KEY_RIGHT, KEY_UP, KEY_DOWN, 27]: # If an invalid key is pressed
            key = prevKey
        # Calculates the new coordinates of the head of the snake. NOTE: len(snake) increases
        # This is taken care of later at [1] (where we pop the tail)
        snake.insert(0, [snake[0][0] + (key == KEY_DOWN and 1) + (key == KEY_UP and -1), snake[0][1] + (key == KEY_LEFT and -1) + (key == KEY_RIGHT and 1)])
        # Game over if the snake goes through a wall
        if snake[0][0] == 0 or snake[0][0] == (YSIZE-1) or snake[0][1] == 0 or snake[0][1] == (XSIZE-1): break
        # Look one more cell ahead in the current direction and record whether
        # it already lies on the snake body (reported via A after the game).
        ahead = [ snake[0][0] + (key == KEY_DOWN and 1) + (key == KEY_UP and -1), snake[0][1] + (key == KEY_LEFT and -1) + (key == KEY_RIGHT and 1)]
        if ahead in snake:
            A = "YES"
        # Game over if the snake runs over itself
        if snake[0] in snake[1:]: break
        if snake[0] in food: # When snake eats the food
            score += 1
            food = placeFood(snake,food)
        else:
            last = snake.pop() # [1] If it does not eat the food, it moves forward and so last tail item is removed
            win.addch(last[0], last[1], ' ')
        win.addch(snake[0][0], snake[0][1], '#')
    # Restore the terminal before printing the results.
    curses.endwin()
    print(A)
    print("\nFinal score - " + str(score))
    print(wasAhead)
# Entry point: start an interactive game session immediately on import/run.
playGame()
| [
"yl2570@york.ac.uk"
] | yl2570@york.ac.uk |
81c34bd17953ac05445ed7cf1e981c1b620ddef9 | 9e9c8fede56a7b589bb38daf214e2535e3675aa5 | /Day_7/CollegeManagement/manage.py | 2ef9e180e9468085f91e8a952a754d67eaca8eb5 | [] | no_license | Kowshik-407/Python-Django-APSSDC | 97d8f29b62fc9c25f9772373df6b6f2f95132f05 | bdb5ba532cf828dfe53adeabdc86d256a87445b1 | refs/heads/master | 2023-05-14T11:16:12.141120 | 2021-05-29T14:04:16 | 2021-05-29T14:04:16 | 371,987,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    # Point Django at this project's settings module unless the caller has
    # already configured one in the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CollegeManagement.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from import_error
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"aitha.kowshik3@gmail.com"
] | aitha.kowshik3@gmail.com |
111cd379d87c834c6be078e58efd77edf5971520 | 2abedfccdff742f243b29b74cea8dcb960ddbcc8 | /xiecheng1.py | ad5e4c2b3a09849ef3329457781f8bc95201788d | [] | no_license | gainiu/pythonTestFile | ad9d132edbc00bfd7d12b2b6f529c01da188c884 | 380f56d12f21f3c3f6fc2ffc49a58ead1dc3023f | refs/heads/master | 2021-05-17T02:53:47.722727 | 2020-10-09T14:53:46 | 2020-10-09T14:53:46 | 250,585,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py |
import asyncio
import datetime
async def crawl_page(url):
    """Simulate fetching *url*.

    The number of seconds to sleep is taken from the url's trailing
    "_<n>" suffix; progress is reported on stdout before and after.
    """
    print('crawling {}'.format(url))
    delay = int(url.split('_')[-1])
    await asyncio.sleep(delay)
    print('OK {}'.format(url))
async def main(urls):
    """Crawl the given urls sequentially: each page is fully awaited
    before the next one starts (no concurrency)."""
    for target in urls:
        await crawl_page(target)
# Time the sequential crawl of four simulated pages (1+2+3+4 = 10 s of sleeps)
# and report the elapsed wall-clock seconds.
start=datetime.datetime.now()
asyncio.run(main(['url_1', 'url_2', 'url_3', 'url_4']))
end=datetime.datetime.now()
print('wait time:{}s'.format((end-start).seconds))
"xiaomin@ut.cn"
] | xiaomin@ut.cn |
f91139bd80047041c60073a67980b751a340ce4b | 05fd17ba8b6aa8187f10a3898e31e77f33567342 | /backend/urls.py | 03744e796e1ea1d742d44d36a6b1ba2a2e0efcb1 | [] | no_license | kubakin/tvoy-kvest | 747d9b615bb4c5305c2beafc030636db9f100a4d | 01acc21231f8cced6469e4021973d7870c362484 | refs/heads/master | 2023-06-04T04:18:01.855386 | 2021-06-20T13:40:02 | 2021-06-20T13:40:02 | 320,089,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | """backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from backend import settings
urlpatterns = [
    path('admin/', admin.site.urls), # Django admin panel
    path('auth/', include('djoser.urls')), # djoser authentication endpoints
    path('auth/', include('djoser.urls.jwt')), # djoser JWT token endpoints
    path('api/', include('rest_framework.urls')), # DRF browsable-API login/logout
    path('auth/', include('djoser.urls.authtoken')), # djoser token-auth endpoints
    path('data/', include('app.urls')), # application data API
]
# Serve uploaded media through Django only in DEBUG mode; in production the
# web server is expected to serve MEDIA_URL itself.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"lord.pogudin@yandex.ru"
] | lord.pogudin@yandex.ru |
a379cf6e57932968e1fde9b842122129a8ba1dfb | c4b8ae25752f7396538843f07cf413a746449dca | /src/main/python/lib/alblogs/__init__.py | f574a94d2823201b097525b4572c519171709a36 | [] | no_license | walmvdw/alblogs | e1f729e79dd2a822324de15bf2a989f2d17a8b82 | e0748f387a8f324a73977b43b4d65619dcb6338a | refs/heads/master | 2020-03-24T12:03:45.622847 | 2018-08-09T07:10:12 | 2018-08-09T07:10:12 | 142,702,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,994 | py | import argparse
import os
import logging.config
from alblogs import config, logsdb, statsdb
CONFIGURATION = None
LOGGER = None
def _find_config_path(options):
if options.config:
config_path = options.config
else:
config_path = os.environ.get('ALBLOGS_CONFIG_PATH')
if config_path is None:
config_path = os.getcwd()
if config_path is None:
raise RuntimeError("Unable to determine configuration path")
if not os.path.exists(config_path):
raise RuntimeError("Config path '{0}' does not exist".format(config_path))
config_file = os.path.join(config_path, "application.config")
if not os.path.exists(config_file):
raise RuntimeError("Config file '{0}' does not exist".format(config_file))
return config_path
def _init_logging(config_path):
log_config = os.path.join(config_path, "logging.config")
if os.path.exists(log_config):
logging.config.fileConfig(log_config)
global LOGGER
LOGGER = logging.getLogger(__name__)
LOGGER.info("Logging initialized from {0}".format(log_config))
else:
print("NO LOGGING CONFIGURATION FOUND AT {0}, LOGGING IS NOT INITIALIZED".format(log_config))
def _init_configuration(config_path):
    """Create the module-level Configuration singleton from *config_path*."""
    global CONFIGURATION
    # Replaces any previously loaded configuration.
    CONFIGURATION = config.Configuration(config_path)
def get_default_argparser():
    """Build the argument parser shared by all alblogs entry points."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-c",
        "--config",
        action='store',
        help="Full path to the configuration directory",
    )
    return arg_parser
def initialize(options):
    """Bootstrap the library: locate the config directory, then set up
    logging and the global configuration.

    *options* is the namespace produced by get_default_argparser().
    """
    config_path = _find_config_path(options)
    _init_logging(config_path)
    _init_configuration(config_path)
def get_configuration():
    """Return the module-level Configuration (None until initialize() ran).

    Reading a module global needs no ``global`` declaration.
    """
    return CONFIGURATION
def open_logsdb(filename, create=False):
    """Open (optionally creating) the logs database at *filename* and return it."""
    database = logsdb.Database(filename, create)
    database.open()
    return database
def open_statsdb(filename, create=False):
    """Open (optionally creating) the stats database at *filename* and return it."""
    database = statsdb.Database(filename, create)
    database.open()
    return database
| [
"mark@markwal.com"
] | mark@markwal.com |
9250a9bbaeeec6d6a19bd7dd337e061b79b4b28f | ef73c542493eaa93df0cf1b776fe049cff45948d | /Django/python3/APIrest/APIrest/settings.py | d36cf11b0845b3c12fa4abf57dc5bd18a59b0cb3 | [
"MIT"
] | permissive | Gambl3r08/ejercicios-Python | d2c770bb79e4fe63bec70ed1c40fb193ed3da460 | ddf13b40c611f892112ebbe7bc907f5765998ea0 | refs/heads/main | 2023-06-09T20:53:32.966625 | 2021-06-30T16:38:10 | 2021-06-30T16:38:10 | 333,296,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,276 | py | """
Django settings for APIrest project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY: this key is committed to source control; rotate it and load it
# from the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = 'django-insecure-n7k87tqpg+hab7($suu4o1$*=*iku-n*&-hi#7nf5ct@iwunzy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # must be False in production
ALLOWED_HOSTS = []  # empty: Django only accepts localhost-style hosts while DEBUG is True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'APIrest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'APIrest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"robertojoselozada@gmail.com"
] | robertojoselozada@gmail.com |
b58084cfdb07122a03dfad23b32755d8b04bb7ff | 4bdbac3e3c6cc8ddc46b0ef433daff5006789aea | /logger.py | 9e3408c625a1cd7424713eab1ea943aefb1c30de | [
"MIT"
] | permissive | jhaip/stream-logger | f0dcad143a108d2e5c2824bd085f91107f6d7714 | 001f525b19d7a7d05b508bf0ba81ca9f03948015 | refs/heads/master | 2021-08-16T05:10:08.355669 | 2017-11-19T02:00:18 | 2017-11-19T02:00:18 | 106,340,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,197 | py | #!/usr/bin/env python
# Takes large chunks of code from
# the Segment Python logging library: https://github.com/segmentio/analytics-python
# and this post on Stack overflow for reading the command line: https://stackoverflow.com/questions/11109859/pipe-output-from-shell-command-to-a-python-script
# MIT License
from uuid import uuid4
from datetime import datetime
from threading import Thread
import logging
import sys
import time
import requests
import atexit
try:
import queue
from queue import Empty
except:
import Queue as queue
from Queue import Empty
REMOTE_URL = 'http://localhost:5000/api/data';
class Client(object):
    """Reads lines from stdin (or a file named in argv[1]) and queues them for
    asynchronous upload to a remote HTTP endpoint via a Consumer thread.

    Structure appears adapted from Segment's analytics-python client (MIT).
    """

    logger = logging.getLogger('stream-logger')

    def __init__(self, max_queue_size=10000, host=None):
        """Create the client, start the consumer thread and begin reading input.

        :param max_queue_size: maximum number of pending messages to buffer
        :param host: URL the consumer POSTs batches to
        """
        self.queue = queue.Queue(max_queue_size)
        self.consumer = Consumer(self.queue, host=host, headers=None)
        self.input_stream = None
        self.logger.setLevel(logging.DEBUG)
        self.logger.info("------ first post")
        # On program exit, allow the consumer thread to exit cleanly.
        # This prevents exceptions and a messy shutdown when the interpreter is
        # destroyed before the daemon thread finishes execution. However, it
        # is *not* the same as flushing the queue! To guarantee all messages
        # have been delivered, you'll still need to call flush().
        atexit.register(self.join)
        self.consumer.start()
        self.open_stream()
        self.run()

    def open_stream(self):
        """Select the input source: a piped stdin, else the file named in argv[1]."""
        if not sys.stdin.isatty():
            # use stdin if it's full
            self.input_stream = sys.stdin
        else:
            # otherwise, read the given filename
            try:
                input_filename = sys.argv[1]
            except IndexError:
                message = 'need filename as first argument if stdin is not full'
                raise IndexError(message)
            else:
                # BUG FIX: mode 'rU' was removed in Python 3.11; plain 'r'
                # already provides universal-newline behaviour on Python 3.
                self.input_stream = open(input_filename, 'r')

    def run(self):
        """Main read loop: enqueue each input line until EOF (non-tty input)."""
        empty_reads = 0
        try:
            while True:
                line = self.input_stream.readline()
                if line:
                    self.log(line)
                    empty_reads = 0
                if not self.input_stream.isatty():
                    # A few consecutive empty reads on a non-interactive
                    # stream means we have reached EOF.
                    empty_reads += 1
                    if empty_reads > 2:
                        self.logger.info("EMPTY COUNT")
                        break
        except Exception as e:
            self.logger.error(e)
        finally:
            self.flush()
            # BUG FIX: close the input file (but never stdin).
            if self.input_stream is not None and self.input_stream is not sys.stdin:
                self.input_stream.close()
            self.logger.info("EXITING")

    def log(self, msg):
        """Push a new `msg` onto the queue, return `(success, msg)`."""
        try:
            self.queue.put(msg, block=False)
            self.logger.info('enqueued message. %s' % msg)
            return True, msg
        except queue.Full:
            # BUG FIX: was `self.log.warn(...)` — `self.log` is this method,
            # not a logger, so the original raised AttributeError here.
            self.logger.warning('stream-logger queue is full')
            return False, msg

    def flush(self):
        """Forces a flush from the internal queue to the server."""
        pending = self.queue  # renamed: the old local shadowed the queue module
        size = pending.qsize()
        pending.join()
        # Note that this message may not be precise, because of threading.
        self.logger.info('successfully flushed about %s items.', size)

    def join(self):
        """Ends the consumer thread once the queue is empty. Blocks until finished."""
        self.consumer.pause()
        try:
            self.consumer.join()
        except RuntimeError:
            # consumer thread has not started
            pass
class Consumer(Thread):
    """Daemon thread that drains the client's queue and POSTs batches to a server."""

    logger = logging.getLogger('stream-logger')

    def __init__(self, queue, host=None, upload_size=100, headers=None):
        """Create a consumer thread.

        :param queue: queue.Queue shared with the Client
        :param host: URL to POST batches to
        :param upload_size: maximum number of messages per batch
        :param headers: optional dict of extra HTTP headers
        """
        Thread.__init__(self)
        # Make consumer a daemon thread so that it doesn't block program exit
        self.daemon = True
        self.upload_size = upload_size
        self.host = host
        self.headers = headers
        self.queue = queue
        # It's important to set running in the constructor: if we are asked to
        # pause immediately after construction, we might set running to True in
        # run() *after* we set it to False in pause... and keep running forever.
        self.running = True
        self.retries = 3
        # Optional callback invoked as on_error(exception, batch) when an
        # upload fails.  BUG FIX: this attribute was read in upload() but
        # never initialized, so the error path raised AttributeError.
        self.on_error = None
        self.logger.setLevel(logging.DEBUG)
        self.logger.addHandler(logging.StreamHandler())

    def run(self):
        """Runs the consumer until pause() is called."""
        self.logger.info('consumer is running...')
        while self.running:
            self.upload()
        self.logger.info('consumer exited.')

    def pause(self):
        """Pause the consumer."""
        self.running = False

    def upload(self):
        """Upload the next batch of items, return whether successful."""
        success = False
        batch = self.next()
        if len(batch) == 0:
            return False
        try:
            self.logger.debug('going to request!')
            self.request(batch)
            success = True
        except Exception as e:
            self.logger.error('error uploading: %s', e)
            success = False
            if self.on_error:
                self.on_error(e, batch)
        finally:
            # mark items as acknowledged from queue
            for item in batch:
                self.queue.task_done()
            # returning from finally deliberately makes upload() never raise
            return success

    def next(self):
        """Return the next batch of items to upload (at most upload_size)."""
        source = self.queue  # renamed: old local shadowed the queue module
        items = []
        while len(items) < self.upload_size:
            try:
                item = source.get(block=True, timeout=0.5)
                items.append(item)
            except Empty:
                break
        return items

    def request(self, batch, attempt=0):
        """POST the batch as a single message; retry up to self.retries times."""
        msg = {}
        # timestamp is always generated here; the old msg.get('timestamp')
        # lookup on a fresh dict could never find one.
        msg['timestamp'] = datetime.utcnow().isoformat()
        msg['messageId'] = str(uuid4())
        msg['value'] = "".join(batch)
        msg['source'] = 'Serial'
        try:
            self.logger.info("-------")
            self.logger.info(msg)
            self.logger.info(self.host)
            self.logger.info(self.headers)
            r = requests.post(
                self.host,
                headers=self.headers,
                json=msg
            )
            if 200 <= r.status_code < 300:
                self.logger.debug('data uploaded successfully')
                return r
            else:
                self.logger.debug('non 2XX response')
                self.logger.debug(r)
                self.logger.debug(r.text)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            if attempt > self.retries:
                raise
            self.request(batch, attempt + 1)
client = Client(host=REMOTE_URL)
| [
"haipjacob@gmail.com"
] | haipjacob@gmail.com |
8a3b47219f2a52a2413366c81f95eb3101b2e66d | 090a4e026addc9e78ed6118f09fd0d7d4d517857 | /graph_objs/histogram2dcontour/_marker.py | 47c90e4d1725de3a5c81e03eccb04a515a0076d3 | [
"MIT"
] | permissive | wwwidonja/new_plotly | 0777365e53ea7d4b661880f1aa7859de19ed9b9a | 1bda35a438539a97c84a3ab3952e95e8848467bd | refs/heads/master | 2023-06-04T19:09:18.993538 | 2021-06-10T18:33:28 | 2021-06-10T18:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,455 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
# NOTE: auto-generated Plotly graph-object wrapper (the "new_plotly" fork);
# structural changes belong in the code generator, not in this file.
class Marker(_BaseTraceHierarchyType):

    # class properties
    # --------------------
    _parent_path_str = "histogram2dcontour"
    _path_str = "histogram2dcontour.marker"
    # Property names accepted by this node of the figure hierarchy.
    _valid_props = {"color", "colorsrc"}

    # color
    # -----
    @property
    def color(self):
        """
        Sets the aggregation data.

        The 'color' property is an array that may be specified as a tuple,
        list, numpy array, or pandas Series

        Returns
        -------
        numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for color.

        The 'colorsrc' property must be specified as a string or
        as a new_plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the aggregation data.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        """

    def __init__(self, arg=None, color=None, colorsrc=None, **kwargs):
        """
        Construct a new Marker object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`new_plotly.graph_objs.histogram2dcontour.Marker`
        color
            Sets the aggregation data.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .

        Returns
        -------
        Marker
        """
        super(Marker, self).__init__("marker")
        # Internal construction path: reuse the parent reference directly.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the new_plotly.graph_objs.histogram2dcontour.Marker
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.histogram2dcontour.Marker`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| [
"wwwidonja@gmail.com"
] | wwwidonja@gmail.com |
1aee5532b5f9024e07b42354d594950663f21c62 | 53ecc78b4ad0c20dc085a281006ee8e041642fb4 | /simpson.py | 201689e1ae4bad7441453c8813b71c3cfcd3004a | [] | no_license | lobiviam/MPI_python | 4faca5fc585e943ba4ec372846dca3a1e050ca49 | 68dc0672a0beaacc2689c2e3eebfc1c231a20825 | refs/heads/master | 2021-05-12T10:27:17.736460 | 2018-01-17T23:10:20 | 2018-01-17T23:10:20 | 117,352,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | import numpy
import sys
import math
from mpi4py import MPI
from mpi4py.MPI import ANY_SOURCE
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# function to integrate
def f(x):
    """Integrand f(x) = 4 / (1 + x^2); its integral over [0, 1] equals pi."""
    denominator = 1 + math.pow(x, 2)
    return 4 / denominator
def integrate(a, b, n):
    """Composite Simpson's rule approximation of the integral of f over [a, b].

    The interval is split into n equal subintervals; each contributes
    (f(left) + 4*f(mid) + f(right)) * h / 6, where h is the subinterval width.

    :param a: lower bound
    :param b: upper bound
    :param n: number of subintervals (truncated with int())
    :return: Simpson approximation of the integral of the module-level f
    """
    integral = 0
    step = (b - a) / n
    # BUG FIX: a stray '3' line corrupted this loop, and range(1, int(n))
    # silently dropped the final subinterval [b - step, b]; the loop must run
    # i = 1 .. n so all n subintervals are covered.
    for i in range(1, int(n) + 1):
        x = a + i * step
        integral = integral + (f(x - step) + 4 * f(x - 0.5 * step) + f(x)) * (step / 6)
    return integral
def main():
    """MPI driver: each rank integrates its slice of [a, b]; rank 0 sums the
    partial results received from every other rank.

    Command-line arguments: a (lower bound), b (upper bound), n (total
    number of subintervals).  Returns a status string on rank 0 and None on
    all other ranks.
    """
    # takes in command-line arguments [a,b,n]
    a = float(sys.argv[1])
    b = float(sys.argv[2])
    n = int(sys.argv[3])
    step_size = (b - a) / n
    # current_n is the number of rectangles
    # NOTE(review): n / size is float division on Python 3; int(n) inside
    # integrate() truncates, so subintervals are lost when size does not
    # divide n evenly — TODO confirm intended behaviour.
    current_n = n / size
    current_a = a + rank * current_n * step_size  # start point for certain process
    current_b = current_a + current_n * step_size  # end point
    # 1-element numpy buffers, as required by mpi4py's buffer-based Send/Recv.
    integral = numpy.zeros(1)
    received = numpy.zeros(1)
    integral[0] = integrate(current_a, current_b, current_n)
    # root node receives results from all processes and sums them
    if rank == 0:
        total = integral[0]
        for i in range(1, size):
            comm.Recv(received, ANY_SOURCE)
            total += received[0]
    else:
        # other process send their result
        comm.Send(integral, 0)
    if comm.rank == 0:
        return "Success!, integral is equal to {0}".format(total)
if __name__ == '__main__':
main() | [
"lobiviam@gmail.com"
] | lobiviam@gmail.com |
9dce206f820695e94e6139412c9873479d016c8d | e945fa310c0777fc08a0b9c7f9b8e8ecf1bcbcbb | /eval2.py | 928eac5bd56803b95c0b03e1ecbb98bf02fc803b | [] | no_license | shatskiyIlya/hello-world | b663bd9626a246b83521985080759a60ae9ce16b | ec60bbaf07ca50f0f8e336ad21ddbf38aa7d0f18 | refs/heads/master | 2020-06-11T06:28:27.858358 | 2019-07-03T12:13:37 | 2019-07-03T12:13:37 | 193,875,977 | 0 | 0 | null | 2019-06-27T10:10:46 | 2019-06-26T09:43:56 | null | UTF-8 | Python | false | false | 110 | py | #!/usr/bin/env python3
import os
# Demonstration of eval() scoping.  With full globals the expression below
# evaluates normally:
assert eval("2 + 3 * len('hello')") == 17
# With an empty globals dict the name 'os' is not visible inside eval, so this
# call raises NameError — presumably the point of the demo.  SECURITY NOTE:
# restricting globals this way is NOT a sandbox; builtins are still injected,
# and eval() must never be used on untrusted input.
eval("os.system('clear')", {})
| [
"noreply@github.com"
] | shatskiyIlya.noreply@github.com |
343c09ee3435c386839c27bd67fa59ee8cbea696 | ddbbae38a4d902a21075bca3c9824b81a84e915f | /freetrade/feeds/kibot.py | 545ec1bd92b8fb123c6708347e42b7a05ff02fb8 | [] | no_license | jmrichardson/freetrade | 5a1faa998e6449b35f5ca47dab09666bfc7f0666 | 3c04782ea38a3a8d2943aa4ed092ebfe84a2c65e | refs/heads/master | 2022-05-28T01:12:28.087735 | 2020-05-02T00:41:35 | 2020-05-02T00:41:35 | 260,307,433 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | import pandas as pd
from freetrade.feeds.feed import Feed
class Kibot(Feed):
    """Price-bar feed backed by a Kibot CSV export.

    Loads rows of (date, time, open, high, low, close, volume) into a
    DataFrame indexed by a combined datetime column.
    """

    def __init__(self, path, nrows=None):
        """Load the CSV at *path*; *nrows* optionally limits rows read."""
        # X must have format of open, high, low, close, volume - with datetime index
        self.X = pd.read_csv(path, names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'], nrows=nrows)
        # Kibot splits the timestamp across 'date' and 'time'; merge then parse.
        self.X['date'] = self.X.date + ' ' + self.X.time
        self.X.drop(columns=['time'], inplace=True)
        self.X['date'] = pd.to_datetime(self.X['date'])
        self.X.set_index('date', inplace=True)
"jmrichardson@gmail.com"
] | jmrichardson@gmail.com |
0e35a354a75ae2a7b94e4f6e68da838999426d04 | 6b55ea780749ccb401f762be81a4b2446edf7756 | /src/keyboard/motorOffsetTest.py | 4c2a41c7ea5a4dfc63442a8d8a06e6ae57bf1990 | [] | no_license | GoldeneyeRohan/RDRONE | a59709483902ec9edf4c5971b43b704575a0ba3e | 017edeef5e48d8fbccaf4e0fec33473057fd4c3e | refs/heads/master | 2020-03-23T03:56:25.263069 | 2018-09-22T03:37:45 | 2018-09-22T03:37:45 | 141,057,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | #!/usr/bin/env python
import getch
import rospy
from RDRONE.msg import Throttle
def setThrottle(char, throttleFront, throttleRear):
    """Adjust the throttles in 20-unit steps based on a key press.

    'a'/'d' raise/lower the front throttle, 'j'/'l' the rear one; both are
    clamped to the 1200-1800 range.  Any other key leaves both unchanged.
    Returns the (front, rear) pair.
    """
    front, rear = throttleFront, throttleRear
    if char == 'a' and front <= 1780:
        front += 20
    elif char == 'd' and front >= 1220:
        front -= 20
    elif char == 'j' and rear <= 1780:
        rear += 20
    elif char == 'l' and rear >= 1220:
        rear -= 20
    return front, rear
def key_node():
    """ROS node: publishes Throttle messages adjusted by blocking keyboard input.

    Publishes the current front/rear throttle on 'throttle_cmd' each loop,
    then waits for a key and applies setThrottle().  'f' exits the process.
    """
    pub = rospy.Publisher('throttle_cmd', Throttle)
    rospy.init_node('key_node')
    # Both throttles start at the lower clamp of setThrottle()'s 1200-1800 range.
    throttleFront = 1200
    throttleRear = 1200
    print('RDRONE constant motor offset test node. ')
    # NOTE(review): the banner advertises upper-case A/D but setThrottle()
    # only reacts to lower-case a/d/j/l — confirm intended keys.
    print('Use A to increment trottle by 20, use D to decrement throttle by 20 (range is 1200-1800)')
    print('Use F to exit')
    while not rospy.is_shutdown():
        msg = Throttle()
        print(throttleFront, throttleRear)
        # Front throttle drives both front motors, rear drives both rear motors.
        msg.FL = throttleFront
        msg.FR = throttleFront
        msg.RR = throttleRear
        msg.RL = throttleRear
        pub.publish(msg)
        # getch blocks until a single key is pressed (no Enter required).
        char = getch.getch()
        throttleFront, throttleRear = setThrottle(char,throttleFront, throttleRear)
        if char == 'f':
            exit(0)
if __name__=="__main__":
try:
key_node()
except rospy.ROSInterruptException:
pass
| [
"rohan.sinha@berkeley.edu"
] | rohan.sinha@berkeley.edu |
23958a20d41b8c8d0224fb2c7cdd7b3b3106acc2 | 2e919a1e9f5d6b0ffef37b626d5dc97a5b2f63cd | /Apunte_Teorico/Capitulo_16_LISTAS_ENLAZADAS/listas_enlazadas.py | e81e7d60cda2bcabba0822b4bafdbd2752118951 | [] | no_license | lucasguerra91/some-python | c3992ac6c49f015bf14f37996ff3c8b0ed7b4d0a | 0678e8d884e0641d592a3a457db11cc2085c8b27 | refs/heads/master | 2021-01-18T13:06:32.454114 | 2019-06-24T01:46:58 | 2019-06-24T01:46:58 | 80,723,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,896 | py | from Parcialitos.Tercero import nodo as n
from Parcialitos.Tercero import iterador_lista_enlazada as iter
class ListaEnlazada:
""" Modela una lista enlazada"""
def __init__(self):
""" Crea una lista enlazada vacia"""
# referencia al primer nodo (None si la lista esta vacia)
self.prim = None
# referencia al ultimo
self.ultimo = None
# cantidad de elementos de la lista
self.len = 0
def __len__(self):
return self.len
# Ejercicio 11.6
def __str__(self):
""" Genera una salida legible de lo que contiene la lista, similar a las listas de Python"""
lista = []
actual = self.prim
while actual:
lista.append(str(actual))
actual = actual.prox
return str(lista)
def __iter__(self):
return iter.IteradorListaEnlazada(self.prim)
def __index__(self, x):
""" Busca el indice de la primer aparicion de x dentro de la lista, si no esta
levanta ValueError"""
if not self.esta_vacia():
pos = 0
actual = self.prim
while actual:
if actual.dato == x:
return pos
actual = actual.prox
pos += 1
raise ValueError(f"{x} no se encuentra dentro de la lista")
else:
raise ValueError("Lista vacía.")
def esta_vacia(self):
if self.prim:
return False
return True
def pop(self, i=None):
""" Elimina el nodo en la posicion i, y devuelve el dato contenido.
Si i esta fuera de rango, se levanta la excepcion IndexError
Si no se recibe la posicion, devuelve el ultimo elemento"""
if i is None:
i = self.len - 1
if i < 0 or i >= self.len:
raise IndexError('Indice fuera de rango')
if i == 0:
# caso particular: saltear la cabecera de la lista
dato = self.prim.dato
self.prim = self.prim.prox
else:
# buscar los nodos en las posiciones (i-1) e (i)
n_ant = self.prim
n_act = n_ant.prox
for pos in range(1, i):
n_ant = n_act
n_act = n_ant.prox
# Guardar el dato y descartar el nodo
if n_act == self.ultimo:
n_ant.prox = None
self.ultimo = n_ant
else:
n_ant.prox = n_act.prox
dato = n_act.dato
self.len -= 1
return dato
def remueve_por_valor(self, x):
"""Borra la primera aparicion del valor x en la lista.
Si x no esta en la lista, levanta ValueError"""
if self.len == 0:
raise ValueError("Lista vacía")
if self.prim.dato == x:
# Caso particular: saltear la cabecera de la lista
self.prim = self.prim.prox
else:
# Busca el nodo anterior al que contiene x (n_ant)
n_ant = self.prim
n_act = n_ant.prox
while n_act is not None and n_act.dato != x:
n_ant = n_ant.prox
n_act = n_act.prox
if not n_act.prox and n_act.dato != x:
raise ValueError(f"El valor {n_act}no esta en la lista")
if n_act == self.ultimo:
n_ant.prox = None
self.ultimo = n_ant
else:
n_ant.prox = n_act.prox
self.len -= 1
def insertar_en_pos(self, i, x):
""" Inserta el elemento x en la posicion i.
Si la posicion es invalida, levanta IndexError"""
if i < 0 or i > self.len:
raise IndexError('Posición inválida')
nuevo = n.Nodo(x)
if i == 0:
# Caso particular, insertar al principio
nuevo.prox = self.prim
self.prim = nuevo
else:
# Buscar el nodo anterior a la posicion deseada
anterior = self.prim
actual = anterior.prox
for pos in range(1, i):
anterior = anterior.prox
actual = actual.prox
# Intercalar el nuevo nodo
if anterior == self.ultimo:
# print(f"{anterior}es ultimo")
anterior.prox = nuevo
self.ultimo = nuevo
else:
anterior.prox = nuevo
nuevo.prox = actual
self.len += 1
def insertar_al_final(self, x):
""" Agrega un elemento al final ; si la lista esta vacia se actualiza el primero y el ultimo"""
# print(f"DEBUGG : ENTRO {x}")
nuevo = n.Nodo(x)
if not self.esta_vacia():
self.ultimo.prox = nuevo
self.ultimo = nuevo
else:
self.prim = self.ultimo = nuevo
self.len += 1
# Ej 11.7
def extend(self, otra):
""" Se extiende la lista con otra que se recibe como parametro """
if self.esta_vacia() or otra.esta_vacia():
raise ValueError("Una de las listas esta vacia")
self.ultimo.prox = otra.prim
self.ultimo = otra.ultimo
self.len += otra.len
# Ej 11.8
def remover_todos(self, elemento):
""" Remueve todas las apariciones del elemento en la lista y devuelve la cantidad removida.
Si esta vacia levanta error, si el elemento no esta levanta error """
if not self.esta_vacia():
try:
borrados = 0
ant = None
act = self.prim
while act:
if act.dato == elemento:
# evalua el primero
if act == self.prim:
self.prim = self.prim.prox
# en caso de que sea el último
elif act == self.ultimo:
self.ultimo = ant
ant.prox = None
# en caso de que sea un don nadie
else:
ant.prox = act.prox
borrados += 1
ant = act
act = ant.prox
self.len -= borrados
return borrados
except:
return 0
else:
raise ValueError("La lista está vacía.")
# Ej 11.9
def duplicar_elemento(self, elemento):
"""Recibe un elemento y duplica todas sus apariciones dentro de la lista """
if not self.esta_vacia():
act = self.prim
while act:
if act.dato == elemento:
# Ojo con crearlo afuera del while, puede ser mas de uno
nuevo = n.Nodo(elemento)
if act == self.ultimo:
act.prox = nuevo
nuevo.prox = None
self.ultimo = nuevo
else:
nuevo.prox = act.prox
act.prox = nuevo
self.len += 1
# Si lo agrego no lo evalúo en la sgte vuelta
act = act.prox.prox
else:
act = act.prox
else:
raise ValueError("Lista vacía.")
li = ListaEnlazada()
li.insertar_al_final('Hola')
li.insertar_al_final('Hola2')
li.insertar_al_final('Hola3')
# Instertar en posicion
li.insertar_en_pos(3, 'test')
li.insertar_en_pos(3, 'test2')
li.insertar_en_pos(3, 'test3')
li.insertar_al_final('Hola4')
# Remueve por valor el primero
# li.remueve_por_valor('Hola')
# Remueve por valor cualquier
# li.remueve_por_valor('Hola2')
# Remueve por valor el ultimo
# li.remueve_por_valor('test')
# pop , borra el ultimo
li.pop()
li.pop(2)
print(li)
# for valor in li:
# print(valor)
#
# print('\n')
# print(f"Primero : {li.prim}")
# print(f"Ultimo: {li.ultimo}")
# print(f"Longitud: {li.len}")
li2 = ListaEnlazada()
li2.insertar_al_final('Hola4')
li2.insertar_al_final('Hola5')
li2.insertar_al_final('Hola6')
print(li2)
li.extend(li2)
print(li)
print(f"\nPrimero : {li.prim}")
print(f"Ultimo: {li.ultimo}")
print(f"Longitud: {li.len}")
# li3 = ListaEnlazada()
# li.extend(li3)
li.insertar_al_final('test')
print(li)
# print(f"Se removieron {li.remover_todos('test')} apariciones de 'test'")
print(f"\nSe removieron {li.remover_todos('Hola')} apariciones de 'Hola'")
print(li)
print(f"Primero : {li.prim}")
print(f"Ultimo: {li.ultimo}")
print(f"Longitud: {li.len}")
li3 = ListaEnlazada()
li3.insertar_al_final(8)
li3.insertar_al_final(5)
li3.insertar_al_final(8)
li3.insertar_al_final(8)
li3.insertar_al_final(1)
li3.insertar_al_final(8)
print(li3)
li3.duplicar_elemento(8)
print(li3)
print(f"Primero : {li3.prim}")
print(f"Ultimo: {li3.ultimo}")
print(f"Longitud: {li3.len}")
| [
"guerra986@gmail.com"
] | guerra986@gmail.com |
4c486a78eddaef361ab1c40a262d574b834990eb | 2ccb7f724fd3b055cd3791001d3d2dae5139f4c6 | /softwares/hisat2-2.1.0/hisatgenotype_scripts/hisatgenotype_locus_samples.py | 8cb08e363ccca70ce0d9eb2a94781c1df08f918a | [
"MIT",
"GPL-3.0-only"
] | permissive | NCBI-Hackathons/ContainerInception | b6a8aaeacc50a2d0685aae2f8d18c61379469901 | aad1ead6798a4644d1ee2a698b4644bec2301e54 | refs/heads/master | 2020-03-07T19:14:17.470557 | 2018-04-05T00:49:09 | 2018-04-05T00:49:09 | 127,665,600 | 6 | 7 | MIT | 2018-04-04T22:31:10 | 2018-04-01T19:42:40 | Python | UTF-8 | Python | false | false | 9,347 | py | #!/usr/bin/env python
#
# Copyright 2015, Daehwan Kim <infphilo@gmail.com>
#
# This file is part of HISAT 2.
#
# HISAT 2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HISAT 2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HISAT 2. If not, see <http://www.gnu.org/licenses/>.
#
import sys, os, subprocess, re, threading
import inspect
import random
import glob
from argparse import ArgumentParser, FileType
import hisatgenotype_typing_common as typing_common
"""
"""
class myThread(threading.Thread):
    """Worker thread: repeatedly claims the next sample index under the shared
    lock and genotypes that sample via worker().  (Python 2 code.)"""

    def __init__(self,
                 lock,
                 paths,
                 reference_type,
                 region_list,
                 num_editdist,
                 max_sample,
                 assembly,
                 out_dir,
                 verbose):
        threading.Thread.__init__(self)
        self.lock = lock
        self.paths = paths
        self.reference_type = reference_type
        self.region_list = region_list
        self.num_editdist = num_editdist
        self.max_sample = max_sample
        self.assembly = assembly
        self.out_dir = out_dir
        self.verbose = verbose

    def run(self):
        """Claim work items until the path list (or max_sample) is exhausted."""
        global work_idx
        while True:
            # The lock serializes access to the shared work_idx counter.
            self.lock.acquire()
            my_work_idx = work_idx
            work_idx += 1
            self.lock.release()
            if my_work_idx >= len(self.paths) or \
               my_work_idx >= self.max_sample:
                return
            worker(self.lock,
                   self.paths[my_work_idx],
                   self.reference_type,
                   self.region_list,
                   self.num_editdist,
                   self.assembly,
                   self.out_dir,
                   self.verbose)
"""
"""
work_idx = 0
def worker(lock,
           path,
           reference_type,
           region_list,
           num_editdist,
           assembly,
           out_dir,
           verbose):
    """Genotype one sample: run hisatgenotype_locus.py per gene family and
    print "genome<TAB>allele<TAB>abundance" lines to stdout.

    *path* must point at a "<sample>.extracted.1.fq.gz" file with a matching
    mate-2 file next to it; anything else is silently skipped.  The shared
    *lock* serializes writes to stdout/stderr across threads.  (Python 2 code.)
    """
    fq_name = path.split('/')[-1]
    read_dir = '/'.join(path.split('/')[:-1])
    genome = fq_name.split('.')[0]
    if not fq_name.endswith("extracted.1.fq.gz"):
        return
    read_basename = fq_name[:fq_name.find("extracted.1.fq.gz")]
    read_fname_1, read_fname_2 = "%s/%sextracted.1.fq.gz" % \
        (read_dir, read_basename), "%s/%sextracted.2.fq.gz" % (read_dir, read_basename)
    # Both mates are required; skip the sample otherwise.
    if not os.path.exists(read_fname_1) or not os.path.exists(read_fname_2):
        return
    lock.acquire()
    print >> sys.stderr, genome
    lock.release()
    for family, loci in region_list.items():
        # Build the hisatgenotype_locus.py command line for this gene family.
        test_hla_cmd = ["hisatgenotype_locus.py",
                        "--base", family]
        if len(loci) > 0:
            test_hla_cmd += ["--locus", ','.join(loci)]
        test_hla_cmd += ["--num-editdist", str(num_editdist)]
        test_hla_cmd += ["-1", read_fname_1, "-2", read_fname_2]
        test_hla_cmd += ["--assembly-base"]
        if out_dir != "":
            test_hla_cmd += ["%s/%s" % (out_dir, genome)]
        else:
            test_hla_cmd += [genome]
        if assembly:
            test_hla_cmd += ["--assembly"]
        if verbose:
            lock.acquire()
            print >> sys.stderr, ' '.join(test_hla_cmd)
            lock.release()
        proc = subprocess.Popen(test_hla_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        test_alleles = set()
        output_list = []
        # Keep only the ranked "abundance" lines from the subprocess output.
        for line in proc.stdout:
            line = line.strip()
            if line.find("abundance") == -1:
                continue
            rank, _, allele, _, abundance = line.split()
            # abundance[:-2] strips the trailing "%)" from e.g. "(97.1%)".
            output_list.append([allele, abundance[:-2]])
        lock.acquire()
        for output in output_list:
            allele, abundance = output
            print >> sys.stdout, "%s\t%s\t%s" % (genome, allele, abundance)
            sys.stdout.flush()
        lock.release()
"""
"""
def genotyping(read_dir,
               reference_type,
               region_list,
               num_editdist,
               nthreads,
               max_sample,
               assembly,
               out_dir,
               verbose):
    """Genotype every extracted-FASTQ sample under *read_dir* using a pool of
    myThread workers, preparing the per-family databases and HISAT2 indexes
    first.  (Python 2 code.)
    """
    for database_name in region_list:
        # Extract variants, backbone sequence, and other sequences
        typing_common.extract_database_if_not_exists(database_name,
                                                     [])  # locus_list
        # Build HISAT2's graph index
        typing_common.build_index_if_not_exists(database_name,
                                                "hisat2",
                                                "graph",
                                                1,  # threads
                                                verbose)
    if not os.path.exists(read_dir):
        print >> sys.stderr, "Error: %s does not exist." % read_dir
        sys.exit(1)
    if out_dir != "" and not os.path.exists(out_dir):
        os.mkdir(out_dir)
    # fastq files (mate-1 of each extracted sample)
    fq_fnames = glob.glob("%s/*.extracted.1.fq.gz" % read_dir)
    # One shared lock guards the work counter and stdout/stderr writes.
    lock = threading.Lock()
    threads = []
    for t in range(nthreads):
        thread = myThread(lock,
                          fq_fnames,
                          reference_type,
                          region_list,
                          num_editdist,
                          max_sample,
                          assembly,
                          out_dir,
                          verbose)
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
"""
"""
if __name__ == '__main__':
parser = ArgumentParser(
description='genotyping on many samples')
parser.add_argument("--reference-type",
dest="reference_type",
type=str,
default="gene",
help="Reference type: gene, chromosome, and genome (default: gene)")
parser.add_argument("--region-list",
dest="region_list",
type=str,
default="",
help="A comma-separated list of regions (default: empty)")
parser.add_argument('--read-dir',
dest="read_dir",
type=str,
default="",
help='read directory (e.g. read_input)')
parser.add_argument("--num-editdist",
dest="num_editdist",
type=int,
default=2,
help="Maximum number of mismatches per read alignment to be considered (default: 2)")
parser.add_argument("-p", "--threads",
dest="threads",
type=int,
default=1,
help="Number of threads")
parser.add_argument('--assembly',
dest='assembly',
action='store_true',
help='Perform assembly')
parser.add_argument("--max-sample",
dest="max_sample",
type=int,
default=sys.maxint,
help="Number of samples to be analyzed (default: sys.maxint)")
parser.add_argument("--out-dir",
dest="out_dir",
type=str,
default="",
help='Output directory (default: (empty))')
parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true',
help='also print some statistics to stderr')
args = parser.parse_args()
if args.read_dir == "":
print >> sys.stderr, "Error: please specify --read-dir."
sys.exit(1)
if not args.reference_type in ["gene", "chromosome", "genome"]:
print >> sys.stderr, "Error: --reference-type (%s) must be one of gene, chromosome, and genome." % (args.reference_type)
sys.exit(1)
region_list = {}
if args.region_list != "":
for region in args.region_list.split(','):
region = region.split('.')
if len(region) < 1 or len(region) > 2:
print >> sys.stderr, "Error: --region-list is incorrectly formatted."
sys.exit(1)
family = region[0].lower()
if len(region) == 2:
locus_name = region[1].upper()
if family not in region_list:
region_list[family] = set()
if len(region) == 2:
region_list[family].add(locus_name)
genotyping(args.read_dir,
args.reference_type,
region_list,
args.num_editdist,
args.threads,
args.max_sample,
args.assembly,
args.out_dir,
args.verbose)
| [
"upendrakumar.devisetty@googlemail.com"
] | upendrakumar.devisetty@googlemail.com |
61bc810fc4fc4d2843ef4fc8db129b5b81950867 | 0897e2254eeac5458e4f17caea43871dd72fda9a | /great_expectations/data_context/store/ge_cloud_store_backend.py | c1f83808921a628333eb6b3165b9ab2493ff4f69 | [
"Apache-2.0"
] | permissive | NulledExceptions/great_expectations | 10f7710d7520f0b57ded539ce33075ddeb47856d | 8704c3ecf6632a72c55e012ed117d56f85a21f74 | refs/heads/main | 2023-09-05T19:30:32.910045 | 2021-11-18T20:50:42 | 2021-11-18T20:50:42 | 431,229,866 | 1 | 0 | Apache-2.0 | 2021-11-23T19:35:34 | 2021-11-23T19:35:34 | null | UTF-8 | Python | false | false | 10,060 | py | import logging
from abc import ABCMeta
from json import JSONDecodeError
from typing import Dict, Optional
from urllib.parse import urljoin
import requests
from great_expectations.data_context.store.store_backend import StoreBackend
from great_expectations.data_context.types.refs import GeCloudResourceRef
from great_expectations.exceptions import StoreBackendError
from great_expectations.util import (
filter_properties_dict,
hyphen,
pluralize,
singularize,
)
logger = logging.getLogger(__name__)
class GeCloudStoreBackend(StoreBackend, metaclass=ABCMeta):
    """StoreBackend that persists objects via the GE Cloud REST API.

    Keys are fixed-length tuples of ``(resource_type, ge_cloud_id)``; every
    operation maps onto an HTTP call against
    ``{base_url}/accounts/{account_id}/{resource-name}[/{id}]``.
    """
    # Maps a GE Cloud resource type to the attribute name the API expects
    # the payload under in POST/PATCH bodies.
    PAYLOAD_ATTRIBUTES_KEYS = {
        "suite_validation_result": "result",
        "contract": "checkpoint_config",
        "data_context": "data_context_config",
        "expectation_suite": "suite",
        "rendered_data_doc": "rendered_data_doc",
    }
    # Extra keyword arguments that ``set`` may forward per resource type;
    # anything else is rejected by ``validate_set_kwargs``.
    ALLOWED_SET_KWARGS_BY_RESOURCE_TYPE = {
        "expectation_suite": {"clause_id"},
        "rendered_data_doc": {"source_type", "source_id"},
        "suite_validation_result": {"contract_id", "expectation_suite_id"},
    }
    def __init__(
        self,
        ge_cloud_credentials: Dict,
        ge_cloud_base_url: Optional[str] = "https://app.greatexpectations.io/",
        ge_cloud_resource_type: Optional[str] = None,
        ge_cloud_resource_name: Optional[str] = None,
        suppress_store_backend_id: Optional[bool] = True,
        manually_initialize_store_backend_id: Optional[str] = "",
        store_name: Optional[str] = None,
    ):
        """Configure the backend for one GE Cloud resource.

        At least one of ``ge_cloud_resource_type`` / ``ge_cloud_resource_name``
        must be given; the missing one is derived via pluralize/singularize.
        ``ge_cloud_credentials`` must contain ``account_id`` and
        ``access_token`` (used as a Bearer token on every request).
        """
        super().__init__(
            fixed_length_key=True,
            suppress_store_backend_id=suppress_store_backend_id,
            manually_initialize_store_backend_id=manually_initialize_store_backend_id,
            store_name=store_name,
        )
        assert ge_cloud_resource_type or ge_cloud_resource_name, (
            "Must provide either ge_cloud_resource_type or " "ge_cloud_resource_name"
        )
        self._ge_cloud_base_url = ge_cloud_base_url
        # Derive whichever of name/type was not provided from the other.
        self._ge_cloud_resource_name = ge_cloud_resource_name or pluralize(
            ge_cloud_resource_type
        )
        self._ge_cloud_resource_type = ge_cloud_resource_type or singularize(
            ge_cloud_resource_name
        )
        self._ge_cloud_credentials = ge_cloud_credentials
        # Initialize with store_backend_id if not part of an HTMLSiteStore
        if not self._suppress_store_backend_id:
            _ = self.store_backend_id
        # Gather the call arguments of the present function (include the "module_name" and add the "class_name"), filter
        # out the Falsy values, and set the instance "_config" variable equal to the resulting dictionary.
        self._config = {
            "ge_cloud_base_url": ge_cloud_base_url,
            "ge_cloud_resource_name": ge_cloud_resource_name,
            "ge_cloud_resource_type": ge_cloud_resource_type,
            "fixed_length_key": True,
            "suppress_store_backend_id": suppress_store_backend_id,
            "manually_initialize_store_backend_id": manually_initialize_store_backend_id,
            "store_name": store_name,
            "module_name": self.__class__.__module__,
            "class_name": self.__class__.__name__,
        }
        filter_properties_dict(properties=self._config, inplace=True)
    @property
    def auth_headers(self):
        """HTTP headers (content type + Bearer token) sent on every request."""
        return {
            "Content-Type": "application/vnd.api+json",
            "Authorization": f'Bearer {self.ge_cloud_credentials.get("access_token")}',
        }
    def _get(self, key):
        """GET the resource identified by ``key`` and return the parsed JSON.

        Raises StoreBackendError when the response body is not valid JSON.
        Non-JSON errors from ``requests`` itself are not caught here.
        """
        ge_cloud_url = self.get_url_for_key(key=key)
        try:
            response = requests.get(ge_cloud_url, headers=self.auth_headers)
            return response.json()
        except JSONDecodeError as jsonError:
            # NOTE(review): this debug call passes extra args without %s
            # placeholders in the message; logging will not interpolate them.
            logger.debug(
                "Failed to parse GE Cloud Response into JSON",
                str(response.text),
                str(jsonError),
            )
            raise StoreBackendError("Unable to get object in GE Cloud Store Backend.")
    def _move(self):
        # Intentional no-op: move is not supported by the GE Cloud backend.
        pass
    def _update(self, ge_cloud_id, value, **kwargs):
        """PATCH an existing resource with a new payload.

        Returns True on a <300 response status, False otherwise; raises
        StoreBackendError when the request itself fails.
        """
        resource_type = self.ge_cloud_resource_type
        account_id = self.ge_cloud_credentials["account_id"]
        attributes_key = self.PAYLOAD_ATTRIBUTES_KEYS[resource_type]
        data = {
            "data": {
                "type": resource_type,
                "id": ge_cloud_id,
                "attributes": {
                    attributes_key: value,
                    "account_id": account_id,
                },
            }
        }
        url = urljoin(
            self.ge_cloud_base_url,
            f"accounts/"
            f"{account_id}/"
            f"{hyphen(self.ge_cloud_resource_name)}/"
            f"{ge_cloud_id}",
        )
        try:
            response = requests.patch(url, json=data, headers=self.auth_headers)
            response_status_code = response.status_code
            if response_status_code < 300:
                return True
            return False
        except Exception as e:
            logger.debug(str(e))
            raise StoreBackendError(
                "Unable to update object in GE Cloud Store Backend."
            )
    @property
    def allowed_set_kwargs(self):
        """Extra kwargs permitted by ``_set`` for this resource type."""
        return self.ALLOWED_SET_KWARGS_BY_RESOURCE_TYPE.get(
            self.ge_cloud_resource_type, set()
        )
    def validate_set_kwargs(self, kwargs):
        """Return True when all kwarg names are allowed; raise ValueError otherwise."""
        kwarg_names = set(kwargs.keys())
        if len(kwarg_names) == 0:
            return True
        if kwarg_names <= self.allowed_set_kwargs:
            return True
        if not (kwarg_names <= self.allowed_set_kwargs):
            extra_kwargs = kwarg_names - self.allowed_set_kwargs
            raise ValueError(f'Invalid kwargs: {(", ").join(extra_kwargs)}')
    def _set(self, key, value, **kwargs):
        """Create (POST) a resource, or delegate to ``_update`` when the key
        already carries a ge_cloud_id.

        Returns a GeCloudResourceRef describing the created resource.
        """
        # Each resource type has corresponding attribute key to include in POST body
        ge_cloud_id = key[1]
        # if key has ge_cloud_id, perform _update instead
        if ge_cloud_id:
            return self._update(ge_cloud_id=ge_cloud_id, value=value, **kwargs)
        resource_type = self.ge_cloud_resource_type
        resource_name = self.ge_cloud_resource_name
        account_id = self.ge_cloud_credentials["account_id"]
        attributes_key = self.PAYLOAD_ATTRIBUTES_KEYS[resource_type]
        data = {
            "data": {
                "type": resource_type,
                "attributes": {
                    "account_id": account_id,
                    attributes_key: value,
                    **(kwargs if self.validate_set_kwargs(kwargs) else {}),
                },
            }
        }
        url = urljoin(
            self.ge_cloud_base_url,
            f"accounts/" f"{account_id}/" f"{hyphen(resource_name)}",
        )
        try:
            response = requests.post(url, json=data, headers=self.auth_headers)
            response_json = response.json()
            object_id = response_json["data"]["id"]
            object_url = self.get_url_for_key((self.ge_cloud_resource_type, object_id))
            return GeCloudResourceRef(
                resource_type=resource_type,
                ge_cloud_id=object_id,
                url=object_url,
            )
        # TODO Show more detailed error messages
        except Exception as e:
            logger.debug(str(e))
            raise StoreBackendError("Unable to set object in GE Cloud Store Backend.")
    @property
    def ge_cloud_base_url(self):
        # Base URL of the GE Cloud API, e.g. "https://app.greatexpectations.io/".
        return self._ge_cloud_base_url
    @property
    def ge_cloud_resource_name(self):
        # Plural resource name used in URL paths.
        return self._ge_cloud_resource_name
    @property
    def ge_cloud_resource_type(self):
        # Singular resource type used in request payloads.
        return self._ge_cloud_resource_type
    @property
    def ge_cloud_credentials(self):
        # Dict holding at least "account_id" and "access_token".
        return self._ge_cloud_credentials
    def list_keys(self):
        """Return all keys ``(resource_type, id)`` for this resource on the account."""
        url = urljoin(
            self.ge_cloud_base_url,
            f"accounts/"
            f"{self.ge_cloud_credentials['account_id']}/"
            f"{hyphen(self.ge_cloud_resource_name)}",
        )
        try:
            response = requests.get(url, headers=self.auth_headers)
            response_json = response.json()
            keys = [
                (
                    self.ge_cloud_resource_type,
                    resource["id"],
                )
                for resource in response_json.get("data")
            ]
            return keys
        except Exception as e:
            logger.debug(str(e))
            raise StoreBackendError("Unable to list keys in GE Cloud Store Backend.")
    def get_url_for_key(self, key, protocol=None):
        """Build the REST URL for ``key``; ``protocol`` is accepted for interface
        compatibility but unused here."""
        ge_cloud_id = key[1]
        url = urljoin(
            self.ge_cloud_base_url,
            f"accounts/{self.ge_cloud_credentials['account_id']}/{hyphen(self.ge_cloud_resource_name)}/{ge_cloud_id}",
        )
        return url
    def remove_key(self, key):
        """Soft-delete a resource by PATCHing ``deleted: True``.

        Returns True on a <300 response status, False otherwise.
        """
        if not isinstance(key, tuple):
            key = key.to_tuple()
        ge_cloud_id = key[1]
        data = {
            "data": {
                "type": self.ge_cloud_resource_type,
                "id": ge_cloud_id,
                "attributes": {
                    "deleted": True,
                },
            }
        }
        url = urljoin(
            self.ge_cloud_base_url,
            f"accounts/"
            f"{self.ge_cloud_credentials['account_id']}/"
            f"{hyphen(self.ge_cloud_resource_name)}/"
            f"{ge_cloud_id}",
        )
        try:
            response = requests.patch(url, json=data, headers=self.auth_headers)
            response_status_code = response.status_code
            if response_status_code < 300:
                return True
            return False
        except Exception as e:
            logger.debug(str(e))
            raise StoreBackendError(
                "Unable to delete object in GE Cloud Store Backend."
            )
    def _has_key(self, key):
        # O(n) membership test: fetches the full key listing from the API.
        all_keys = self.list_keys()
        return key in all_keys
    @property
    def config(self) -> dict:
        # Filtered constructor-argument snapshot built in __init__.
        return self._config
| [
"noreply@github.com"
] | NulledExceptions.noreply@github.com |
f7f4e1ec88509c895e7626e96405bd1b32ee10df | fef117e056a9e9081961c3c33396f400a4936da9 | /zjr/spiders/zjr_spider.py | 0613cb806f61fe999635894f49a7c44dadb8867f | [
"MIT"
] | permissive | freedomDR/fob_spider | fd45aa2efb579c28c220f37f7cccc1de11296f4d | a6342ee9cd8d1da36fa041c26b1e9e3481627699 | refs/heads/master | 2020-03-31T16:40:57.991135 | 2018-10-16T06:12:12 | 2018-10-16T06:12:12 | 152,385,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,316 | py | # -*- coding: utf-8 -*-
import scrapy
from bs4 import BeautifulSoup
import logging
import requests
import re
class ZjrSpider(scrapy.Spider):
    """Crawls FOB Shanghai BBS forum listing pages and yields post records.

    Every request is routed through a rotating HTTP proxy obtained from a
    local proxy-pool service; when a response looks like the "IP banned"
    page, the proxy is discarded and the same URL is retried with a fresh
    proxy.
    """
    name = 'bbs'
    data = ['161', '106', '177', '204', '120', '216', '205']
    start_urls = ['http://bbs.fobshanghai.com/forum-' + data[0] + '-1.html']
    basic_url = 'http://bbs.fobshanghai.com/'
    max_index = 40  # last listing page to crawl
    index = 1  # current listing page number (incremented on every parse call)
    # Links whose href contains "ip." only appear on the proxy-ban page.
    # Raw string fixes the invalid escape sequence '\.' of the original.
    BAN_LINK_RE = re.compile(r'ip\.')

    def get_proxy(self):
        """Fetch a fresh 'host:port' proxy string from the local proxy pool."""
        # Decode once here instead of str(..., encoding='utf-8') at every call site.
        return requests.get("http://proxy_pool:5010/get/").content.decode('utf-8')

    def del_proxy(self, hostip):
        """Ask the proxy pool to drop a banned or dead proxy."""
        requests.get("http://proxy_pool:5010/delete/?proxy={}".format(hostip))

    def _proxied_request(self, url, callback):
        """Build a Request for ``url`` routed through a freshly fetched proxy."""
        proxy = self.get_proxy()
        return scrapy.Request(
            url=url,
            callback=callback,
            meta={'proxy': 'http://' + proxy, 'hostip': proxy, 'me_url': url},
        )

    def _is_banned(self, soup):
        """Return True when the page is the proxy-ban notice, not real content."""
        return len(soup.find_all('a', href=self.BAN_LINK_RE)) == 1

    def start_requests(self):
        for url in self.start_urls:
            yield self._proxied_request(url, self.parse)

    def parse(self, response):
        """Parse a forum listing page: schedule each thread and the next page."""
        self.index += 1
        soup = BeautifulSoup(response.text, 'html5lib')
        if self._is_banned(soup):
            # Proxy got banned: drop it and retry the same listing URL.
            self.del_proxy(response.meta['hostip'])
            yield self._proxied_request(response.meta['me_url'], self.parse)
        else:
            titles = soup.find_all('td', class_='f_title')
            authors = soup.find_all('td', class_='f_author')
            times = soup.find_all('td', class_='f_last')
            # Only trust the page when the three columns line up row for row.
            if len(titles) == len(authors) and len(times) == len(authors):
                for title in titles:
                    thread_url = self.basic_url + title.a['href']
                    yield self._proxied_request(thread_url, self.parse_item)
            if self.index <= self.max_index:
                next_html = (
                    self.start_urls[0][:self.start_urls[0].rfind('-') + 1]
                    + str(self.index) + '.html'
                )
                yield self._proxied_request(next_html, self.parse)

    def parse_item(self, response):
        """Parse a thread page, yielding one record per post, and follow the
        thread's own pagination."""
        soup = BeautifulSoup(response.text, 'html5lib')
        if self._is_banned(soup) or soup.find('form') is None:
            # Proxy got banned: drop it and retry this thread URL.
            # Bug fix: the original retried with callback=self.parse, which
            # would mis-parse a thread page as a forum listing.
            self.del_proxy(response.meta['hostip'])
            yield self._proxied_request(response.meta['me_url'], self.parse_item)
        else:
            only_one_page = soup.find('a', class_='p_pages') is None
            cur_page, max_page = 0, 0
            if not only_one_page:
                # Pagination text looks like "...\xa0cur/max\xa0...".
                cur_page, max_page = map(
                    int,
                    soup.find('a', class_='p_pages').text.split('\xa0')[1].split('/'),
                )
                cur_page += 1
            # The thread id lives in different URL positions depending on
            # whether we arrived via the first page or a paginated one.
            if cur_page == 2 or only_one_page:
                tmp_url = response.url.split('-')[-3]
            else:
                tmp_url = response.url.split('=')[-3].split('&')[0]
            for item in soup.find('form').find_all('div', recursive=False):
                author = item.table.tbody.tr.find_all('td', recursive=False)[0] \
                    .find_all('a', recursive=False)[1].text
                content = item.find('div', class_='t_msgfont')
                # Skip silenced authors, emoji-only posts and anonymous rows.
                if content is None or content.text.strip() == '' or author == '':
                    continue
                text = content.text.replace('\xa0', '').replace(' ', '').replace('\n', '')
                time = item.find_all('div', class_='right')[1] \
                    .next_sibling.next_sibling.text.split('\n')[3][4:-1]
                yield {"author": author,
                       "content": text,
                       "time": time,
                       "file_id": tmp_url}
            if not only_one_page and cur_page < max_page:
                next_url = ('http://bbs.fobshanghai.com/viewthread.php?tid='
                            + tmp_url + '&extra=&page=' + str(cur_page))
                yield self._proxied_request(next_url, self.parse_item)
| [
"zjrforeverdr@gmail.com"
] | zjrforeverdr@gmail.com |
7970f4d308d13cf3b18a22061f0f3027b0e28b1c | 532a38c087e82ea260202ad4d04c449b9c482479 | /test/unit/confirmation_test.py | df9066a851b9faccd8ee146e15b0ec9bd391c608 | [
"MIT"
] | permissive | sudoguy/yandex-checkout-sdk-python | 2ab1c9db708cc28ce1dfe388c9831fdc14ba17d2 | 604594a31454e3f9222ebe59e98522e169ab6995 | refs/heads/master | 2020-04-30T09:25:52.804002 | 2019-03-20T14:14:46 | 2019-03-20T14:14:46 | 176,746,747 | 1 | 0 | MIT | 2019-03-20T14:03:14 | 2019-03-20T14:03:13 | null | UTF-8 | Python | false | false | 1,798 | py | import unittest
from yandex_checkout.domain.common.confirmation_type import ConfirmationType
from yandex_checkout.domain.models.confirmation.request.confirmation_redirect import \
ConfirmationRedirect as RequestConfirmationRedirect
from yandex_checkout.domain.models.confirmation.response.confirmation_redirect import \
ConfirmationRedirect as ResponseConfirmationRedirect
class ConfirmationTest(unittest.TestCase):
    """Tests for the request/response redirect-confirmation models."""

    def test_confirmation_request(self):
        """Request confirmation exposes its fields through dict() and
        rejects an empty return_url."""
        redirect = RequestConfirmationRedirect()
        redirect.type = ConfirmationType.REDIRECT
        redirect.enforce = True
        redirect.return_url = 'return.url'
        expected = {
            'type': ConfirmationType.REDIRECT,
            'enforce': True,
            'return_url': 'return.url',
        }
        self.assertEqual(redirect.type, ConfirmationType.REDIRECT)
        self.assertTrue(redirect.enforce)
        self.assertEqual(expected, dict(redirect))
        with self.assertRaises(ValueError):
            redirect.return_url = ''

    def test_confirmation_response(self):
        """Response confirmation additionally carries confirmation_url and
        likewise rejects an empty return_url."""
        redirect = ResponseConfirmationRedirect()
        redirect.type = ConfirmationType.REDIRECT
        redirect.enforce = True
        redirect.return_url = 'return.url'
        redirect.confirmation_url = 'confirmation.url'
        expected = {
            'type': ConfirmationType.REDIRECT,
            'enforce': True,
            'return_url': 'return.url',
            'confirmation_url': 'confirmation.url',
        }
        self.assertEqual(redirect.type, ConfirmationType.REDIRECT)
        self.assertTrue(redirect.enforce)
        self.assertEqual(expected, dict(redirect))
        with self.assertRaises(ValueError):
            redirect.return_url = ''
| [
"bodrovia@yamoney.ru"
] | bodrovia@yamoney.ru |
d4e3754937132e0210f9917879194fe9a25c7921 | 4f9c0d9eb2200ae6de51a48b8d2b943c048225a4 | /python_onnxifi/onnxifi_backend.py | de75abf8c03f2071d386e384941beef5de20f6b5 | [] | no_license | varunjain99/onnx-xla | f9e5082f0e989c2d7c7e9327a19e8dcba8d33431 | d1ed8a8318b4d3cb7d597a0f9efaf98e37db5085 | refs/heads/master | 2020-03-20T10:30:07.680440 | 2018-08-13T22:36:34 | 2018-08-13T22:36:34 | 137,373,854 | 4 | 2 | null | 2018-08-13T22:36:35 | 2018-06-14T15:04:31 | C++ | UTF-8 | Python | false | false | 2,759 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from python_onnxifi import Backend, BackendRep
import onnx
from onnx import (NodeProto,
ModelProto)
class OnnxifiBackendRep(object):
    """Thin wrapper over a prepared python_onnxifi BackendRep."""

    def __init__(self, backendRep):
        # Handle to the underlying native backend representation.
        self.backend_rep_ = backendRep

    def run(self, inputs, **kwargs):
        """Execute the prepared model.

        ``inputs`` is a list of numpy arrays matching the ModelProto inputs;
        the underlying BackendRep's output list is returned unchanged.
        """
        return self.backend_rep_.run(inputs, **kwargs)
class OnnxifiBackend(object):
    """ONNX backend API implemented on top of the ONNXIFI interface."""

    def __init__(self):
        # Native ONNXIFI backend handle.
        self.backend_ = Backend()

    def is_compatible(self,
                      model,  # type: ModelProto
                      device='CPU',  # type: Text
                      **kwargs  # type: Any
                      ):  # type: (...) -> bool
        """Ask the ONNXIFI interface whether ``model`` can run on ``device``."""
        serialized = model.SerializeToString()
        return self.backend_.is_compatible(serialized, device, **kwargs)

    def prepare(self,
                model,  # type: ModelProto
                device='CPU',  # type: Text
                **kwargs  # type: Any
                ):  # type: (...) -> BackendRep
        """Validate ``model`` and set it up on ``device``.

        Returns an OnnxifiBackendRep ready to be run.
        """
        # TODO: Use kwargs to pass in weightDescriptors
        onnx.checker.check_model(model)
        serialized = model.SerializeToString()
        return OnnxifiBackendRep(self.backend_.prepare(serialized, device, **kwargs))

    def run_model(self,
                  model,  # type: ModelProto
                  inputs,  # type: Any
                  device='CPU',  # type: Text
                  **kwargs  # type: Any
                  ):  # type: (...) -> Tuple[Any, ...]
        """Prepare ``model`` on ``device`` and run it once on ``inputs``
        (a list of numpy arrays), returning the output list."""
        backendRep = self.prepare(model, device, **kwargs)
        assert backendRep is not None
        return backendRep.run(inputs, **kwargs)

    def run_node(cls,
                 node,  # type: NodeProto
                 inputs,  # type: Any
                 device='CPU',  # type: Text
                 outputs_info=None,  # type: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]]
                 **kwargs  # type: Dict[Text, Any]
                 ):  # type: (...) -> Optional[Tuple[Any, ...]]
        # TODO: Implement run_node; currently always returns None.
        return None

    def supports_device(self, device):  # type: (Text) -> bool
        """True when a backend exists for the given device string."""
        return self.backend_.supports_device(device)

    def get_devices_info(self):  # type: () -> [Sequence[Tuple[string, string]]]
        """Utility: return information about the available devices."""
        return self.backend_.get_devices_info()
| [
"jainv@fb.com"
] | jainv@fb.com |
a7c7d7d525d043dc0a7958eecd424d047b938741 | b8acea93fc53b9c6e457959dd4b5446c520af272 | /2017-01-13/src/workshop_09.py | c4879efca71f5d0c3b72898cc1a3e6e903a0ca90 | [] | no_license | MarcoFaretra93/ggpl | c35fbd5a07ee70fc6073626325d17eef84faa9e6 | 15066fc26c3af67d52b6670e265f9ec0e7113c42 | refs/heads/master | 2021-01-10T22:50:11.618623 | 2017-01-28T10:09:56 | 2017-01-28T10:09:56 | 70,344,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,939 | py | from pyplasm import *
from sympy import *
import numpy as np
def transformList2CoupledList(initList):
    """
    transformList2CoupledList returns, for every element of initList, the pair
    made of that element and its cyclic successor (the last element pairs with
    the first).
    Example: [1,2,3] -> [[1,2], [2,3], [3,1]]
    @param initList: list representing the initial list.
    @return coupleList: list of two-element lists (the consecutive pairs).
    """
    # Bug fix: the original raised IndexError on an empty input
    # (initList[-1] on an empty list); return [] instead.
    if not initList:
        return []
    size = len(initList)
    # Modular indexing covers both the consecutive pairs and the wrap-around.
    return [[initList[i], initList[(i + 1) % size]] for i in range(size)]
def get4CoefficientsOfThePlane(angle, line):
    """
    get4CoefficientsOfThePlane returns the 4 coefficients [a, b, c, d] of the
    plane a*x + b*y + c*z = d obtained by rotating, by `angle`, a vertical
    plane passing through the given line.
    @param angle: number representing the rotation angle (radians) applied
        around the line's direction.
    @param line: pair of 2D verts [[x0, y0], [x1, y1]] defining the line.
    @return planesParam: list [a, b, c, d] of the plane coefficients.
    """
    # Extrude the line vertically, then rotate the resulting strip by -angle
    # around the line's own direction (translated to the origin and back).
    partialPlane = PROD([POLYLINE(line), QUOTE([2])])
    partialPlane = T([1,2])([-line[0][0], -line[0][1]])(partialPlane)
    partialPlane = ROTN([-angle, [line[1][0] - line[0][0], line[1][1] - line[0][1], 0]])(partialPlane)
    partialPlane = T([1,2])([+line[0][0], +line[0][1]])(partialPlane)
    #obtain 3 points
    points = []
    points.append(UKPOL(partialPlane)[0][0])
    points.append(UKPOL(partialPlane)[0][1])
    points.append(UKPOL(partialPlane)[0][2])
    x1 = points[0][0]
    x2 = points[1][0]
    x3 = points[2][0]
    y1 = points[0][1]
    y2 = points[1][1]
    y3 = points[2][1]
    z1 = points[0][2]
    z2 = points[1][2]
    z3 = points[2][2]
    #calculate the vectors
    p1 = np.array([x1, y1, z1])
    p2 = np.array([x2, y2, z2])
    p3 = np.array([x3, y3, z3])
    v1 = p3 - p1
    v2 = p2 - p1
    # this is a vector normal to the plane
    cp = np.cross(v1, v2)
    a, b, c = cp
    # This evaluates a * x3 + b * y3 + c * z3 which equals d
    d = np.dot(cp, p3)
    return [a,b,c,d]
def buildRoof(verts, angle, height):
    """
    buildRoof returns an HPC model of a hip-style roof built over the polygon
    described by `verts`: each edge generates a pitched plane rotated by
    `angle`, the planes are intersected pairwise (via sympy) to find the roof
    ridges, and the pitches are capped by a flat terrace at `height`.
    @param verts: list of [x, y] pairs defining the roof-bottom polygon.
    @param angle: number representing the pitch angle of each roof plane.
    @param height: number representing the height of the terrace/ridge level.
    @return roof: HPC model representing the textured roof.
    """
    lines = transformList2CoupledList(verts)
    base = SOLIDIFY(POLYLINE(verts + [verts[0]]))
    # One inclined plane per polygon edge.
    planes = []
    for line in lines:
        planes.append(get4CoefficientsOfThePlane(angle, line))
    couplePlanes = transformList2CoupledList(planes)
    roofTop = []
    linesEquations = []
    # calculating equations with planes intersection
    for couple in couplePlanes:
        x, y, z = symbols('x y z')
        solved = solve([Eq(couple[0][0]*x+couple[0][1]*y+couple[0][2]*z, couple[0][3]),
                        Eq(couple[1][0]*x+couple[1][1]*y+couple[1][2]*z, couple[1][3])])
        linesEquations.append(solved)
        # Intersection line evaluated at z=height gives a terrace corner.
        roofTop.append([round(float(solved[x].subs(z,height)),2), round(float(solved[y].subs(z,height)),2)])
    roofTop.append(roofTop[0])
    terrace = T([3])([height])(SOLIDIFY(POLYLINE(roofTop)))
    coupleLines = transformList2CoupledList(linesEquations)
    roofPitch = []
    #building roof pitches
    for couple in coupleLines:
        # Each pitch is the quad spanned by two ridge lines sampled at z=0
        # and z=height.
        base1 = [round(float((couple[0])[x].subs(z,0)),2),round(float((couple[0])[y].subs(z,0)),2),0]
        base2 = [round(float((couple[1])[x].subs(z,0)),2),round(float((couple[1])[y].subs(z,0)),2),0]
        top1 = [round(float((couple[0])[x].subs(z,height)),2),round(float((couple[0])[y].subs(z,height)),2),height]
        top2 = [round(float((couple[1])[x].subs(z,height)),2),round(float((couple[1])[y].subs(z,height)),2),height]
        points = [base1, base2, top2, top1, base1]
        faces = [[1,2,3,4]]
        roofPitch.append(TEXTURE("texture/roof.jpg")(MKPOL([points, faces, 1])))
    roofPitch = STRUCT(roofPitch)
    return STRUCT([TEXTURE("texture/surface.jpg")(terrace), base, roofPitch])
| [
"mar.faretra@stud.uniroma3.it"
] | mar.faretra@stud.uniroma3.it |
a37cfd4aca4f0deb3e021528db8511beeaf2ec94 | d13bed6cdfc2ab867b6bb7750593581c96815857 | /venv/Lib/site-packages/pip/_internal/locations/__init__.py | 7d70810c1e469237ebab988f789f4b8de30113cc | [
"MIT"
] | permissive | kennywam/arenadreamblog | fbf0531fc62aaa3339fbd02d702eb8e2783dd519 | 7ddf7da4b40bf871879a4edfdcf5dcac4fd78266 | refs/heads/master | 2023-07-06T04:32:47.800553 | 2021-08-09T16:35:45 | 2021-08-09T16:35:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,236 | py | import functools
import logging
import os
import pathlib
import sys
import sysconfig
from typing import Any, Dict, Iterator, List, Optional, Tuple
from pip._internal.models.scheme import SCHEME_KEYS, Scheme
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.virtualenv import running_under_virtualenv
from . import _distutils, _sysconfig
from .base import (
USER_CACHE_DIR,
get_major_minor_version,
get_src_prefix,
is_osx_framework,
site_packages,
user_site,
)
# Public interface of the pip._internal.locations package.
__all__ = [
    "USER_CACHE_DIR",
    "get_bin_prefix",
    "get_bin_user",
    "get_major_minor_version",
    "get_platlib",
    "get_prefixed_libs",
    "get_purelib",
    "get_scheme",
    "get_src_prefix",
    "site_packages",
    "user_site",
]
logger = logging.getLogger(__name__)
# distutils/sysconfig mismatches are reported as warnings by default; this
# escape-hatch env var demotes them to debug level.
if os.environ.get("_PIP_LOCATIONS_NO_WARN_ON_MISMATCH"):
    _MISMATCH_LEVEL = logging.DEBUG
else:
    _MISMATCH_LEVEL = logging.WARNING
def _looks_like_red_hat_patched_platlib_purelib(scheme: Dict[str, str]) -> bool:
    """Detect Red Hat's distutils patch from one install scheme dict.

    Red Hat rewrites platlib to use "lib64"; undoing that rewrite (and the
    $platbase -> $base variable rename) must reproduce purelib exactly.
    """
    patched = scheme["platlib"]
    if "/lib64/" in patched:
        reverted = patched.replace("/lib64/", "/lib/").replace("$platbase/", "$base/")
        return reverted == scheme["purelib"]
    return False
@functools.lru_cache(maxsize=None)
def _looks_like_red_hat_lib() -> bool:
    """Red Hat patches platlib in unix_prefix and unix_home, but not purelib.
    This is the only way I can see to tell a Red Hat-patched Python.
    """
    # Imported lazily because distutils emits a deprecation warning on import.
    from distutils.command.install import INSTALL_SCHEMES  # type: ignore
    # Both unix schemes must exist and show the lib64 patch.
    return all(
        k in INSTALL_SCHEMES
        and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k])
        for k in ("unix_prefix", "unix_home")
    )
@functools.lru_cache(maxsize=None)
def _looks_like_debian_scheme() -> bool:
    """Debian adds two additional schemes."""
    from distutils.command.install import INSTALL_SCHEMES  # type: ignore
    # Debian's Python patch registers these two extra install schemes.
    return "deb_system" in INSTALL_SCHEMES and "unix_local" in INSTALL_SCHEMES
@functools.lru_cache(maxsize=None)
def _looks_like_red_hat_scheme() -> bool:
    """Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``.
    Red Hat's ``00251-change-users-install-location.patch`` changes the install
    command's ``prefix`` and ``exec_prefix`` to append ``"/local"``. This is
    (fortunately?) done quite unconditionally, so we create a default command
    object without any configuration to detect this.
    """
    from distutils.command.install import install
    from distutils.dist import Distribution
    # A bare install command with finalized defaults reveals the patched paths.
    cmd: Any = install(Distribution())
    cmd.finalize_options()
    return (
        cmd.exec_prefix == f"{os.path.normpath(sys.exec_prefix)}/local"
        and cmd.prefix == f"{os.path.normpath(sys.prefix)}/local"
    )
def _fix_abiflags(parts: Tuple[str]) -> Iterator[str]:
    """Yield ``parts`` with ``sys.abiflags`` stripped from any component that
    ends with the interpreter's LDVERSION string.

    When LDVERSION or abiflags is unavailable, or LDVERSION does not end with
    abiflags, the parts are passed through unchanged.
    """
    ldversion = sysconfig.get_config_var("LDVERSION")
    abiflags: str = getattr(sys, "abiflags", None)
    if not ldversion or not abiflags or not ldversion.endswith(abiflags):
        # Nothing to normalize; pass everything through as-is.
        yield from parts
        return
    for component in parts:
        if component.endswith(ldversion):
            yield component[: -len(abiflags)]
        else:
            yield component
@functools.lru_cache(maxsize=None)
def _warn_mismatched(old: pathlib.Path, new: pathlib.Path, *, key: str) -> None:
    """Log that distutils and sysconfig disagree for ``key``.

    lru_cache ensures each unique (old, new, key) triple is logged only once.
    """
    report_to = "https://github.com/pypa/pip/issues/10151"
    template = (
        "Value for %s does not match. Please report this to <%s>"
        "\ndistutils: %s"
        "\nsysconfig: %s"
    )
    logger.log(_MISMATCH_LEVEL, template, key, report_to, old, new)
def _warn_if_mismatch(old: pathlib.Path, new: pathlib.Path, *, key: str) -> bool:
    """Warn about a distutils/sysconfig divergence when present.

    Returns True when the two paths differ (after logging), False otherwise.
    """
    mismatched = old != new
    if mismatched:
        _warn_mismatched(old, new, key=key)
    return mismatched
@functools.lru_cache(maxsize=None)
def _log_context(
    *,
    user: bool = False,
    home: Optional[str] = None,
    root: Optional[str] = None,
    prefix: Optional[str] = None,
) -> None:
    """Log the scheme arguments that produced a mismatch, to help triage
    reports; cached so each argument combination is logged once."""
    # NOTE: the "users" label below is the historical wording for the `user`
    # flag; kept verbatim so logged output is unchanged.
    template = (
        "Additional context:"
        "\nusers = %r"
        "\nhome = %r"
        "\nroot = %r"
        "\nprefix = %r"
    )
    logger.log(_MISMATCH_LEVEL, template, user, home, root, prefix)
def get_scheme(
    dist_name: str,
    user: bool = False,
    home: Optional[str] = None,
    root: Optional[str] = None,
    isolated: bool = False,
    prefix: Optional[str] = None,
) -> Scheme:
    """Return the install scheme for ``dist_name``, computed via distutils.

    The sysconfig-based scheme is computed in parallel purely to detect
    divergences; known, expected divergences (PyPy naming, macOS framework
    headers, Red Hat lib64, Debian /usr/local, pre-3.8 abiflags) are
    filtered out, and the remainder is logged so users can report it.
    The distutils result is always the one returned.
    """
    old = _distutils.get_scheme(
        dist_name,
        user=user,
        home=home,
        root=root,
        isolated=isolated,
        prefix=prefix,
    )
    new = _sysconfig.get_scheme(
        dist_name,
        user=user,
        home=home,
        root=root,
        isolated=isolated,
        prefix=prefix,
    )
    warning_contexts = []
    for k in SCHEME_KEYS:
        old_v = pathlib.Path(getattr(old, k))
        new_v = pathlib.Path(getattr(new, k))
        if old_v == new_v:
            continue
        # distutils incorrectly put PyPy packages under ``site-packages/python``
        # in the ``posix_home`` scheme, but PyPy devs said they expect the
        # directory name to be ``pypy`` instead. So we treat this as a bug fix
        # and not warn about it. See bpo-43307 and python/cpython#24628.
        skip_pypy_special_case = (
            sys.implementation.name == "pypy"
            and home is not None
            and k in ("platlib", "purelib")
            and old_v.parent == new_v.parent
            and old_v.name.startswith("python")
            and new_v.name.startswith("pypy")
        )
        if skip_pypy_special_case:
            continue
        # sysconfig's ``osx_framework_user`` does not include ``pythonX.Y`` in
        # the ``include`` value, but distutils's ``headers`` does. We'll let
        # CPython decide whether this is a bug or feature. See bpo-43948.
        skip_osx_framework_user_special_case = (
            user
            and is_osx_framework()
            and k == "headers"
            and old_v.parent.parent == new_v.parent
            and old_v.parent.name.startswith("python")
        )
        if skip_osx_framework_user_special_case:
            continue
        # On Red Hat and derived Linux distributions, distutils is patched to
        # use "lib64" instead of "lib" for platlib.
        if k == "platlib" and _looks_like_red_hat_lib():
            continue
        # Both Debian and Red Hat patch Python to place the system site under
        # /usr/local instead of /usr. Debian also places lib in dist-packages
        # instead of site-packages, but the /usr/local check should cover it.
        skip_linux_system_special_case = (
            not (user or home or prefix or running_under_virtualenv())
            and old_v.parts[1:3] == ("usr", "local")
            and len(new_v.parts) > 1
            and new_v.parts[1] == "usr"
            and (len(new_v.parts) < 3 or new_v.parts[2] != "local")
            and (_looks_like_red_hat_scheme() or _looks_like_debian_scheme())
        )
        if skip_linux_system_special_case:
            continue
        # On Python 3.7 and earlier, sysconfig does not include sys.abiflags in
        # the "pythonX.Y" part of the path, but distutils does.
        skip_sysconfig_abiflag_bug = (
            sys.version_info < (3, 8)
            and not WINDOWS
            and k in ("headers", "platlib", "purelib")
            and tuple(_fix_abiflags(old_v.parts)) == new_v.parts
        )
        if skip_sysconfig_abiflag_bug:
            continue
        warning_contexts.append((old_v, new_v, f"scheme.{k}"))
    if not warning_contexts:
        return old
    # Check if this path mismatch is caused by distutils config files. Those
    # files will no longer work once we switch to sysconfig, so this raises a
    # deprecation message for them.
    default_old = _distutils.distutils_scheme(
        dist_name,
        user,
        home,
        root,
        isolated,
        prefix,
        ignore_config_files=True,
    )
    if any(default_old[k] != getattr(old, k) for k in SCHEME_KEYS):
        deprecated(
            "Configuring installation scheme with distutils config files "
            "is deprecated and will no longer work in the near future. If you "
            "are using a Homebrew or Linuxbrew Python, please see discussion "
            "at https://github.com/Homebrew/homebrew-core/issues/76621",
            replacement=None,
            gone_in=None,
        )
        return old
    # Post warnings about this mismatch so users can report them back.
    for old_v, new_v, key in warning_contexts:
        _warn_mismatched(old_v, new_v, key=key)
    _log_context(user=user, home=home, root=root, prefix=prefix)
    return old
def get_bin_prefix() -> str:
    """Return the scripts-directory prefix (the distutils answer).

    The sysconfig value is computed only to detect and report divergence.
    """
    legacy = _distutils.get_bin_prefix()
    modern = _sysconfig.get_bin_prefix()
    mismatched = _warn_if_mismatch(
        pathlib.Path(legacy), pathlib.Path(modern), key="bin_prefix"
    )
    if mismatched:
        _log_context()
    return legacy
def get_bin_user() -> str:
    """Return the per-user scripts directory, per sysconfig's user scheme."""
    user_scheme = _sysconfig.get_scheme("", user=True)
    return user_scheme.scripts
def _looks_like_deb_system_dist_packages(value: str) -> bool:
    """Check if the value is Debian's APT-controlled dist-packages.

    Debian's ``distutils.sysconfig.get_python_lib()`` implementation returns
    the default package path controlled by APT, but does not patch
    ``sysconfig`` to do the same. This is similar to the bug worked around in
    ``get_scheme()``, but here the default is ``deb_system`` instead of
    ``unix_local``. Ultimately we can't do anything about this Debian bug, and
    this detection allows us to skip the warning when needed.
    """
    return (
        _looks_like_debian_scheme()
        and value == "/usr/lib/python3/dist-packages"
    )
def get_purelib() -> str:
    """Return the default pure-Python lib location (the distutils answer).

    Debian's expected dist-packages divergence is silenced; any other
    distutils/sysconfig disagreement is logged for reporting.
    """
    legacy = _distutils.get_purelib()
    modern = _sysconfig.get_purelib()
    if _looks_like_deb_system_dist_packages(legacy):
        return legacy
    if _warn_if_mismatch(pathlib.Path(legacy), pathlib.Path(modern), key="purelib"):
        _log_context()
    return legacy
def get_platlib() -> str:
    """Return the default platform-shared lib location (the distutils answer).

    Debian's expected dist-packages divergence is silenced; any other
    distutils/sysconfig disagreement is logged for reporting.
    """
    legacy = _distutils.get_platlib()
    modern = _sysconfig.get_platlib()
    if _looks_like_deb_system_dist_packages(legacy):
        return legacy
    if _warn_if_mismatch(pathlib.Path(legacy), pathlib.Path(modern), key="platlib"):
        _log_context()
    return legacy
def get_prefixed_libs(prefix: str) -> List[str]:
    """Return the lib locations under ``prefix`` (deduplicated when equal)."""
    legacy_pure, legacy_plat = _distutils.get_prefixed_libs(prefix)
    modern_pure, modern_plat = _sysconfig.get_prefixed_libs(prefix)
    # Run both checks unconditionally so each mismatch gets reported.
    pure_mismatch = _warn_if_mismatch(
        pathlib.Path(legacy_pure),
        pathlib.Path(modern_pure),
        key="prefixed-purelib",
    )
    plat_mismatch = _warn_if_mismatch(
        pathlib.Path(legacy_plat),
        pathlib.Path(modern_plat),
        key="prefixed-platlib",
    )
    if pure_mismatch or plat_mismatch:
        _log_context(prefix=prefix)
    if legacy_pure == legacy_plat:
        return [legacy_pure]
    return [legacy_pure, legacy_plat]
| [
"titusowuor30@gmail.com"
] | titusowuor30@gmail.com |
f2693585515f05f0b389e0a4473f101e65781a55 | ae6aee00cfe5c13ac42cc1e68f276be4542d7252 | /app/crud/category_crud.py | 65f86ff07942cafbfa834d971b21961874c2a127 | [] | no_license | manjurulhoque/fastapi-ecommerce | b40a76509741db288bb40f759ba78aed9b4dbdb7 | 5712c40a394410c670f253ba11dd53740ca434c2 | refs/heads/master | 2023-07-16T21:26:00.763382 | 2021-09-01T17:41:32 | 2021-09-01T17:41:32 | 349,721,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from sqlalchemy.orm import Session, joinedload
from app import schemas, models
def category_list(db: Session):
    """Return all Category rows from the database (unordered)."""
    return db.query(models.Category).all()
def create_category(db: Session, category: schemas.CategoryCreate):
    """Insert a new Category row and return the refreshed ORM instance.

    The refresh re-reads the row so database-generated fields (e.g. the
    primary key) are populated on the returned object.
    """
    record = models.Category(name=category.name)
    db.add(record)
    db.commit()
    db.refresh(record)
    return record
| [
"rumimanzurulhoque@gmail.com"
] | rumimanzurulhoque@gmail.com |
dc6cf48155e80310c4175993b5fab0e09ccdcfe6 | 1d18a9358b9ea5b1cf1e9151e7ad321401c86b0f | /ecomsite/settings.py | 30794d6662322dbde12ad17879e99bcdab19c629 | [] | no_license | saipraneethreddy969/ecommerce | a73f737f3ce1046b77091c4fa3312480ff5f8388 | 60353f20f435679212b91944a098a7b077a14cfa | refs/heads/master | 2022-12-22T01:12:44.493025 | 2020-09-25T12:09:32 | 2020-09-25T12:09:32 | 264,087,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,303 | py | """
Django settings for ecomsite project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any real deployment.
SECRET_KEY = '9!%nw4l-y=r2ti2ii9h2dz&xilaa)m*$)%g4=cx$c%og)cyb10'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; must list real hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'home',  # project app providing the storefront views
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ecomsite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <project>/templates in addition to
        # each app's own templates directory (APP_DIRS).
        'DIRS': [os.path.join(BASE_DIR,'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ecomsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# SQLite file database - development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
# collectstatic gathers everything into <project>/assets.
STATIC_ROOT = os.path.join(BASE_DIR,'assets')
STATIC_URL='/static/'
STATICFILES_DIRS=[os.path.join(BASE_DIR,'static'),
]
# User-uploaded media is served from /images/ and stored under <project>/images.
MEDIA_URL='/images/'
MEDIA_ROOT=os.path.join(BASE_DIR,'images')
| [
"saipraneethreddy969@gmail.com"
] | saipraneethreddy969@gmail.com |
102801c4f7c6f1c31f9aa9cecc19e5d3385085c9 | 1f0831db24ae2772d4944faf05289599bb37aca7 | /linear_models/05/linear_regressor.py | e1b7703860f85225c49ec4b1f673ce5716526f4c | [] | no_license | smaystr/rails_reactor | 2123f39ae97f38acb647363979fe4a09b896670e | 69c8aac5860527768b4a8b7bce027b9dea6b1989 | refs/heads/master | 2022-08-19T05:35:21.535933 | 2019-08-28T12:46:22 | 2019-08-28T12:46:22 | 189,264,026 | 1 | 0 | null | 2022-07-29T22:34:56 | 2019-05-29T16:47:08 | Jupyter Notebook | UTF-8 | Python | false | false | 1,920 | py | from sklearn.base import RegressorMixin, BaseEstimator
import numpy as np
class MyLinearRegression(RegressorMixin, BaseEstimator):
"""
Linear regressor implementation with L2 regularization (aka RidgeRegression)
"""
def __init__(
self,
learning_rate,
num_iterations,
lam,
verbose=True,
fit_intercept=True,
print_steps=1000,
):
self.learning_rate = learning_rate
self.num_iterations = num_iterations
self.fit_intercept = fit_intercept
self.lam = lam
self.verbose = verbose
self.weights = None
self.print_steps = print_steps
def validate_inputs(self, X, Y):
assert len(X.shape) == 2
assert len(X) == len(Y)
return X
def initialize_weights(self, input_shape):
self.weights = np.random.normal(size=input_shape)[..., None]
def mse(self, preds, Y):
return (np.square(Y - preds)).mean()
def fit(self, X, Y):
inputs = self.validate_inputs(X, Y)
if self.fit_intercept:
inputs = np.concatenate((np.ones((len(X), 1)), X), axis=1)
self.initialize_weights(inputs.shape[1])
for i in range(self.num_iterations):
logits = np.dot(inputs, self.weights)
gradients = (
np.dot(inputs.T, (logits - Y)) / len(X)
+ (self.lam / len(X)) * self.weights
)
self.weights -= self.learning_rate * gradients
if self.verbose and i % self.print_steps == 0:
preds = self.predict(X)
loss = self.mse(preds, Y)
print(f"MSE at {i} step is {loss}\t RMSE is {np.sqrt(loss)}")
def predict(self, X):
inputs = X
if self.fit_intercept:
inputs = np.concatenate((np.ones((len(X), 1)), X), axis=1)
return np.dot(inputs, self.weights)
| [
"smaystr@gmail.com"
] | smaystr@gmail.com |
dcda998631a711e0b0de384484903c917a360dbc | a0ed9603591a6ab2fe9de8c4e752662acc1a6a43 | /MhcVizPipe/Tools/cl_tools_backup.py | 304a03beda643e93c28d05e680ee6936e3356796 | [
"MIT"
] | permissive | zpeng1989/MhcVizPipe | ee9a7cb4face3ad84cb5bc719f1d0720ca272bbd | 32ea13ac8c4e0db672ec78d9179b6f813bfa4349 | refs/heads/master | 2023-04-11T09:43:46.720784 | 2021-04-28T13:11:58 | 2021-04-28T13:11:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,323 | py | import subprocess
import pandas as pd
import os
from numpy import array_split
import numpy as np
from pathlib import Path
from MhcVizPipe.Tools.unmodify_peptides import clean_peptides
from typing import List
class MhcPeptides:
    """Container pairing one sample's name and description with its peptide list."""

    def __init__(self, sample_name: str, sample_description: str, peptides: List[str]):
        # Spaces are unsafe in the file names later derived from the sample
        # name, so normalize them to underscores immediately.
        self.sample_name = sample_name.replace(' ', '_')
        self.sample_description = sample_description
        self.peptides = peptides
'''
tester_a = MhcPeptides(
sample_name='Test A',
sample_description='first test sample',
peptides=['ALITQQDLAPQQRAAP', 'ALPGQLKPFETLLSQN', 'ALQNIIPASTGAAK', 'ALQNIIPASTGAAKA', 'ALQNIIPASTGAAKAVG',
'AMSYVKDDIFRIYIK', 'AMSYVKDDIFRIYIKE', 'ANVIRYFPTQALN', 'APDQDEIQRLPGLAKQPS', 'APDQDEIQRLPGLAKQPSFR',
'APEPSTVQILHSP', 'APEPSTVQILHSPA', 'APEPSTVQILHSPAVE', 'APFSPDENSLVLFE', 'APGHRDFIKNMITGTSQ',
'APGHRDFIKNMITGTSQA', 'APGHRDFIKNMITGTSQAD', 'APGLIIATGSVGKN', 'APGPGRLVAQLDTEGVG']
)
tester_b = MhcPeptides(
sample_name='Test B',
sample_description='second test sample',
peptides=['AGLNVLRIINEPTAAAIA', 'AIFLFVDKTVPQSS', 'AIFLFVDKTVPQSSL', 'AIKELGDHVTNLRKMG', 'AIKELGDHVTNLRKMGAPE',
'AIVVDPVHGF', 'AIVVDPVHGFM', 'AKRVIISAPSADAP', 'AKRVIISAPSADAPM']
)
test_samples = [tester_a, tester_b]
'''
class MhcToolHelper:
    """Driver for the external netMHCpan / netMHCIIpan and GibbsCluster tools.

    Writes per-sample peptide files into ``tmp_directory``, launches the
    external binaries via ``subprocess``, and collects binding predictions
    into ``self.predictions`` (a DataFrame with columns
    Sample / Peptide / Allele / Rank / Binder) and clustering output
    directories into ``self.gibbs_directories`` /
    ``self.supervised_gibbs_directories``.
    """
    def __init__(self,
                 tmp_directory: str,
                 samples: List[MhcPeptides],
                 mhc_class: str = 'I',
                 alleles: List[str] = ('HLA-A03:02', 'HLA-A02:02'),
                 min_length: int = 8,
                 max_length: int = 12):
        # Imported lazily to avoid a circular import at module load time.
        from MhcVizPipe.defaults import Parameters
        self.Parameters = Parameters()
        # Paths to the external tool executables, taken from user config.
        self.GIBBSCLUSTER = self.Parameters.GIBBSCLUSTER
        self.NETMHCPAN = self.Parameters.NETMHCPAN
        self.NETMHCIIPAN = self.Parameters.NETMHCIIPAN
        self.NETMHCPAN_VERSION = self.Parameters.NETMHCPAN_VERSION
        # Accept a single comma- or space-delimited string as well as a list.
        if isinstance(alleles, str):
            if ',' in alleles:
                alleles = alleles.split(',')
            elif ' ' in alleles:
                alleles = alleles.split(' ')
            else:
                alleles = [alleles]
        self.samples: List[MhcPeptides] = samples
        self.descriptions = {sample.sample_name: sample.sample_description for sample in samples}
        self.alleles = alleles
        self.mhc_class = mhc_class
        self.min_length = min_length
        self.max_length = max_length
        # Accumulates one row per (sample, peptide, allele) prediction.
        self.predictions = pd.DataFrame(
            columns=['Sample', 'Peptide', 'Allele', 'Rank', 'Binder']
        )
        # NOTE: mkdir raises FileExistsError if tmp_directory already exists.
        self.tmp_folder = Path(tmp_directory)
        self.tmp_folder.mkdir(parents=True)
        self.predictions_made = False
        self.gibbs_directories = []
        self.supervised_gibbs_directories = {}
        self.gibbs_cluster_lengths = {}
        # Persist each sample's raw peptide list for reference/debugging.
        for sample in self.samples:
            with open(str(self.tmp_folder / f'{sample.sample_name}.peptides'), 'w') as f:
                for pep in sample.peptides:
                    f.write(pep + '\n')
    def make_binding_predictions(self):
        """Run netMHCpan (class I) or netMHCIIpan (class II) on every sample.

        Peptides are length-filtered, shuffled, split into one chunk per CPU
        and predicted in parallel; results are parsed into
        ``self.predictions`` and written to a per-sample CSV in the temp
        folder.
        """
        n = int(os.cpu_count())
        # split peptide list into chunks
        for sample in self.samples:
            peptides = np.array(clean_peptides(sample.peptides))
            lengths = np.vectorize(len)(peptides)
            peptides = peptides[(lengths >= self.min_length) & (lengths <= self.max_length)]
            np.random.shuffle(peptides)  # we need to shuffle them so we don't end up with files filled with peptide lengths that take a LONG time to compute (this actually is a very significant speed up)
            if len(peptides) > 100:
                chunks = array_split(peptides, n)
            else:
                chunks = [peptides]
            jobs = []
            job_number = 1
            results = []
            for chunk in chunks:
                if len(chunk) < 1:
                    continue
                fname = Path(self.tmp_folder, f'{sample.sample_name}_{job_number}.csv')
                fout = Path(self.tmp_folder, f'{sample.sample_name}_netmhcpan_results_{job_number}.tsv')
                results.append(fout)
                # save the new peptide list, this will be given to netMHCpan
                # Tool-specific minimum lengths: 8-mers for class I, 9-mers for class II.
                if self.mhc_class == 'I':
                    chunk = chunk[np.vectorize(len)(chunk) >= 8]
                if self.mhc_class == 'II':
                    chunk = chunk[np.vectorize(len)(chunk) >= 9]
                chunk.tofile(str(fname), '\n', '%s')
                job_number += 1
                # run netMHCpan
                if self.mhc_class == 'I':
                    command = f'{self.NETMHCPAN} -p -f {fname} -a {",".join(self.alleles)}'.split(' ')
                else:
                    command = f'{self.NETMHCIIPAN} -inptype 1 -f {fname} -a {",".join(self.alleles)}'.split(' ')
                jobs.append(subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
            # finish jobs and check return values
            for job in jobs:
                stdout, stderr = job.communicate()
                self.parse_netmhc_output(stdout.decode('utf-8'), sample.sample_name)
            self.predictions.to_csv(str(Path(self.tmp_folder)/f'{sample.sample_name}_netMHC'
                                        f'{"II" if self.mhc_class == "II" else ""}'
                                        f'pan_predictions.csv'))
    def cluster_with_gibbscluster(self):
        """Run GibbsCluster on each sample's full (length-filtered) peptide list.

        Output directories created by GibbsCluster under the temp folder are
        collected into ``self.gibbs_directories``.
        NOTE(review): jobs are run sequentially (the ``jobs`` list stays
        empty, so the return-code check loop never executes) — confirm this
        is intentional before relying on error propagation here.
        """
        n_cpus = int(os.cpu_count())
        # split peptide list into chunks
        n_samples = len(self.samples)
        cpus_per_job = [len(x) if len(x) > 0 else 1 for x in array_split(range(n_cpus), n_samples)]
        jobs = []
        os.chdir(self.tmp_folder)
        for sample in self.samples:
            i = self.samples.index(sample)
            k = cpus_per_job[i]
            n = len(self.alleles)
            fname = Path(self.tmp_folder, f'{sample.sample_name}_forgibbs.csv')
            peps = np.array(clean_peptides(sample.peptides))
            lengths = np.vectorize(len)(peps)
            peps = peps[(lengths >= self.min_length) & (lengths <= self.max_length)]
            peps.tofile(str(fname), '\n', '%s')
            # Class I adds the trash-cluster / insertion-deletion flags
            # (-C -D 4 -I 1); exact flag semantics are per the GibbsCluster
            # manual — TODO confirm against the installed version.
            if self.mhc_class == 'I':
                command = f'{self.GIBBSCLUSTER} -f {fname} -P {sample.sample_name} -k 5 ' \
                          f'-g 1-{n if n>5 else 5} -T -j 2 -C -D 4 -I 1 -G'.split(' ')
            else:
                command = f'{self.GIBBSCLUSTER} -f {fname} -P {sample.sample_name} -k 5 ' \
                          f'-g 1-{n if n>5 else 5} -T -j 2 -G'.split(' ')
            #jobs.append(subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
            job = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            job.communicate()
        for job in jobs:
            job.communicate()
            if job.returncode != 0:
                raise subprocess.SubprocessError(f"Error in running: {job.args}\n\n{job.stderr}")
        # Every directory GibbsCluster left in the temp folder is a result dir.
        ls = list(self.tmp_folder.glob('*'))
        for f in ls:
            if Path(f).is_dir():
                self.gibbs_directories.append(f)
    def cluster_with_gibbscluster2(self):
        """Run GibbsCluster both per-sample and per-allele (supervised).

        Phase 1 clusters each sample's full peptide list; phase 2 clusters
        the strong/weak binders of each allele plus an "unannotated"
        remainder set, based on ``self.predictions`` (so
        ``make_binding_predictions`` must have run first).
        NOTE(review): the first cpus_per_job assignments are immediately
        overwritten and ``k``/``n_cpus`` inside the loops are unused —
        apparent leftovers from an earlier parallelization scheme.
        """
        n_cpus = int(os.cpu_count())
        # split peptide list into chunks
        n_samples = len(self.samples)
        n_alleles = len(self.alleles)
        n_jobs = n_samples + (n_alleles + 1)*n_samples
        cpus_per_job = []
        cpus_per_job += [5 for x in range(n_samples)]  # for all-peptide runs
        cpus_per_job += [1 for x in range(n_samples*n_alleles)]  # for allele-specific runs
        cpus_per_job += [5 for x in range(n_samples)]  # for non-binder runs
        cpus_per_job = [len(x) if len(x) > 0 else 1 for x in array_split(range(n_cpus), n_samples)]
        jobs = []
        os.chdir(self.tmp_folder)
        # first by sample
        for sample in self.samples:
            i = self.samples.index(sample)
            k = cpus_per_job[i]
            n = len(self.alleles)
            fname = Path(self.tmp_folder, f'{sample.sample_name}_forgibbs.csv')
            peps = np.array(clean_peptides(sample.peptides))
            lengths = np.vectorize(len)(peps)
            peps = peps[(lengths >= self.min_length) & (lengths <= self.max_length)]
            peps.tofile(str(fname), '\n', '%s')
            if self.mhc_class == 'I':
                command = f'{self.GIBBSCLUSTER} -f {fname} -P {sample.sample_name} -k 5 ' \
                          f'-g 1-{n if n>5 else 5} -T -j 2 -C -D 4 -I 1 -G'.split(' ')
            else:
                command = f'{self.GIBBSCLUSTER} -f {fname} -P {sample.sample_name} -k 5 ' \
                          f'-g 1-{n if n>5 else 5} -T -j 2 -G'.split(' ')
            jobs.append(subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
        # now by allele
        alleles = list(self.predictions['Allele'].unique())
        samples = list(self.predictions['Sample'].unique())
        n_cpus = int(os.cpu_count())
        n_samples = len(samples) * (len(alleles) + 1)
        cpus_per_job = [len(x) if len(x) > 0 else 1 for x in array_split(range(n_cpus), n_samples)]
        jobs = []
        os.chdir(self.tmp_folder)
        i = 0
        for sample in self.samples:
            self.supervised_gibbs_directories[sample.sample_name] = {}
            sample_peps = self.predictions.loc[self.predictions['Sample'] == sample.sample_name, :]
            # Strong + weak binders per allele for this sample.
            allele_peps = {}
            for allele in alleles:
                allele_peps[allele] = set(list(sample_peps.loc[(sample_peps['Allele'] == allele) &
                                                              ((sample_peps['Binder'] == 'Strong') |
                                                               (sample_peps[
                                                                    'Binder'] == 'Weak')), 'Peptide'].unique()))
            # "unannotated" = peptides not binding (strongly/weakly) to any allele.
            allele_sets = allele_peps
            allele_sets['unannotated'] = set(list(sample_peps['Peptide']))
            for allele in alleles:
                allele_sets['unannotated'] = allele_sets['unannotated'] - allele_sets[allele]
            for allele, peps in allele_sets.items():
                n_cpus = cpus_per_job[i]
                i += 1
                fname = Path(self.tmp_folder, f"{allele}_{sample.sample_name}_forgibbs.csv")
                peps = np.array(list(allele_sets[allele]))
                # Too few peptides to produce a meaningful motif: record None.
                if len(peps) < 10:
                    self.supervised_gibbs_directories[sample.sample_name][allele] = None
                else:
                    lengths = np.vectorize(len)(peps)
                    peps = peps[(lengths >= self.min_length) & (lengths <= self.max_length)]
                    peps.tofile(str(fname), '\n', '%s')
                    g = "1-5" if allele == "unannotated" else "1"
                    if self.mhc_class == 'I':
                        # Murine H2-Kb presents 8-mers; other alleles use 9-mer cores.
                        if 'kb' in allele.lower():
                            length = 8
                        else:
                            length = 9
                        command = f'{self.GIBBSCLUSTER} -f {fname} -P {allele}_{sample.sample_name} -k 24 -l {str(length)} ' \
                                  f'-g {g} -T -j 2 -C -D 4 -I 1 -G'.split(' ')
                    else:
                        command = f'{self.GIBBSCLUSTER} -f {fname} -P {allele}_{sample.sample_name} -k 24 ' \
                                  f'-g {g} -T -j 2 -G'.split(' ')
                    jobs.append(subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
        for job in jobs:
            job.communicate()
            if job.returncode != 0:
                raise subprocess.SubprocessError(f"Error in running: {job.args}\n\n{job.stderr}")
        ls = list(self.tmp_folder.glob('*'))
        for f in ls:
            if Path(f).is_dir():
                self.gibbs_directories.append(f)
    def cluster_with_gibbscluster_by_allele(self):
        """Run GibbsCluster per allele (strong/weak binders) plus the unannotated set.

        Requires ``self.predictions`` to be populated. Result directories are
        recorded in ``self.supervised_gibbs_directories[sample][allele]``;
        sets with fewer than 20 peptides are recorded as ``None``.
        """
        alleles = list(self.predictions['Allele'].unique())
        samples = list(self.predictions['Sample'].unique())
        n_cpus = int(os.cpu_count())
        n_samples = len(samples) * (len(alleles) + 1)
        cpus_per_job = [len(x) if len(x) > 0 else 1 for x in array_split(range(n_cpus), n_samples)]
        jobs = []
        os.chdir(self.tmp_folder)
        i = 0
        for sample in self.samples:
            self.supervised_gibbs_directories[sample.sample_name] = {}
            sample_peps = self.predictions.loc[self.predictions['Sample'] == sample.sample_name, :]
            allele_peps = {}
            for allele in alleles:
                allele_peps[allele] = set(list(sample_peps.loc[(sample_peps['Allele'] == allele) &
                                                              ((sample_peps['Binder'] == 'Strong') |
                                                               (sample_peps['Binder'] == 'Weak')), 'Peptide'].unique()))
            '''allele_sets = {}
            for allele1 in alleles:
                allele1_set = allele_peps[allele1]
                for allele2 in alleles:
                    if allele1 == allele2:
                        continue
                    allele1_set = allele1_set - allele_peps[allele2]
                allele_sets[allele1] = allele1_set
            '''
            # "unannotated" = peptides not assigned to any allele as a binder.
            allele_sets = allele_peps
            allele_sets['unannotated'] = set(list(sample_peps['Peptide']))
            for allele in alleles:
                allele_sets['unannotated'] = allele_sets['unannotated'] - allele_sets[allele]
            for allele, peps in allele_sets.items():
                n_cpus = cpus_per_job[i]
                i += 1
                fname = Path(self.tmp_folder, f"{allele}_{sample.sample_name}_forgibbs.csv")
                peps = np.array(list(allele_sets[allele]))
                if len(peps) < 20:
                    self.supervised_gibbs_directories[sample.sample_name][allele] = None
                else:
                    lengths = np.vectorize(len)(peps)
                    peps = peps[(lengths >= self.min_length) & (lengths <= self.max_length)]
                    peps.tofile(str(fname), '\n', '%s')
                    # Unannotated peptides get 1-5 groups; single-allele sets one group.
                    g = "1-5" if allele == "unannotated" else "1"
                    if self.mhc_class == 'I':
                        # Murine H2-Kb presents 8-mers; other alleles use 9-mer cores.
                        if 'kb' in allele.lower():
                            length = 8
                        else:
                            length = 9
                        command = f'{self.GIBBSCLUSTER} -f {fname} -P {allele}_{sample.sample_name} -k {5 if allele == "unannotated" else 1} -l {str(length)} ' \
                                  f'-g {g} -T -j 2 -C -D 4 -I 1 -G'.split(' ')
                    else:
                        command = f'{self.GIBBSCLUSTER} -f {fname} -P {allele}_{sample.sample_name} -k {5 if allele == "unannotated" else 1} ' \
                                  f'-g {g} -T -j 2 -G'.split(' ')
                    jobs.append(subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
                    # job = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                    # job.communicate()
        for job in jobs:
            job.communicate()
            if job.returncode != 0:
                raise subprocess.SubprocessError(f"Error in running: {job.args}\n\n{job.stderr}")
        # Map each (sample, allele) to the directory GibbsCluster created for it.
        for sample in self.samples:
            for allele in alleles + ['unannotated']:
                ls = list(self.tmp_folder.glob(allele+'_'+sample.sample_name+'*'))
                for f in ls:
                    if Path(f).is_dir():
                        self.supervised_gibbs_directories[sample.sample_name][allele] = f
    def parse_netmhc_output(self, stdout: str, sample: str):
        """Parse netMHC(II)pan stdout into rows of ``self.predictions``.

        Rank thresholds follow the tools' conventions: class I strong <= 0.5,
        weak <= 2.0 %rank; class II strong <= 2, weak <= 10 %rank.
        NOTE(review): DataFrame.append was removed in pandas 2.0 — this
        requires pandas < 2 (or a switch to pd.concat).
        """
        rows = []
        lines = stdout.split('\n')
        # Column positions of allele / peptide / %rank in the tool's table output.
        if self.mhc_class == 'I':  # works for 4.0 and 4.1, will need to keep an eye on future releases
            allele_idx = 1
            peptide_idx = 2
            rank_idx = 12
        else:  # works for NetMHCIIpan4.0
            allele_idx = 1
            peptide_idx = 2
            rank_idx = 8
        for line in lines:
            line = line.strip()
            line = line.split()
            # Data rows start with a numeric position index; skip headers/comments.
            if not line or line[0] == '#' or not line[0].isnumeric():
                continue
            allele = line[allele_idx].replace('*', '')
            peptide = line[peptide_idx]
            rank = line[rank_idx]
            if self.mhc_class == 'I':
                if float(rank) <= 0.5:
                    binder = 'Strong'
                elif float(rank) <= 2.0:
                    binder = 'Weak'
                else:
                    binder = 'Non-binder'
            else:
                if float(rank) <= 2:
                    binder = 'Strong'
                elif float(rank) <= 10:
                    binder = 'Weak'
                else:
                    binder = 'Non-binder'
            rows.append((sample, peptide, allele, rank, binder))
        self.predictions = self.predictions.append(
            pd.DataFrame(columns=['Sample', 'Peptide', 'Allele', 'Rank', 'Binder'], data=rows),
            ignore_index=True
        )
        # Nothing parsed usually means the tool errored; surface its output.
        if len(rows) == 0:
            print(stdout)
| [
"kkovalchik.chusj@gmail.com"
] | kkovalchik.chusj@gmail.com |
230efaebb3d67455f273faf77d233fb1d038bee7 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/battle/shared/battle_timers.py | 9eb8d5f3eddda8d038fdb34a6114a105b38e380a | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,662 | py | # 2017.05.04 15:22:30 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/shared/battle_timers.py
import SoundGroups
from gui.Scaleform.daapi.view.meta.BattleTimerMeta import BattleTimerMeta
from gui.Scaleform.daapi.view.meta.PrebattleTimerMeta import PrebattleTimerMeta
from gui.battle_control.battle_constants import COUNTDOWN_STATE
from helpers import dependency
from helpers import i18n
from skeletons.gui.battle_session import IBattleSessionProvider
from BattleReplay import g_replayCtrl
class _WWISE_EVENTS:
    """Names of the WWISE sound events the battle timers trigger."""
    BATTLE_ENDING_SOON = 'time_buzzer_02'  # one-shot buzzer when the warning window opens
    COUNTDOWN_TICKING = 'time_countdown'  # looping tick during the final seconds
    BATTLE_END = 'time_over'  # played right before the battle time runs out
    STOP_TICKING = 'time_countdown_stop'  # stops the looping tick
# Seconds left at which the end-of-battle sound fires / battle is considered over.
_BATTLE_END_SOUND_TIME = 2
_BATTLE_END_TIME = 0
# Localized status messages keyed by countdown state.
_STATE_TO_MESSAGE = {COUNTDOWN_STATE.WAIT: i18n.makeString('#ingame_gui:timer/waiting'),
 COUNTDOWN_STATE.START: i18n.makeString('#ingame_gui:timer/starting'),
 COUNTDOWN_STATE.STOP: i18n.makeString('#ingame_gui:timer/started')}
class PreBattleTimer(PrebattleTimerMeta):
    """Flash view driving the pre-battle countdown panel."""

    def __init__(self):
        super(PreBattleTimer, self).__init__()

    def setWinConditionText(self, text):
        """Forward the win-condition string to the Flash side."""
        self.as_setWinConditionTextS(text)

    def setCountdown(self, state, timeLeft):
        """Show the localized message for *state* and show/hide the digits."""
        self.as_setMessageS(_STATE_TO_MESSAGE[state])
        if state == COUNTDOWN_STATE.WAIT:
            # No meaningful countdown value while waiting for players.
            self.as_hideTimerS()
            return
        self.as_setTimerS(timeLeft)

    def hideCountdown(self, state, speed):
        """Fade the whole panel out at *speed*, keeping the state message fresh."""
        self.as_setMessageS(_STATE_TO_MESSAGE[state])
        self.as_hideAllS(speed)
class BattleTimer(BattleTimerMeta):
    """Flash view for the in-battle clock.

    Pushes the remaining battle time to Flash and drives the
    ending-soon buzzer, countdown ticking and battle-end sounds.
    """
    # Injected battle session provider; gives access to the arena visitor.
    sessionProvider = dependency.descriptor(IBattleSessionProvider)

    def __init__(self):
        super(BattleTimer, self).__init__()
        self.__isTicking = False
        self.__state = COUNTDOWN_STATE.UNDEFINED
        self.__roundLength = self.arenaVisitor.type.getRoundLength()
        self.__endingSoonTime = self._getEndingSoonTime()
        self.__endWarningIsEnabled = self.__checkEndWarningStatus()
        # Sound handles by event name, kept so destroy() can stop them.
        self.__sounds = dict()

    def destroy(self):
        """Stop and release every sound started by this timer."""
        for sound in self.__sounds.values():
            sound.stop()
        self.__sounds.clear()
        super(BattleTimer, self).destroy()

    @property
    def arenaVisitor(self):
        return self.sessionProvider.arenaVisitor

    def setTotalTime(self, totalTime):
        """Update the displayed mm:ss time and trigger end-of-battle audio cues."""
        minutes, seconds = divmod(int(totalTime), 60)
        if self.__endWarningIsEnabled and self.__state == COUNTDOWN_STATE.STOP:
            if _BATTLE_END_TIME < totalTime <= self.__endingSoonTime:
                # Inside the "ending soon" window: start ticking once, and
                # play the buzzer exactly when the window is entered.
                if not self.__isTicking:
                    self.__startTicking()
                if totalTime == self.__endingSoonTime:
                    self._callWWISE(_WWISE_EVENTS.BATTLE_ENDING_SOON)
            elif self.__isTicking:
                self.__stopTicking()
            # The final sound is suppressed during replay playback —
            # presumably to avoid retriggering it while scrubbing; confirm.
            playBattleEnd = not g_replayCtrl.isPlaying if g_replayCtrl is not None else True
            if totalTime == _BATTLE_END_SOUND_TIME and playBattleEnd:
                self._callWWISE(_WWISE_EVENTS.BATTLE_END)
        self.as_setTotalTimeS('{:02d}'.format(minutes), '{:02d}'.format(seconds))
        return

    def setState(self, state):
        # Sounds only play once the countdown has stopped (battle running).
        self.__state = state

    def hideTotalTime(self):
        self.as_showBattleTimerS(False)

    def showTotalTime(self):
        self.as_showBattleTimerS(True)

    def _callWWISE(self, wwiseEventName):
        """
        Play the named WWISE event and remember its handle for cleanup.
        Protected (not private) for testing purposes.
        """
        sound = SoundGroups.g_instance.getSound2D(wwiseEventName)
        if sound is not None:
            sound.play()
            self.__sounds[wwiseEventName] = sound
        return

    def _getEndingSoonTime(self):
        """Seconds-left threshold at which the ending-soon warning begins."""
        return self.arenaVisitor.type.getBattleEndingSoonTime()

    def __startTicking(self):
        # Start the looping tick and switch the timer to the warning colour.
        self._callWWISE(_WWISE_EVENTS.COUNTDOWN_TICKING)
        self.__isTicking = True
        self.as_setColorS(self.__isTicking)

    def __stopTicking(self):
        # Stop the looping tick and restore the normal timer colour.
        self._callWWISE(_WWISE_EVENTS.STOP_TICKING)
        self.__isTicking = False
        self.as_setColorS(self.__isTicking)

    def __validateEndingSoonTime(self):
        # The warning threshold must fall strictly inside the round length.
        return 0 < self.__endingSoonTime < self.__roundLength

    def __checkEndWarningStatus(self):
        endingSoonTimeIsValid = self.__validateEndingSoonTime()
        return self.arenaVisitor.isBattleEndWarningEnabled() and endingSoonTimeIsValid
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\battle\shared\battle_timers.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:22:30 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
def whosSalty(message):
    """Return the username that follows the '!!salt' command in *message*.

    Raises IndexError if the message does not contain '!!salt' (unchanged
    from the original behaviour).
    """
    # Split only once so usernames containing '!!salt' are not mangled, and
    # strip whitespace so "!!salt bob" yields "bob" rather than " bob".
    tagged_user = message.split('!!salt', 1)[1]
    return tagged_user.strip()
"adammcwilliam@gmail.com"
] | adammcwilliam@gmail.com |
035a827e773c3de395f449d5ef3dc7259c37a300 | 7ed2fe385b37f97b6a7443376eb7a81daecf0925 | /myPipeline/pipelineMainFunction.py | d61326f34ae0f4580546976b988d2462cfc40bd5 | [] | no_license | qiusiyuan/maya-mypipeline-python | ce370025da957bade30c8e89a72f511122cf2eac | ac32a755191b02e3834ae49d8450f158126e8c29 | refs/heads/master | 2021-01-11T05:10:39.059070 | 2017-02-13T21:58:42 | 2017-02-13T21:58:42 | 81,138,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | ### pipelineMainFunction ###
import os
import os.path
import maya.cmds as cmds
### ----- openProjectData ------###
def openProjectData(project_path):
    """Return the names of the project folders inside ``project_path``.

    Only sub-directories count as projects; plain files are ignored.

    project_path : str -- directory that contains all pipeline projects
    rtype        : list of str
    """
    # os.path.join tolerates paths with or without a trailing separator,
    # unlike the previous ``project_path + df`` concatenation which silently
    # failed when the caller omitted the trailing slash.
    return [entry for entry in os.listdir(project_path)
            if os.path.isdir(os.path.join(project_path, entry))]
### ----- delete_project ----- ###
def delete_project(project_path, project_name, *pargs):
    """Empty the named project folder under ``project_path``.

    Walks the project tree bottom-up, deleting every file and sub-directory.
    The project directory itself is left in place (matching the original
    behaviour, so callers can re-populate it).

    project_path : str -- directory that contains all projects
    project_name : str -- name of the project folder to empty
    *pargs           -- ignored; absorbs extra arguments from Maya UI callbacks
    """
    # os.path.join works whether or not project_path ends with a separator,
    # unlike the previous string concatenation.
    root_dir = os.path.join(project_path, project_name)
    # topdown=False yields leaves first, so files are removed before the
    # directories that contain them.
    for root, dirs, files in os.walk(root_dir, topdown=False):
        for name in files:
            os.remove(os.path.join(root, name))
        for name in dirs:
            os.rmdir(os.path.join(root, name))
### ----- save_selected ----- ###
## save currently selected item with given name in the folder(category) user choose ##
## input item_name : str
## input folder_path : str
## rtype : none
## Author : Qiu Siyuan
## -------------------------------------------------------------- ##
def save_selected( item_name,folder_path):
    """Export the current Maya selection as a MayaBinary file named
    *item_name* inside *folder_path* (forward slashes are fine for Maya
    on every platform)."""
    cmds.file(folder_path+'/'+item_name, exportSelected = True,type = "mayaBinary")
### ----- save_scene ------ ###
## save the whole scene to the folder user choose ##
## input scene_name : str
## input folder_path : str
## rtype : none
## Author : Qiu Siyuan
## ----------------------------------------------------------------- ##
def save_scene(scene_name, folder_path):
    """Rename the open Maya scene to *folder_path*/*scene_name* and save
    it as MayaBinary. Note this changes the scene's current file path."""
    cmds.file(rename = folder_path + '/'+ scene_name)
    cmds.file(save = True, type = "mayaBinary")
##
| [
"noreply@github.com"
] | qiusiyuan.noreply@github.com |
ce89af89e32ee3e5fab47cc25cfea1569b838ced | 30a4ce8a942cf524950df0155ac1bd9b5664c9da | /preprocessing_scripts/remove_blurry_images.py | b562cc0659381c4e95a25261e083335160420de0 | [] | no_license | rishabkatta/Diabetic-Retinopathy-Detection | af21b06bfe72e4407ad8d60f9d62c9182ef6d3a7 | eaa621c6092e3720b36f5a8623130621ebd6e35a | refs/heads/main | 2023-05-01T19:46:15.045646 | 2021-05-22T19:26:23 | 2021-05-22T19:26:23 | 369,883,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | import cv2
import os
def remove_blurry_images(src_folder):
img_names_list = os.listdir(src_folder)
for img in img_names_list:
if img.endswith('.jpeg'):
img2 = cv2.imread(src_folder+img, cv2.IMREAD_GRAYSCALE)
laplacian_var = cv2.Laplacian(img2, cv2.CV_64F).var()
if laplacian_var < 10:
print(img + " :Image blurry")
os.remove(src_folder+img)
remove_blurry_images(src_folder = '../processed_299_299/') | [
"rishabkatta22@gmail.com"
] | rishabkatta22@gmail.com |
7c77d04d6ae5ca281c1f431851c0b5ec3df2fe8e | 0a1285f387bf9b17424dd5da3757f5ae5662b406 | /ryoki/accounts/views.py | 77e77d32eb914d111a3cdae50ca61656e6f0d1c1 | [] | no_license | monkhaus/django_tailwindcss | 9ed9fbb56132a4d833707897ec990b8df4dfc206 | c2daf93cc7482bb20beead3e073dbf844481a64d | refs/heads/main | 2023-03-09T20:12:38.598781 | 2021-01-18T08:19:25 | 2021-01-18T08:19:25 | 324,114,303 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth.forms import UserCreationForm
from .forms import CreateUserForm
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .decorators import unauthenticated_user, allowed_users
from django.contrib.auth.models import Group
@unauthenticated_user
def registerPage(request):
    """Render the sign-up form; on a valid POST create the user, add them to
    the 'customer' group, flash a success message and redirect to login.

    Only reachable by anonymous users (enforced by @unauthenticated_user).
    """
    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            # Removed leftover debug print("lol") from here.
            user = form.save()
            username = form.cleaned_data.get('username')
            # New accounts always start in the 'customer' group; the group
            # must already exist in the database.
            group = Group.objects.get(name='customer')
            user.groups.add(group)
            messages.success(request, "Account was created for {}".format(username))
            return redirect(reverse('accounts:loginPage'))
    # GET, or invalid POST: re-render the (possibly error-annotated) form.
    context = {'form':form}
    return render(request, 'accounts/register.html', context)
@unauthenticated_user
def loginPage(request):
    """Render the login form and authenticate submitted credentials.

    On success the user is logged in and sent to the dashboard; on failure
    an info message is flashed and the login page is reloaded.
    """
    if request.method == 'POST':
        submitted_name = request.POST.get("username")
        submitted_pass = request.POST.get("password")
        account = authenticate(request, username=submitted_name, password=submitted_pass)
        if account is None:
            # Bad credentials: flash and bounce back to the form.
            messages.info(request, 'username or password is incorrect')
            return redirect(reverse('accounts:loginPage'))
        login(request, account)
        return redirect(reverse('dashboard:index'))
    return render(request, 'accounts/login.html', {})
def logoutUser(request):
    """Log the current user out and send them back to the login page."""
    logout(request)
    return redirect(reverse('accounts:loginPage'))
"tom.monkhouse95@gmail.com"
] | tom.monkhouse95@gmail.com |
efd1a695c880f32f7ca9e19c996ce4ddfcd3aba0 | a685fa36823caa4b910969e80adbcae4f2829bd6 | /Django/surveyForm/surveyForm/settings.py | 6bcb8aa622819edcd41b0cb56a5f57fe504bcc62 | [] | no_license | hmp36/Python | 35e4fbc003b216ca6c7c45c558dd4f24edba5b9a | dcddf40a0428542d78f2e32d4755b54ebdaadffd | refs/heads/master | 2021-09-15T01:35:41.013949 | 2018-05-23T13:05:59 | 2018-05-23T13:05:59 | 112,358,856 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | """
Django settings for surveyForm project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=vr693pdpgsgdjb+x2!o^w+s5zs)86b&9yyo-v#2^rjh9p$*&b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.surveys',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'surveyForm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'surveyForm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"haganpratt@gmail.com"
] | haganpratt@gmail.com |
869cf5c31d163b07e775e440178d957f4fc9f67f | 8bd89a88f353e44f6d2101d3d5388c3682cc0575 | /app/run.py | 3c9fe5153948193b21480bee91a4c0c0e251ab62 | [] | no_license | nicotacor/Disaster-Response-Pipelines | cd3b21e225ca2c5a917b68a3f8dbe73c8bff2e9b | 5efd2af64798c57ea09ec35394184d8339c3ad93 | refs/heads/main | 2023-07-03T21:40:41.260433 | 2021-08-05T17:52:08 | 2021-08-05T17:52:08 | 393,064,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,060 | py | import json
import plotly
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
# load data
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('disaster_messages', engine)
# load model
model = joblib.load("../models/classifier.pkl")
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
# extract data needed for visuals
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
df1 = df.drop(['id','message','original','genre'], axis=1)
category_counts=df1.sum(axis=0)
category_names = df1.columns
# create visuals
graphs = [
{
'data': [
Bar(
x=genre_names,
y=genre_counts
)
],
'layout': {
'title': 'Distribution of Message Genres',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Genre"
}
}
},
{
'data': [
Bar(
x=category_names,
y=category_counts
)
],
'layout': {
'title': 'Distribution of Message Categories',
'yaxis': {
'title': "Count"
},
'xaxis': {
'title': "Category"
}
}
}
]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run(host='127.0.0.1', port=3001, debug=True)
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | nicotacor.noreply@github.com |
654278648914abddc2f6ef83e3848c63670989b5 | a06c382657910483ba6af54efaf3ce0779ed39ad | /python_src/natural_gradient.py | b21c2df1989b18d1e3d01898e41b3e59cf09fa62 | [] | no_license | chaeyeunpark/are-neural-quantum-states-good | 8d546312ee5976df7ddd5d6ab4bb851329599140 | a37ba05856d33f648c78351a38c1517f8233f978 | refs/heads/main | 2023-07-14T18:13:00.312904 | 2021-09-01T21:10:14 | 2021-09-01T21:10:14 | 361,696,144 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,877 | py | import jax
import jax.numpy as jnp
from jax import jacfwd, jacrev
import flax.linen as nn
from tree_utils import to_list, reshape_tree_like
from functools import partial
#from jax.interpreters import xla
import sys
class NGD:
_model: nn.Module
_fisher: jnp.array
_grad: jnp.array
learning_rate: float
beta1: float
beta2: float
t: int
def __init__(self, model: nn.Module, learning_rate: float, *,
beta1: float = 0.9, beta2: float = 0.999):
self._model = model
self._fisher = None
self._grad = None #gradient of the KL divergence
self.learning_rate = learning_rate
self.beta1 = beta1
self.beta2 = beta2
self.t = 0
def update_momentums(self, params, confs_data, confs_model):
new_fisher, grad_model = self._calc_fisher(params, confs_model)
if self._fisher is None:
self._fisher = new_fisher
else:
self._fisher = self.beta2*self._fisher + (1-self.beta2)*new_fisher
output_mean, grad_data = self._calc_grad(params, confs_data)
self.cross_entropy_unnormalized = -output_mean
new_grad = grad_model - to_list(grad_data)
if self._grad is None:
self._grad = new_grad
else:
self._grad = self.beta1*self._grad + (1-self.beta1)*new_grad
@partial(jax.jit, static_argnums = 0)
def _calc_fisher(self, params, confs):
batch_size = confs.shape[0]
grads = jacrev(self._model.apply)(params, confs)
grads = jnp.hstack([g.reshape(batch_size, -1) for g in jax.tree_leaves(grads)])
grad_mean = jnp.mean(grads, axis = 0)
grads = grads - grad_mean[None,:]
new_fisher = jnp.matmul(jnp.transpose(grads), grads) / batch_size
return new_fisher, grad_mean
@partial(jax.jit, static_argnums = 0)
def _calc_grad(self, params, confs):
f_data = lambda p: jnp.mean(self._model.apply(p, confs))
return jax.value_and_grad(f_data)(params)
def update_sgd(self, params):
b = reshape_tree_like(self._grad, params)
return jax.tree_multimap(lambda x, y: x - self.learning_rate*y, params, b)
def update(self, params):
#eps = max(1.0*(0.9**self.t), 1e-3)
self.t += 1
eps = 1e-3
b = jnp.linalg.solve(self._fisher + eps*jnp.identity(self._fisher.shape[0]), self._grad)\
* ((1. - jnp.power(self.beta2, self.t))/(1. - jnp.power(self.beta1, self.t)))
b = reshape_tree_like(b, params)
return jax.tree_multimap(lambda x, y: x - self.learning_rate*y, params, b)
def state_dict(self):
return {'fisher': self._fisher, 'grad': self._grad, 't': self.t}
def load_state_dict(self, state_dict):
self._fisher = state_dict['fisher']
self._grad = state_dict['grad']
self.t = state_dict['t']
| [
"kaeri17@gmail.com"
] | kaeri17@gmail.com |
a979c54a4c4b024bf3c2ae35dc3a870b46d2ed39 | 88e13a6e521db411403744d97042c013cf0d63df | /examples/textbox_cursor.py | 695bf130c53850d0e29d5f1118219560c96952d2 | [
"BSD-3-Clause"
] | permissive | curiousTauseef/guizero | 5caab0cf7a9276a6898cbb0b1a9aa208f1344159 | 7744c41a1e747ade2e5913638586073c27b4db9b | refs/heads/master | 2023-05-31T13:27:32.998132 | 2021-05-25T14:51:10 | 2021-05-25T14:51:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from guizero import App, TextBox, Text
from tkinter import INSERT
def key_pressed(key):
print("key pressed {}".format(key))
print("textbox value = {}".format(textbox.value))
print("cursor pos = {}".format(textbox.tk.index(INSERT)))
app = App()
text = Text(app, text="Enter some text")
textbox = TextBox(app, width=40, command=key_pressed)
app.display() | [
"martin.ohanlon@raspberrypi.org"
] | martin.ohanlon@raspberrypi.org |
db00bcbca877723a4f83baab08aa6dfbf4fbccc4 | 8641773b546fc41a36a78af7e2e1755e37b724c6 | /2020/day19.py | 668802cd0479c9ff7a9daa6e2c140eda404f1fe7 | [] | no_license | RugeljGG/AdventOfCode | 87fe865d748b22ff9456083e84a671896498bb72 | b8367aac0807d123d0921fa6e64404a9d904466e | refs/heads/master | 2023-02-05T19:02:24.008937 | 2023-01-20T18:32:40 | 2023-01-20T18:32:40 | 160,921,509 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 19 05:59:42 2020
@author: gape
"""
import copy
from collections import Counter, defaultdict
import re
import aoc_helper
data = aoc_helper.get_input(19, force=True).strip()
# data = aoc_helper.get_input(19).strip()
print('Day 19 input (first 10 lines):')
print('\n'.join(data.split('\n')[:10]))
print('\nTotal input length: ', len(data))
print('Total input row count: ', len(data.split('\n'))-1)
print("\n############################################################\n")
rules_raw = dict()
r, msgs = data.split('\n\n')
for row in r.split('\n'):
k, v = row.split(': ')
rls = v.split(' | ')
rules_raw[int(k)] = []
for rv in rls:
if '"' in rv:
rules_raw[int(k)].append([rv[1]])
else:
rules_raw[int(k)].append([int(c) for c in rv.split()])
def parse(i, part=1):
if part==2 and i in (11, 8):
return ['_{}_'.format(i)]
r = rules_raw[int(i)]
rls = []
for rl in r:
if isinstance(rl[0], str):
return [rl[0]]
else:
if len(rl) == 1:
for rl1 in parse(rl[0], part=part):
rls.append(rl1)
else:
for rl1 in parse(rl[0], part=part):
for rl2 in parse(rl[1], part=part):
try:
rls.append(rl1+rl2)
except:
print(i)
raise
return rls
# return '|'.join([','.join(r) for r in rls])
t1 = parse(42)
t2 = parse(31)
num = len(t1[0])
count = 0
for msg in msgs.split():
if len(msg)==num*3 and msg[:num] in t1 and msg[num:num*2] in t1 and msg[num*2:num*3] in t2:
count+=1
print("Part 1 answer:", count)
count = 0
correct = []
for msg in msgs.split():
i = 0
phase1 = True
c1 = 0
c2 = 0
while i < len(msg):
if phase1:
if msg[i:i+num] in t1:
i += num
c1 += 1
continue
else:
phase1 = False
else:
if msg[i:i+num] in t2:
i += num
c2 += 1
else:
break
else:
if i==len(msg) and c2>0 and c1 > c2:
count+=1
print("Part 2 answer:", count) | [
"gasper.rugelj@gmail.com"
] | gasper.rugelj@gmail.com |
6c322bd60ea20600c7a1275b99429ca552d28a30 | 57307389bc4e71b3386580ad6c18105c30fc38e3 | /incoming.py | 9f54ff12d5434936c1170d952b42a1d2e47348a0 | [] | no_license | JinhuaW/coding_fun_scripts | 31799c5df522a867a766fcb9020acc381b5c02a0 | 3193216636a75b791c2f79b75d8b7fd542cd7311 | refs/heads/master | 2021-01-11T02:55:47.283193 | 2016-11-17T12:36:38 | 2016-11-17T12:36:38 | 70,907,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | py | #!/usr/bin/env python
import re
from mail import *
from mercurial import *
from compile import *
from database import *
import datetime
def update_current_user(db):
email = get_email()
username = get_user()
db.update_user(username, email)
def handle_answer(id, db):
user = get_user()
node = get_node()
path = user + "/" + id
compile_update(path, node)
target = get_target(path)
email = get_email()
username = get_user()
db.update_user(username, email)
start_time = datetime.datetime.now()
result = check_result(path + "/" + target, id)
end_time = datetime.datetime.now()
total_time = end_time - start_time
time_cost = total_time.total_seconds()
compile_clean(path)
print "============================================================="
if result:
print "Congratulations, success!"
print "============================================================="
print "Your result:"
db.get_rank_time_cost(id, username, time_cost)
db.update_result(id, email, time_cost, 0)
else:
print "You program has failed!"
print "============================================================="
print "All result:"
db.show_rank_time_cost(id)
def handle_exercise(id, db):
update_current_user(db)
diff = get_diff()
desc = get_desc()
mail_list = db.get_maillist()
id = 'N' + id[1:]
db.update_exercise(id, desc)
send_mail_plain(diff, "[coding fun]Exercise %s update"%id, mail_from, mail_list)
def update_handle(id, db):
if id[0] == 'E':
print "exercise update"
handle_exercise(id, db)
elif id[0] == 'N':
print "handle_answer"
handle_answer(id, db)
else:
print "Unkown Exercise Number"
def check_type():
desc = get_desc()
m = re.match(r'^[EN]\d{8}', desc)
if m:
id = m.group()
return id
return None
if __name__ == '__main__':
id = check_type()
db=coding_db()
if id:
update_handle(id, db)
| [
"olineboy@163.com"
] | olineboy@163.com |
01417bd7579615c61c7b69dda895434350d0db5c | aa0270b351402e421631ebc8b51e528448302fab | /sdk/newrelicobservability/azure-mgmt-newrelicobservability/azure/mgmt/newrelicobservability/aio/operations/_monitors_operations.py | 6ad9961466432e670b9d18de4890ca8dc774e281 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 79,234 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._monitors_operations import (
build_create_or_update_request,
build_delete_request,
build_get_metric_rules_request,
build_get_metric_status_request,
build_get_request,
build_list_app_services_request,
build_list_by_resource_group_request,
build_list_by_subscription_request,
build_list_hosts_request,
build_list_monitored_resources_request,
build_switch_billing_request,
build_update_request,
build_vm_host_payload_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MonitorsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.newrelicobservability.aio.NewRelicObservabilityMgmtClient`'s
:attr:`monitors` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_subscription(self, **kwargs: Any) -> AsyncIterable["_models.NewRelicMonitorResource"]:
"""List NewRelicMonitorResource resources by subscription ID.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NewRelicMonitorResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.newrelicobservability.models.NewRelicMonitorResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-07-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.NewRelicMonitorResourceListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("NewRelicMonitorResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/NewRelic.Observability/monitors"}
@distributed_trace
def list_by_resource_group(
self, resource_group_name: str, **kwargs: Any
) -> AsyncIterable["_models.NewRelicMonitorResource"]:
"""List NewRelicMonitorResource resources by resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NewRelicMonitorResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.newrelicobservability.models.NewRelicMonitorResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-07-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.NewRelicMonitorResourceListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_resource_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("NewRelicMonitorResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_resource_group.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors"
}
@distributed_trace_async
async def get(self, resource_group_name: str, monitor_name: str, **kwargs: Any) -> _models.NewRelicMonitorResource:
"""Get a NewRelicMonitorResource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param monitor_name: Name of the Monitors resource. Required.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NewRelicMonitorResource or the result of cls(response)
:rtype: ~azure.mgmt.newrelicobservability.models.NewRelicMonitorResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-07-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.NewRelicMonitorResource] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
monitor_name=monitor_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("NewRelicMonitorResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}"
}
async def _create_or_update_initial(
self,
resource_group_name: str,
monitor_name: str,
resource: Union[_models.NewRelicMonitorResource, IO],
**kwargs: Any
) -> _models.NewRelicMonitorResource:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-07-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.NewRelicMonitorResource] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(resource, (IO, bytes)):
_content = resource
else:
_json = self._serialize.body(resource, "NewRelicMonitorResource")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
monitor_name=monitor_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("NewRelicMonitorResource", pipeline_response)
if response.status_code == 201:
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
deserialized = self._deserialize("NewRelicMonitorResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}"
}
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
monitor_name: str,
resource: _models.NewRelicMonitorResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.NewRelicMonitorResource]:
"""Create a NewRelicMonitorResource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param monitor_name: Name of the Monitors resource. Required.
:type monitor_name: str
:param resource: Resource create parameters. Required.
:type resource: ~azure.mgmt.newrelicobservability.models.NewRelicMonitorResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NewRelicMonitorResource or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.newrelicobservability.models.NewRelicMonitorResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
    @overload
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        monitor_name: str,
        resource: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncLROPoller[_models.NewRelicMonitorResource]:
        """Create a NewRelicMonitorResource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param resource: Resource create parameters. Required.
        :type resource: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either NewRelicMonitorResource or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.newrelicobservability.models.NewRelicMonitorResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Implementation shared by both overloads: accepts either the typed model or a raw stream.
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        monitor_name: str,
        resource: Union[_models.NewRelicMonitorResource, IO],
        **kwargs: Any
    ) -> AsyncLROPoller[_models.NewRelicMonitorResource]:
        """Create a NewRelicMonitorResource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param resource: Resource create parameters. Is either a NewRelicMonitorResource type or a IO
         type. Required.
        :type resource: ~azure.mgmt.newrelicobservability.models.NewRelicMonitorResource or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either NewRelicMonitorResource or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.newrelicobservability.models.NewRelicMonitorResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Per-call header/query overrides, resolved case-insensitively per HTTP semantics.
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-07-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.NewRelicMonitorResource] = kwargs.pop("cls", None)
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # No saved poller state: issue the initial request and poll from its response.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                monitor_name=monitor_name,
                resource=resource,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the typed model (or hand it to a custom cls).
            deserialized = self._deserialize("NewRelicMonitorResource", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True:
            # Default ARM LRO polling; final state is read via the Azure-AsyncOperation header.
            polling_method: AsyncPollingMethod = cast(
                AsyncPollingMethod,
                AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
            )
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            # Caller supplied their own initialized polling strategy.
            polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved state instead of starting a new operation.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
    begin_create_or_update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}"
    }
    @overload
    async def update(
        self,
        resource_group_name: str,
        monitor_name: str,
        properties: _models.NewRelicMonitorResourceUpdate,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.NewRelicMonitorResource:
        """Update a NewRelicMonitorResource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param properties: The resource properties to be updated. Required.
        :type properties: ~azure.mgmt.newrelicobservability.models.NewRelicMonitorResourceUpdate
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NewRelicMonitorResource or the result of cls(response)
        :rtype: ~azure.mgmt.newrelicobservability.models.NewRelicMonitorResource
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    async def update(
        self,
        resource_group_name: str,
        monitor_name: str,
        properties: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.NewRelicMonitorResource:
        """Update a NewRelicMonitorResource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param properties: The resource properties to be updated. Required.
        :type properties: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NewRelicMonitorResource or the result of cls(response)
        :rtype: ~azure.mgmt.newrelicobservability.models.NewRelicMonitorResource
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Implementation shared by both overloads; this is a synchronous (non-LRO) PATCH.
    @distributed_trace_async
    async def update(
        self,
        resource_group_name: str,
        monitor_name: str,
        properties: Union[_models.NewRelicMonitorResourceUpdate, IO],
        **kwargs: Any
    ) -> _models.NewRelicMonitorResource:
        """Update a NewRelicMonitorResource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param properties: The resource properties to be updated. Is either a
         NewRelicMonitorResourceUpdate type or a IO type. Required.
        :type properties: ~azure.mgmt.newrelicobservability.models.NewRelicMonitorResourceUpdate or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NewRelicMonitorResource or the result of cls(response)
        :rtype: ~azure.mgmt.newrelicobservability.models.NewRelicMonitorResource
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map HTTP status codes to azure-core exception types; callers may extend via kwargs["error_map"].
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-07-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.NewRelicMonitorResource] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Typed models are serialized to JSON; raw streams/bytes are sent as-is.
        if isinstance(properties, (IO, bytes)):
            _content = properties
        else:
            _json = self._serialize.body(properties, "NewRelicMonitorResourceUpdate")
        request = build_update_request(
            resource_group_name=resource_group_name,
            monitor_name=monitor_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Raise the mapped exception, attaching the service's structured error payload if any.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("NewRelicMonitorResource", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}"
    }
    async def _delete_initial(  # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, user_email: str, monitor_name: str, **kwargs: Any
    ) -> None:
        """Issue the initial DELETE request for the delete LRO; returns None.
        Used by :meth:`begin_delete`, which handles polling to completion.
        """
        # Map HTTP status codes to azure-core exception types; extendable via kwargs["error_map"].
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-07-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        cls: ClsType[None] = kwargs.pop("cls", None)
        request = build_delete_request(
            resource_group_name=resource_group_name,
            monitor_name=monitor_name,
            subscription_id=self._config.subscription_id,
            user_email=user_email,
            api_version=api_version,
            template_url=self._delete_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        # 200/204 = completed synchronously; 202 = accepted, poll asynchronously.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        response_headers = {}
        if response.status_code == 202:
            # Surface the service-suggested polling delay to the LRO machinery.
            response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
        if cls:
            return cls(pipeline_response, None, response_headers)
    _delete_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}"
    }
    @distributed_trace_async
    async def begin_delete(
        self, resource_group_name: str, user_email: str, monitor_name: str, **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Delete a NewRelicMonitorResource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param user_email: User Email. Required.
        :type user_email: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-07-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        cls: ClsType[None] = kwargs.pop("cls", None)
        polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token: Optional[str] = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # No saved poller state: issue the initial DELETE and poll from its response.
            raw_result = await self._delete_initial(  # type: ignore
                resource_group_name=resource_group_name,
                user_email=user_email,
                monitor_name=monitor_name,
                api_version=api_version,
                cls=lambda x, y, z: x,  # keep the raw pipeline response for the poller
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Delete has no body to deserialize; only invoke a custom cls if supplied.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True:
            # Default ARM LRO polling; final state is read via the Azure-AsyncOperation header.
            polling_method: AsyncPollingMethod = cast(
                AsyncPollingMethod,
                AsyncARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs),
            )
        elif polling is False:
            polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
        else:
            # Caller supplied their own initialized polling strategy.
            polling_method = polling
        if cont_token:
            # Rehydrate a poller from previously saved state instead of starting a new operation.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore
    begin_delete.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}"
    }
    @overload
    async def get_metric_rules(
        self,
        resource_group_name: str,
        monitor_name: str,
        request: _models.MetricsRequest,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.MetricRules:
        """Get metric rules.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the get metrics status request. Required.
        :type request: ~azure.mgmt.newrelicobservability.models.MetricsRequest
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MetricRules or the result of cls(response)
        :rtype: ~azure.mgmt.newrelicobservability.models.MetricRules
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    async def get_metric_rules(
        self,
        resource_group_name: str,
        monitor_name: str,
        request: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.MetricRules:
        """Get metric rules.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the get metrics status request. Required.
        :type request: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MetricRules or the result of cls(response)
        :rtype: ~azure.mgmt.newrelicobservability.models.MetricRules
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Implementation shared by both overloads; POSTs to the getMetricRules action endpoint.
    @distributed_trace_async
    async def get_metric_rules(
        self, resource_group_name: str, monitor_name: str, request: Union[_models.MetricsRequest, IO], **kwargs: Any
    ) -> _models.MetricRules:
        """Get metric rules.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the get metrics status request. Is either a MetricsRequest type
         or a IO type. Required.
        :type request: ~azure.mgmt.newrelicobservability.models.MetricsRequest or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MetricRules or the result of cls(response)
        :rtype: ~azure.mgmt.newrelicobservability.models.MetricRules
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map HTTP status codes to azure-core exception types; extendable via kwargs["error_map"].
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-07-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.MetricRules] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Typed models are serialized to JSON; raw streams/bytes are sent as-is.
        if isinstance(request, (IO, bytes)):
            _content = request
        else:
            _json = self._serialize.body(request, "MetricsRequest")
        # NOTE: `request` is rebound from the input payload to the outgoing HttpRequest here.
        request = build_get_metric_rules_request(
            resource_group_name=resource_group_name,
            monitor_name=monitor_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.get_metric_rules.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("MetricRules", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_metric_rules.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}/getMetricRules"
    }
    @overload
    async def get_metric_status(
        self,
        resource_group_name: str,
        monitor_name: str,
        request: _models.MetricsStatusRequest,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.MetricsStatusResponse:
        """Get metric status.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the get metrics status request. Required.
        :type request: ~azure.mgmt.newrelicobservability.models.MetricsStatusRequest
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MetricsStatusResponse or the result of cls(response)
        :rtype: ~azure.mgmt.newrelicobservability.models.MetricsStatusResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    async def get_metric_status(
        self,
        resource_group_name: str,
        monitor_name: str,
        request: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.MetricsStatusResponse:
        """Get metric status.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the get metrics status request. Required.
        :type request: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MetricsStatusResponse or the result of cls(response)
        :rtype: ~azure.mgmt.newrelicobservability.models.MetricsStatusResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Implementation shared by both overloads; POSTs to the getMetricStatus action endpoint.
    @distributed_trace_async
    async def get_metric_status(
        self,
        resource_group_name: str,
        monitor_name: str,
        request: Union[_models.MetricsStatusRequest, IO],
        **kwargs: Any
    ) -> _models.MetricsStatusResponse:
        """Get metric status.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the get metrics status request. Is either a MetricsStatusRequest
         type or a IO type. Required.
        :type request: ~azure.mgmt.newrelicobservability.models.MetricsStatusRequest or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MetricsStatusResponse or the result of cls(response)
        :rtype: ~azure.mgmt.newrelicobservability.models.MetricsStatusResponse
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map HTTP status codes to azure-core exception types; extendable via kwargs["error_map"].
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-07-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.MetricsStatusResponse] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Typed models are serialized to JSON; raw streams/bytes are sent as-is.
        if isinstance(request, (IO, bytes)):
            _content = request
        else:
            _json = self._serialize.body(request, "MetricsStatusRequest")
        # NOTE: `request` is rebound from the input payload to the outgoing HttpRequest here.
        request = build_get_metric_status_request(
            resource_group_name=resource_group_name,
            monitor_name=monitor_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.get_metric_status.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("MetricsStatusResponse", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_metric_status.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}/getMetricStatus"
    }
    @overload
    def list_app_services(
        self,
        resource_group_name: str,
        monitor_name: str,
        request: _models.AppServicesGetRequest,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncIterable["_models.AppServiceInfo"]:
        """List the app service resources currently being monitored by the NewRelic resource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the app services get request. Required.
        :type request: ~azure.mgmt.newrelicobservability.models.AppServicesGetRequest
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AppServiceInfo or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.newrelicobservability.models.AppServiceInfo]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    def list_app_services(
        self,
        resource_group_name: str,
        monitor_name: str,
        request: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncIterable["_models.AppServiceInfo"]:
        """List the app service resources currently being monitored by the NewRelic resource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the app services get request. Required.
        :type request: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AppServiceInfo or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.newrelicobservability.models.AppServiceInfo]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Implementation shared by both overloads. Note: a sync def that returns an AsyncIterable --
    # requests are only made lazily, as AsyncItemPaged is iterated.
    @distributed_trace
    def list_app_services(
        self,
        resource_group_name: str,
        monitor_name: str,
        request: Union[_models.AppServicesGetRequest, IO],
        **kwargs: Any
    ) -> AsyncIterable["_models.AppServiceInfo"]:
        """List the app service resources currently being monitored by the NewRelic resource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the app services get request. Is either a AppServicesGetRequest
         type or a IO type. Required.
        :type request: ~azure.mgmt.newrelicobservability.models.AppServicesGetRequest or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AppServiceInfo or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.newrelicobservability.models.AppServiceInfo]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-07-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.AppServicesListResponse] = kwargs.pop("cls", None)
        # Map HTTP status codes to azure-core exception types; extendable via kwargs["error_map"].
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Typed models are serialized to JSON once, up front; raw streams/bytes are sent as-is.
        if isinstance(request, (IO, bytes)):
            _content = request
        else:
            _json = self._serialize.body(request, "AppServicesGetRequest")
        def prepare_request(next_link=None):
            # Build the first-page POST, or a GET against the service-provided next_link.
            if not next_link:
                request = build_list_app_services_request(
                    resource_group_name=resource_group_name,
                    monitor_name=monitor_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    content_type=content_type,
                    json=_json,
                    content=_content,
                    template_url=self.list_app_services.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Split one page into (next_link, items) for AsyncItemPaged.
            deserialized = self._deserialize("AppServicesListResponse", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page through the pipeline, raising on non-200 responses.
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list_app_services.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}/listAppServices"
    }
    @overload
    async def switch_billing(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        monitor_name: str,
        request: _models.SwitchBillingRequest,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> None:
        """Switches the billing for NewRelic monitor resource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the switch billing request. Required.
        :type request: ~azure.mgmt.newrelicobservability.models.SwitchBillingRequest
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    async def switch_billing(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        monitor_name: str,
        request: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> None:
        """Switches the billing for NewRelic monitor resource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the switch billing request. Required.
        :type request: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def switch_billing( # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        monitor_name: str,
        request: Union[_models.SwitchBillingRequest, IO],
        **kwargs: Any
    ) -> None:
        """Switches the billing for NewRelic monitor resource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the switch billing request. Is either a SwitchBillingRequest
         type or a IO type. Required.
        :type request: ~azure.mgmt.newrelicobservability.models.SwitchBillingRequest or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map HTTP error statuses to azure-core exception types; callers may extend via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-07-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[None] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes pass through unchanged; typed models are serialized to JSON.
        if isinstance(request, (IO, bytes)):
            _content = request
        else:
            _json = self._serialize.body(request, "SwitchBillingRequest")
        request = build_switch_billing_request(
            resource_group_name=resource_group_name,
            monitor_name=monitor_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.switch_billing.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        # Service returns 204 No Content on success; anything else is an error.
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    switch_billing.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}/switchBilling"
    }
    # Overload: `request` supplied as a typed HostsGetRequest model (serialized to JSON).
    @overload
    def list_hosts(
        self,
        resource_group_name: str,
        monitor_name: str,
        request: _models.HostsGetRequest,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncIterable["_models.VMInfo"]:
        """List the compute vm resources currently being monitored by the NewRelic resource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the Hosts get request. Required.
        :type request: ~azure.mgmt.newrelicobservability.models.HostsGetRequest
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VMInfo or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.newrelicobservability.models.VMInfo]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    # Overload: `request` supplied as a raw IO stream, sent as-is in the request body.
    @overload
    def list_hosts(
        self,
        resource_group_name: str,
        monitor_name: str,
        request: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> AsyncIterable["_models.VMInfo"]:
        """List the compute vm resources currently being monitored by the NewRelic resource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the Hosts get request. Required.
        :type request: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VMInfo or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.newrelicobservability.models.VMInfo]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def list_hosts(
        self, resource_group_name: str, monitor_name: str, request: Union[_models.HostsGetRequest, IO], **kwargs: Any
    ) -> AsyncIterable["_models.VMInfo"]:
        """List the compute vm resources currently being monitored by the NewRelic resource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :param request: The details of the Hosts get request. Is either a HostsGetRequest type or a IO
         type. Required.
        :type request: ~azure.mgmt.newrelicobservability.models.HostsGetRequest or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VMInfo or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.newrelicobservability.models.VMInfo]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-07-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.VMHostsListResponse] = kwargs.pop("cls", None)
        # Map HTTP error statuses to azure-core exception types; callers may extend via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes pass through unchanged; typed models are serialized to JSON.
        if isinstance(request, (IO, bytes)):
            _content = request
        else:
            _json = self._serialize.body(request, "HostsGetRequest")
        def prepare_request(next_link=None):
            # First page: build the request from operation metadata; later pages follow next_link.
            if not next_link:
                request = build_list_hosts_request(
                    resource_group_name=resource_group_name,
                    monitor_name=monitor_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    content_type=content_type,
                    json=_json,
                    content=_content,
                    template_url=self.list_hosts.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) for the pager.
            deserialized = self._deserialize("VMHostsListResponse", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem) # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list_hosts.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}/listHosts"
    }
    @distributed_trace
    def list_monitored_resources(
        self, resource_group_name: str, monitor_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.MonitoredResource"]:
        """List the resources currently being monitored by the NewRelic monitor resource.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either MonitoredResource or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.newrelicobservability.models.MonitoredResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-07-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        cls: ClsType[_models.MonitoredResourceListResponse] = kwargs.pop("cls", None)
        # Map HTTP error statuses to azure-core exception types; callers may extend via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build the request from operation metadata; later pages follow next_link.
            if not next_link:
                request = build_list_monitored_resources_request(
                    resource_group_name=resource_group_name,
                    monitor_name=monitor_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_monitored_resources.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) for the pager.
            deserialized = self._deserialize("MonitoredResourceListResponse", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem) # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list_monitored_resources.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}/monitoredResources"
    }
    @distributed_trace_async
    async def vm_host_payload(
        self, resource_group_name: str, monitor_name: str, **kwargs: Any
    ) -> _models.VMExtensionPayload:
        """Returns the payload that needs to be passed in the request body for installing NewRelic agent
        on a VM.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param monitor_name: Name of the Monitors resource. Required.
        :type monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VMExtensionPayload or the result of cls(response)
        :rtype: ~azure.mgmt.newrelicobservability.models.VMExtensionPayload
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map HTTP error statuses to azure-core exception types; callers may extend via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-07-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        cls: ClsType[_models.VMExtensionPayload] = kwargs.pop("cls", None)
        request = build_vm_host_payload_request(
            resource_group_name=resource_group_name,
            monitor_name=monitor_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.vm_host_payload.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("VMExtensionPayload", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    vm_host_payload.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/NewRelic.Observability/monitors/{monitorName}/vmHostPayloads"
    }
| [
"noreply@github.com"
] | fangchen0601.noreply@github.com |
68d4e4acf13f56dffb71d02700b3026366f2f714 | 8936f767df76a76da60f9f6ec059e455b351d262 | /pyhanlp/tests/Eng.py | 1a7cbc0eb0a33af74fcffe9d26cc7bdc673bceba | [
"Apache-2.0"
] | permissive | mx-pan/Papers_Classification | 8d4a9dd50dfd3993cf67edf8c85a6b4bbc3e0b9e | d9c46ae889a6a4266f8b7b9db0e56fb33bb7d1d6 | refs/heads/master | 2020-05-13T17:32:06.358527 | 2019-04-16T09:27:50 | 2019-04-16T09:27:50 | 181,644,178 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,586 | py | #usr/bin/python
# -*- coding:utf-8 -*-
from os import listdir
import os.path
import os
import numpy as np
import pandas as pd
import re
import sys
import importlib
importlib.reload(sys)
from gensim.models import Word2Vec
import logging,gensim,os
from gensim import corpora
from gensim import corpora, models, similarities
def GetFileList(dir, FileList):
    """Recursively collect every file path found under *dir* into *FileList*.

    :param dir: a file or directory path; a file is appended directly, a
        directory is walked depth-first.
    :param FileList: the accumulator list (mutated in place).
    :return: FileList, for call-chaining convenience.
    """
    if os.path.isfile(dir):
        FileList.append(dir)
    elif os.path.isdir(dir):
        for entry in os.listdir(dir):
            # Recurse into each child, whether file or sub-directory.
            GetFileList(os.path.join(dir, entry), FileList)
    return FileList
def GetAbstract(FileList):
    """Read every sample file and return the 'Abstract' column as a Series."""
    # Read each gbk-encoded file to classify, join them column-wise, then
    # transpose so that each input file becomes one row.
    content = pd.concat([pd.read_table(name, encoding="gbk", engine='python') for name in FileList], axis=1)# read the files awaiting classification and their contents
    content=content.T
    # NOTE(review): 'no' appears twice on purpose so that drop() below removes
    # both placeholder columns — confirm against the sample-file layout.
    content.columns=['title','no','Key_word','no','Abstract']
    content.drop(['no'],axis=1,inplace=True)
    Abstract= content['Abstract']
    return Abstract
def GetLabelList(LabelPath):
    """Read one category label per line from the gbk-encoded file at *LabelPath*.

    Each line is stripped of its trailing newline and of any surrounding '*'
    decoration.  Returns the labels as a list of strings.
    """
    # "with" guarantees the handle is closed even if reading raises
    # (the original left the file open on error).
    with open(LabelPath, encoding="gbk") as f:
        return [line.strip('\n').strip('*') for line in f]
def CleanText(text, StopList):
    """Lower-case and whitespace-tokenize each entry, dropping stop words.

    :param text: iterable of label strings.
    :param StopList: collection of stop words (already lower-case).
    :return: list of token lists, one per input entry.
    """
    cleaned = []
    for entry in list(text):
        tokens = entry.lower().split()
        cleaned.append([tok for tok in tokens if tok not in StopList])
    return cleaned
def GetDictionary(FileList,StopList,SaveDictPath):# merge all files in FileList and build a gensim dictionary automatically
    """Stream every training file, strip stop words and build/save a gensim Dictionary."""
    class TextLoader(object):
        # Lazily yields one tokenized line at a time from each file.
        def __init__(self):
            pass
        def __iter__(self):
            for file in FileList:
                input = open(file,'r',encoding='gbk')
                line = str(input.readline())
                counter = 0
                # NOTE(review): readline() returns '' at EOF (never None), so the
                # loop really stops on the len(line) > 4 guard; the file handle is
                # also never closed here — confirm before reusing this loader.
                while line!=None and len(line) > 4:
                    #print line
                    segments = line.lower().split(' ')
                    yield segments
                    line = str(input.readline())
    sentences = TextLoader()
    # Remove stop words from the streamed training text.
    texts = [[word for word in sentences if word not in StopList]# strip stop words from the training text
             for sentences in sentences]
    dictionary = corpora.Dictionary(texts)# build the dictionary and save it
    dictionary.save(SaveDictPath)
    return dictionary
def Classify(StopList, FilePath, SaveDictPath, LabelPath):
    """Assign each sample file under FilePath to its most similar category label.

    Builds an LSI model over the label texts, projects each file's abstract into
    that space, and picks the label with the highest cosine similarity.
    Returns a list of human-readable result strings, one per file.
    """
    FileList = GetFileList(FilePath, [])
    LabelList = GetLabelList(LabelPath)
    labeltexts = CleanText(LabelList, StopList)
    dictionary = GetDictionary(FileList, StopList, SaveDictPath)
    corpus = [dictionary.doc2bow(text) for text in labeltexts]
    Abstract = GetAbstract(FileList)
    Abstracts = list(Abstract)
    lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=40)  # compute text similarity in LSI space
    index = similarities.MatrixSimilarity(lsi[corpus])
    res = []
    for i in range(len(Abstract)):
        # Tokenize the i-th abstract and drop stop words before projecting it.
        new_doc = Abstracts[i]
        new_doc = new_doc.lower().split()
        new_vec = [word for word in new_doc if word not in StopList]
        vec_bow = dictionary.doc2bow(new_vec)
        vec_lsi = lsi[vec_bow]
        sims = index[vec_lsi]
        # argmax over similarities picks the best-matching label.
        res.append(FileList[i] + ' 属于 ' + LabelList[np.argmax(sims)])
    return res
# English stop-word set plus markup tokens found in the sample files.
StopList = set('\n,.\their been it we in such both which abstract(#br)in or has was new the from by that as these the have be were an by with for a of the and to in (#br)paper **题目**\n **关键词**\n **摘要**\n a on is are this this'.split())
# FilePath = '/Users/yuchengqi/PycharmProjects/Text_segmentation/英文样本集'# directory holding the sample files
FilePath = 'D:\\papers_classification\\pyhanlp\\tests\\categories\\英文样本集'
# SaveDictPath='/Users/yuchengqi/PycharmProjects/Text_segmentation/control.dict'#' where to save the dictionary file; any writable path works
SaveDictPath = 'D:\\papers_classification\\pyhanlp\\tests\\data\\control.dict'#' where to save the dictionary file; any writable path works
# LabelPath='categories for test_2.txt'# absolute path of the label file
LabelPath = 'D:\\papers_classification\\pyhanlp\\tests\\categories\\categories for test_2.txt'# absolute path of the label file
# ResultPath='/Users/yuchengqi/PycharmProjects/Text_segmentation/EnglishResult.txt'# output path for the results
ResultPath = 'D:\\papers_classification\\pyhanlp\\tests\\output_eng\\EnglishResult.txt'
res=Classify(StopList,FilePath,SaveDictPath,LabelPath)# res holds the classification results
# Write one result line per classified file.
fresult = open(ResultPath,'w')
fresult.write('\n'.join(res))
fresult.close()
| [
"392889690@qq.com"
] | 392889690@qq.com |
e8e5a6297fcf473afd41fcf34dcfa7c432b7440c | 6423a8158e9ed969166e264b70e411bedee8cd90 | /src/dispatch/workflow/models.py | 3856b2853a8115f82684a82f0ac8560ac625f5f4 | [
"Apache-2.0"
] | permissive | lf2foce/dispatch | de7967a9dea07fc64c6edbff733a5f072569ba41 | abb767dfc743665f25e0f3cffbdacee71e390838 | refs/heads/master | 2023-06-19T05:07:42.722020 | 2021-07-15T01:28:35 | 2021-07-15T01:28:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,591 | py | from datetime import datetime
from enum import Enum
from typing import List, Optional
from pydantic import validator
from sqlalchemy.orm import relationship, backref
from sqlalchemy import Column, ForeignKey, Integer, String, JSON, Table
from sqlalchemy.sql.schema import PrimaryKeyConstraint
from sqlalchemy.sql.sqltypes import Boolean
from sqlalchemy_utils import TSVectorType
from dispatch.database.core import Base
from dispatch.document.models import DocumentCreate
from dispatch.models import DispatchBase, ResourceBase, ResourceMixin, TimeStampMixin, ProjectMixin
from dispatch.participant.models import ParticipantRead
from dispatch.plugin.models import PluginInstance, PluginInstanceRead
from dispatch.project.models import ProjectRead
class WorkflowInstanceStatus(str, Enum):
    """Lifecycle states a workflow instance moves through."""

    submitted = "submitted"
    created = "created"
    running = "running"
    completed = "completed"
    failed = "failed"

    def __str__(self) -> str:
        # Render as the bare value (e.g. "running"), not "WorkflowInstanceStatus.running".
        return self.value
# Association tables for many to many relationships
# Documents (artifacts) produced by a workflow instance.
assoc_workflow_instances_artifacts = Table(
    "workflow_instance_artifact",
    Base.metadata,
    Column("document_id", Integer, ForeignKey("document.id", ondelete="CASCADE")),
    Column("workflow_instance_id", Integer, ForeignKey("workflow_instance.id", ondelete="CASCADE")),
    PrimaryKeyConstraint("document_id", "workflow_instance_id"),
)
# Incident priorities a workflow is scoped to.
assoc_workflow_incident_priorities = Table(
    "workflow_incident_priority",
    Base.metadata,
    Column("incident_priority_id", Integer, ForeignKey("incident_priority.id", ondelete="CASCADE")),
    Column("workflow_id", Integer, ForeignKey("workflow.id", ondelete="CASCADE")),
    PrimaryKeyConstraint("incident_priority_id", "workflow_id"),
)
# Incident types a workflow is scoped to.
assoc_workflow_incident_types = Table(
    "workflow_incident_type",
    Base.metadata,
    Column("incident_type_id", Integer, ForeignKey("incident_type.id", ondelete="CASCADE")),
    Column("workflow_id", Integer, ForeignKey("workflow.id", ondelete="CASCADE")),
    PrimaryKeyConstraint("incident_type_id", "workflow_id"),
)
# Search terms associated with a workflow.
assoc_workflow_terms = Table(
    "workflow_term",
    Base.metadata,
    Column("term_id", Integer, ForeignKey("term.id", ondelete="CASCADE")),
    Column("workflow_id", Integer, ForeignKey("workflow.id", ondelete="CASCADE")),
    PrimaryKeyConstraint("term_id", "workflow_id"),
)
class Workflow(Base, ProjectMixin, TimeStampMixin):
    """SQLAlchemy model for a runnable workflow definition, backed by a plugin."""
    id = Column(Integer, primary_key=True)
    name = Column(String)
    description = Column(String)
    enabled = Column(Boolean, default=True)
    # NOTE(review): default=[] is a shared mutable — Column scalar defaults reuse
    # the same list object; confirm all writers replace rather than mutate it.
    parameters = Column(JSON, default=[])
    # Identifier of the workflow on the external (plugin) side.
    resource_id = Column(String)
    plugin_instance_id = Column(Integer, ForeignKey(PluginInstance.id))
    plugin_instance = relationship(PluginInstance, backref="workflows")
    instances = relationship("WorkflowInstance", backref="workflow")
    # Many-to-many scoping: which priorities/types/terms this workflow applies to.
    incident_priorities = relationship(
        "IncidentPriority", secondary=assoc_workflow_incident_priorities, backref="workflows"
    )
    incident_types = relationship(
        "IncidentType", secondary=assoc_workflow_incident_types, backref="workflows"
    )
    terms = relationship(
        "Term", secondary=assoc_workflow_terms, backref=backref("workflows", cascade="all")
    )
    # Full-text search index over name + description.
    search_vector = Column(TSVectorType("name", "description"))
class WorkflowInstance(Base, ResourceMixin):
    """SQLAlchemy model for one execution of a Workflow within an incident."""
    id = Column(Integer, primary_key=True)
    workflow_id = Column(Integer, ForeignKey("workflow.id"))
    # NOTE(review): default=[] is a shared mutable — confirm writers replace it.
    parameters = Column(JSON, default=[])
    run_reason = Column(String)
    creator_id = Column(Integer, ForeignKey("participant.id"))
    incident_id = Column(Integer, ForeignKey("incident.id", ondelete="CASCADE"))
    # Participant who started this instance.
    creator = relationship(
        "Participant", backref="created_workflow_instances", foreign_keys=[creator_id]
    )
    status = Column(String, default=WorkflowInstanceStatus.submitted)
    # Documents produced by this run.
    artifacts = relationship(
        "Document", secondary=assoc_workflow_instances_artifacts, backref="workflow_instance"
    )
# Pydantic models...
class WorkflowBase(DispatchBase):
    """Fields shared by all workflow API schemas."""
    name: str
    resource_id: str
    plugin_instance: Optional[PluginInstanceRead]
    parameters: Optional[List[dict]] = []
    enabled: Optional[bool]
    description: Optional[str]
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
class WorkflowCreate(WorkflowBase):
    """Payload for creating a workflow; must be scoped to a project."""
    project: ProjectRead
class WorkflowUpdate(WorkflowBase):
    """Payload for updating an existing workflow, identified by id."""
    id: int
class WorkflowRead(WorkflowBase):
    """Workflow representation returned by the API."""
    id: int
    @validator("description", pre=True, always=True)
    def set_description(cls, v, values):
        """Sets the description"""
        # Substitute a placeholder when the stored description is empty/None.
        if not v:
            return "No Description"
        return v
class WorkflowNested(WorkflowRead):
    """Workflow shape for embedding inside other API schemas."""
    pass
class WorkflowPagination(DispatchBase):
    """Paginated list of workflows."""
    total: int
    items: List[WorkflowRead] = []
class WorkflowInstanceBase(ResourceBase):
    """Fields shared by all workflow-instance API schemas."""
    artifacts: Optional[List[DocumentCreate]] = []
    created_at: Optional[datetime] = None
    parameters: Optional[List[dict]] = []
    run_reason: Optional[str]
    status: Optional[WorkflowInstanceStatus]
    updated_at: Optional[datetime] = None
class WorkflowInstanceCreate(WorkflowInstanceBase):
    """Payload for starting a workflow instance."""
    creator: dict # TODO define a required email
    incident: dict # TODO define a required ID
    workflow: dict # TODO define a required ID
class WorkflowInstanceUpdate(WorkflowInstanceBase):
    """Payload for updating a workflow instance (all fields optional)."""
    pass
class WorkflowInstanceRead(WorkflowInstanceBase):
    """Workflow-instance representation returned by the API."""
    id: int
    workflow: WorkflowRead
    creator: ParticipantRead
class WorkflowInstancePagination(DispatchBase):
    """Paginated list of workflow instances."""
    total: int
    items: List[WorkflowInstanceRead] = []
| [
"noreply@github.com"
] | lf2foce.noreply@github.com |
ed44d233076771592fb61c833f24eaab5bddb06f | 0e9dd8474d4ca407bc9899659166f85f714dca97 | /python_stack/django/django_orm/Books_Authors/Books_Authors/urls.py | 2061c9991668199be42e3c6ca29e0ce696943ebc | [] | no_license | Nouf-Al/Nouf_Alotaibi | 4ff99c100a8034e397b7f6180a48105b22877126 | b9f8c441b5c3524fcba42805efdfd4017429d361 | refs/heads/master | 2023-02-18T17:40:01.287477 | 2021-01-21T16:20:07 | 2021-01-21T16:20:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | """Books_Authors URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# Route the admin site; delegate all remaining URLs to the app's URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin interface
    path('', include('books_authors_app.urls')),  # app routes mounted at site root
]
| [
"nouffawazz@gmai.com"
] | nouffawazz@gmai.com |
a6ebbfdd7abd12b3483b92d4d69893efc0b3bf8f | 02d5773c757d4155d0973474520c9c0f45ca28ff | /gis_test_of_test/urls.py | 263252981a63d7e4df441aa15c063e507d4d65b1 | [] | no_license | SeoMinJong/gis_test_of_test | b3c9b9e8dc78558eca3dbaa7112ce754106fb07e | bf5c0b2e2d5e6d5f9cd2a8049bd25550685026a7 | refs/heads/master | 2023-08-22T22:29:21.431424 | 2021-10-08T11:09:49 | 2021-10-08T11:09:49 | 402,606,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | """gis_test_of_test URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from articleapp.views import ArticleListView
# Project-level routes: home page, admin, and per-app URLconfs; media files are
# served via static() (development only — Django does not serve media in prod).
urlpatterns = [
    path('', ArticleListView.as_view(), name='home'),  # article list as the landing page
    path('admin/', admin.site.urls),
    path('accounts/', include('accountapp.urls')),
    path('profiles/', include('profileapp.urls')),
    path('articles/', include('articleapp.urls')),
    path('comments/', include('commentapp.urls')),
    path('projects/', include('projectapp.urls')),
    path('subscribes/', include('subscribeapp.urls')),
    path('like/', include('likeapp.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"tjalswhd113@naver.com"
] | tjalswhd113@naver.com |
3c5977f94759a7568e0426be77ac326392c2d679 | aad9213214fa2a8dad849db5dbf8d708fa48b64b | /machine_learining/3决策树.py | 2c050804252b551258a3dda61f5ca1e718d4f183 | [
"MIT"
] | permissive | qq453388937/data_mining_faith | 7e738e0f939763f3cb69400bac4f89c03a40bf62 | eec416e2d61e9aec358175704a7508295cea4218 | refs/heads/master | 2020-03-28T23:48:41.637968 | 2018-10-12T04:04:47 | 2018-10-12T04:04:47 | 149,309,951 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,874 | py | # -*- coding:utf-8 -*-
"""
决策树 每个类别的概率 乘以 log 每个类别的概率
H = -(1/32 * log 1/32 + ...) = -log 1/32 = 5
信息熵: 信息和消除不确定性是相联系的
信息增益: 得知某个特征对总的信息熵减少的大小
减少越大,可以放在树的顶部
H(D) = -(6/15*log(6/15)+9/15*log(9/15)) = 0.971
g(D,A) = H(D) - H(D|A)
计算信息增益:
g(D,年龄) = 0.971 -[5/15H(青年)+5/15H(中年)+5/15H(老年)] = 0.313
H(青年) = -(3/5log(3/5)+2/5(log(2/5)))
H(中年) = -(3/5log(3/5)+2/5(log(2/5)))
H(老年) = -(4/5log(4/5)+1/5(log(1/5)))
g(D,有工作) = 0.324
g(D,有自己房子) = 0.420
g(D,贷款情况) = 0.363
"""
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
def decision_tree():
    """Predict Titanic passenger survival with a decision tree and a random forest.

    Loads the Titanic dataset over HTTP, imputes missing ages, one-hot encodes
    pclass/sex/age, fits a DecisionTreeClassifier, then grid-searches a
    RandomForestClassifier.  Prints accuracy scores; returns None.
    """
    # 1. Fetch the passenger data.
    data = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')
    # 2. Select feature columns and the target.
    # .copy() gives an independent frame so fillna below does not trigger
    # a SettingWithCopyWarning on a view of `data`.
    x = data[['pclass', 'sex', 'age']].copy()
    y = data['survived']
    # 3. Impute missing ages with the mean, then one-hot encode the
    #    categorical features via DictVectorizer.
    x['age'].fillna(x['age'].mean(), inplace=True)
    dv = DictVectorizer(sparse=False)
    # [["1st", 2, "female"], ...] --> [{"pclass": "1st", "age": 2, "sex": "female"}, ...]
    x = dv.fit_transform(x.to_dict(orient="records"))
    print(dv.get_feature_names())
    print(x)
    # 3.5 Split into train/test sets (70/30).
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
    # 4. Fit a decision tree and report its accuracy.
    ds = DecisionTreeClassifier()
    ds.fit(x_train, y_train)
    print(ds.predict(x))
    # BUG FIX: predict on the *test* split so the printed predictions line up
    # with y_test below (the original predicted on the full matrix x).
    print("预测测试集当中的结果:", ds.predict(x_test)[:50])
    print("测试集当中的真实结果", y_test[:50])
    print("决策树预测的准确率", ds.score(x_test, y_test))
    # 5. Random forest with grid-searched hyper-parameters.
    rf = RandomForestClassifier()
    # Hyper-parameter grid for the search.
    param = {
        "n_estimators": [1, 3, 5, 7, 10],
        "max_depth": [5, 8, 12, 15],
        "min_samples_split": [2, 3, 5],
    }
    gc = GridSearchCV(rf, param_grid=param, cv=2)
    gc.fit(x_train, y_train)
    print("随机森林的准确率", gc.score(x_test, y_test))
    print("交叉验证的结果最佳参数", gc.best_estimator_)
if __name__ == '__main__':
    # Run the end-to-end Titanic classification demo when executed as a script.
    decision_tree()
| [
"453388937@qq.com"
] | 453388937@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.