| blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 2-616) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-118) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringlengths 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, ⌀) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2-10.3M) | authors (listlengths 1-1) | author_id (stringlengths 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
368d3a8e796916e2e6cadcf0e634c978a7ef2699
|
ae7ba9c83692cfcb39e95483d84610715930fe9e
|
/martinkersner/train-CRF-RNN/crfasrnn.py
|
472a86f853e687aa057de49dcc8aa8957733f63d
|
[] |
no_license
|
xenron/sandbox-github-clone
|
364721769ea0784fb82827b07196eaa32190126b
|
5eccdd8631f8bad78eb88bb89144972dbabc109c
|
refs/heads/master
| 2022-05-01T21:18:43.101664
| 2016-09-12T12:38:32
| 2016-09-12T12:38:32
| 65,951,766
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,462
|
py
|
#!/usr/bin/env python
# Martin Kersner, m.kersner@gmail.com
# 2016/03/03
from __future__ import print_function
caffe_root = '../caffe-crfrnn/'
import sys
sys.path.insert(0, caffe_root + 'python')
import os
import cPickle
import logging
import numpy as np
import pandas as pd
from PIL import Image as PILImage
import cStringIO as StringIO
import caffe
import matplotlib.pyplot as plt
from utils import palette_demo
# TODO concatenate input and output image
def main():
iteration, image_paths = process_arguments(sys.argv)
if iteration:
prototxt = 'TVG_CRFRNN_COCO_VOC_TEST_3_CLASSES.prototxt'
model = 'models/train_iter_{}.caffemodel'.format(iteration)
else:
prototxt = 'TVG_CRFRNN_COCO_VOC.prototxt'
model = 'TVG_CRFRNN_COCO_VOC.caffemodel'
if not exist_model(model, prototxt):
help()
# default images (part of http://www.cs.berkeley.edu/~bharath2/codes/SBD/download.html)
if not image_paths:
image_paths.append('images/2007_005844.png') # chair
image_paths.append('images/2008_007811.png') # bottle
image_paths.append('images/2007_002094.png') # bird
palette = palette_demo()
net = caffe.Segmenter(prototxt, model, True)
for path in image_paths:
image, cur_h, cur_w = preprocess_image(path)
if image is None:
print(path + ' does not exist! Skipping.', file=sys.stderr)
continue
print('Processing ' + path + '...', end='')
segmentation = net.predict([image])
segm_post = postprocess_label(segmentation, cur_h, cur_w, palette)
plt.imshow(segm_post)
plt.savefig(create_label_name(path))
print('finished.')
def preprocess_image(image_path):
if not os.path.exists(image_path):
return None, 0, 0
input_image = 255 * caffe.io.load_image(image_path)
image = PILImage.fromarray(np.uint8(input_image))
image = np.array(image)
mean_vec = np.array([103.939, 116.779, 123.68], dtype=np.float32)
reshaped_mean_vec = mean_vec.reshape(1, 1, 3);
im = image[:,:,::-1]
im = im - reshaped_mean_vec
# Pad as necessary
cur_h, cur_w, cur_c = im.shape
pad_h = 500 - cur_h
pad_w = 500 - cur_w
im = np.pad(im, pad_width=((0, pad_h), (0, pad_w), (0, 0)), mode = 'constant', constant_values = 0)
return im, cur_h, cur_w
def postprocess_label(segmentation, cur_h, cur_w, palette):
segmentation2 = segmentation[0:cur_h, 0:cur_w]
output_im = PILImage.fromarray(segmentation2)
output_im.putpalette(palette)
return output_im
def create_label_name(orig_path):
return 'label_' + os.path.splitext(os.path.basename(orig_path))[0] + '.png'
def exist_model(model, prototxt):
if not os.path.exists(model):
print('Model ' + model + ' does not exist! Exiting.', file=sys.stderr)
return False
elif not os.path.exists(prototxt):
print('Prototxt ' + prototxt + ' does not exist! Exiting.', file=sys.stderr)
return False
return True
def process_arguments(argv):
num_args = len(argv)
iteration = None
image_paths = []
if num_args == 2:
iteration = argv[1]
elif num_args > 2:
iteration = argv[1]
for name in argv[2:]:
image_paths.append(name)
return iteration, image_paths
def help():
print('Usage: python crfasrnn.py [ITERATION_NUM [IMAGE, IMAGE, ...]]\n'
'ITERATION_NUM denotes the iteration number of the model to run.\n'
'IMAGE: one or more images can be passed as arguments.'
, file=sys.stderr)
exit()
if __name__ == '__main__':
main()
|
[
"xenron@outlook.com"
] |
xenron@outlook.com
|
f0ee9e58c7029957af4366c984e9861db7dfc8e2
|
6ffed3351dd608ce311a7c04d813e4cbcc283d6f
|
/djongo_project/files/api/serializers.py
|
e0c2ca3d77901c61c0d12799e99a053ec5653fad
|
[] |
no_license
|
navill/ai_2-1
|
bc2311dd5e8839c6bd4112965b7348252408785a
|
7dd782b93286b7a407c433aa55a3b8f3f626fe89
|
refs/heads/master
| 2023-01-06T19:09:26.051130
| 2020-10-07T02:08:15
| 2020-10-07T02:08:15
| 290,131,119
| 0
| 0
| null | 2020-10-08T14:45:37
| 2020-08-25T06:20:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,396
|
py
|
from typing import *
from rest_framework import serializers
from rest_framework.reverse import reverse
from accounts.models import CommonUser
from utilities.file_utils import EncryptHandler
from files.models import CommonFile
class FileManageSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(queryset=CommonUser.objects.all(), required=False)
patient_name = serializers.CharField(required=True)
file = serializers.FileField(use_url=False)
created_at = serializers.DateTimeField(read_only=True)
class Meta:
model = CommonFile
fields = ['user', 'patient_name', 'file', 'created_at']
read_only_fields = ['user']
def to_representation(self, instance: CommonFile) -> Dict:
ret = super().to_representation(instance)
encrypted_path = self._create_encrypted_path(str(instance.id))
encrypted_pull_url = reverse('files:download', args=[encrypted_path], request=self.context['request'])
ret['url'] = encrypted_pull_url
return ret
def create(self, validated_data: dict) -> CommonFile:
try:
file_obj = CommonFile.objects.create(**validated_data)
except Exception:
raise
return file_obj
def _create_encrypted_path(self, instance_id: str) -> str:
handler = EncryptHandler(instance_id)
return handler.encrypt()
|
[
"blue_jihoon@naver.com"
] |
blue_jihoon@naver.com
|
98648afb1168eb37ec156f7995980e16668f754b
|
276be25104b8dd68aa45fb6bb5d0ff289dcad539
|
/testing/Belinda/app2.py
|
9602db1abe2ffd2f7da83ed45f26ed450721f7d9
|
[] |
no_license
|
CC196/Project-2
|
77ea2411607e84f9b75d55f1bfd0cd2b2240d553
|
fd3ca26703468443fec889e0e6e2d64dfd8214eb
|
refs/heads/main
| 2023-01-07T00:31:52.129842
| 2020-11-11T04:05:20
| 2020-11-11T04:05:20
| 302,524,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,321
|
py
|
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# from flask_cors import CORS
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///D:/Course_work/Project2/Project-2/Resources/Spotify.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Top_fifty = Base.classes.top50_detail
All_year_top = Base.classes.top50_2019
world = Base.classes.SpotifyTopSongsByCountry
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
# cors = CORS(app)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/top_50_2020_May<br/>"
f"/api/v1.0/top_50_2019<br>"
f"/api/v1.0/top50_country_2020_May"
)
@app.route("/api/v1.0/top_50_2020_May")
def May():
session = Session(engine)
results = session.query(Top_fifty.Title, Top_fifty.popularity,Top_fifty.acousticness,Top_fifty.danceability,Top_fifty.energy, Top_fifty.instrumentalness,Top_fifty.loudness,Top_fifty.speechiness,Top_fifty.valence, Top_fifty.tempo).all()
session.close()
all_song = []
count = 1
for Title,popularity,acousticness,danceability,energy,instrumentalness, loudness, speechiness, valence, tempo in results:
if Title:
song_detail = {}
song_detail["Title"] = Title
song_detail["popularity"] = popularity
song_detail["acousticness"] = acousticness
song_detail["danceability"] = danceability
song_detail["energy"] = energy
song_detail["instrumentalness"] = instrumentalness
song_detail["loudness"] = loudness
song_detail["speechiness"] = speechiness
song_detail["valence"] = valence
song_detail["tempo"] = tempo
song_detail["rank_in_May"] = count
count+=1
all_song.append(song_detail)
return jsonify(all_song)
@app.route("/api/v1.0/top_50_2019")
def all_year():
session = Session(engine)
result = session.query(All_year_top.Title, All_year_top.Popularity,All_year_top.Acousticness,All_year_top.Danceability,All_year_top.Energy, All_year_top.Liveness,All_year_top.Loudness,All_year_top.Speechiness,All_year_top.Valence, All_year_top.BPM, All_year_top.Rank).all()
session.close()
all_song = []
for Title,popularity,acousticness,danceability,energy,liveness, loudness, speechiness, valence, BPM, rank in result:
if Title:
song_detail = {}
song_detail["Title"] = Title
song_detail["popularity"] = popularity
song_detail["acousticness"] = acousticness
song_detail["danceability"] = danceability
song_detail["energy"] = energy
song_detail["liveness"] = liveness
song_detail["loudness"] = loudness
song_detail["speechiness"] = speechiness
song_detail["valence"] = valence
song_detail["BPM"] = BPM
song_detail["Rank"] = rank
all_song.append(song_detail)
return jsonify(all_song)
@app.route("/api/v1.0/top50_country_2020_May")
def world_May():
session = Session(engine)
world_top = session.query(world.Country, world.Rank, world.Title, world.Artists, world.Album).all()
session.close()
all_world = []
country_rank = {}
for country, rank, title, artists, album in world_top:
if country not in country_rank:
country_rank[country] = []
song_detail = {}
song_detail["Rank"] = rank
song_detail["Title"] = title
song_detail["Artists"] = artists
song_detail["Album"] = album
country_rank[country].append(song_detail)
all_world.append(country_rank)
return jsonify(all_world)
if __name__ == '__main__':
app.run(debug=True)
|
[
"qinqin.zha@gmail.com"
] |
qinqin.zha@gmail.com
|
0ab766f09b072cf8494f45960dfbd183965b6d10
|
6b971e3401fba1498d8b4f3e1b4a46971ca6d0a9
|
/examples/scroll_area_dynamic_content_layouts.py
|
82af4fc9d3357c0a602b0d880ee3a28e95f86fec
|
[] |
no_license
|
brent-stone/PySide6
|
1e41d76363343bfd238c60a93e11b4a9ac58d57c
|
2927ddbba51b677e4a0eb502b287b8a8d9e964ad
|
refs/heads/main
| 2023-08-14T23:09:29.669245
| 2021-09-05T00:50:41
| 2021-09-05T00:50:41
| 403,183,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,731
|
py
|
from PySide6.QtWidgets import *
import sys
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self, None)
self.setWindowTitle("My GUI")
self.resize(300, 400)
self.scroll = QScrollArea()
self.scroll.setWidgetResizable(True) # CRITICAL
self.inner = QFrame(self.scroll)
self.inner.setLayout(QVBoxLayout())
self.scroll.setWidget(self.inner) # CRITICAL
self.scroll_layout_dict = {}
b = QPushButton(self.inner)
b.setText("Populate")
b.clicked.connect(self.populate)
self.inner.layout().addWidget(b)
# When creating MainWindow() from scratch like this,
# it's necessary to tell PySide6 which widget is
# the 'central' one for the MainWindow().
self.setCentralWidget(self.scroll)
self.show()
def populate(self):
for i in range(10):
b = QPushButton(self.inner)
b.setText(str(i))
b.clicked.connect(self.del_button)
checkbox = QCheckBox(f"Check {i}!", self.inner)
new_layout = QHBoxLayout(self.inner)
new_layout.addWidget(b)
new_layout.addWidget(checkbox)
n = self.inner.layout().count()
self.inner.layout().insertLayout(n, new_layout)
self.scroll_layout_dict[b] = new_layout
def del_button(self):
button: QPushButton = self.sender()
layout: QVBoxLayout = self.scroll_layout_dict[button]
while layout.count() > 0:
layout.takeAt(0).widget().deleteLater()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec())
|
[
"brent.jk.stone@gmail.com"
] |
brent.jk.stone@gmail.com
|
c7198b3709f26fddaf360a7db559d549f4583b89
|
a65e5dc54092a318fc469543c3b96f6699d0c60b
|
/Personel/Sandesh/Python/23feb/reverse_string.py
|
5666f30c06762d1e26e632121ba09969fd9dc1f1
|
[] |
no_license
|
shankar7791/MI-10-DevOps
|
e15bfda460ffd0afce63274f2f430445d04261fe
|
f0b9e8c5be7b28298eb6d3fb6badf11cd033881d
|
refs/heads/main
| 2023-07-04T15:25:08.673757
| 2021-08-12T09:12:37
| 2021-08-12T09:12:37
| 339,016,230
| 1
| 0
| null | 2021-08-12T09:12:37
| 2021-02-15T08:50:08
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 96
|
py
|
string = input("Plearse enter a string : ")
print("Reverse of string is : ")
print(string[::-1])
|
[
"sandeshpatekar20@gmail.com"
] |
sandeshpatekar20@gmail.com
|
7b6b855791edd206f5db635b20e927ab22d10a3d
|
d670ceefd62a673d7f071ad8255ea1ced2d1be1f
|
/python/example/udp_client.py
|
424b47e0953f4aab5a8c34f7f73840dff577d7b1
|
[] |
no_license
|
wang-jinfeng/bigdata
|
2bfaecefd4aaac83c81e1dbce44cd596da717673
|
b031a5fb2d06c7a2c2942a19d8c818f50ec9b8f6
|
refs/heads/master
| 2022-12-01T22:55:23.425409
| 2021-01-20T10:08:56
| 2021-01-20T10:08:56
| 199,104,425
| 0
| 0
| null | 2022-11-16T01:56:42
| 2019-07-27T02:18:29
|
Scala
|
UTF-8
|
Python
| false
| false
| 294
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for data in [b'Michael', b'Tracy', b'Sarah']:
# Send data:
s.sendto(data, ('127.0.0.1', 9998))
# Receive data:
print(s.recv(1024).decode('utf-8'))
s.close()
|
[
"jinfeng.wang@mobvista.com"
] |
jinfeng.wang@mobvista.com
|
f5586a51b83a59377fbf0ba3e1c1f7cbfca452e3
|
0fd0495f194bc22f0322d02ebabe8add9bf5814c
|
/python_concept_references/excel.py
|
a0cd93b32a6802edc9ce767d467a37f6082774d4
|
[] |
no_license
|
WorldPierce/Automate_the_Boring_Stuff_With_Python
|
bd6982bddefa712598dc5e6eb4cf2a2aa87b2c1f
|
a0bc6ba49d92b25f4fbe8d4fdd9385f294567e4c
|
refs/heads/master
| 2020-12-30T09:58:38.341981
| 2017-08-05T19:51:10
| 2017-08-05T19:51:10
| 99,248,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
# install openpyxl to open spreadsheets
import openpyxl, os
os.chdir('c:\\wffpi\\documents')
workbook = openpyxl.load_workbook('example.xlsx')
type(workbook)
sheet = workbook.get_sheet_by_name('Sheet1')
type(sheet)
workbook.get_sheet_names()
cell = sheet['A1'] # gets cell object for cell A1
cell.value # gives you cell value
str(sheet['A1'].value) # prints string value of any cell
sheet.cell(row=1, column=2) # returns cell object same as sheet['B1']
# useful for loops so you don't convert to A, B etc..
for i in range(1,8):
print(i, sheet.cell(row=i, column=2).value)
|
[
"bildo316@gmail.com"
] |
bildo316@gmail.com
|
6849e3aa107e6582238894eb5ace39b380d9102b
|
74ddb61b608bf47b1320a3a66a13c0896bff4444
|
/samples/openapi3/client/petstore/python-experimental/petstore_api/models/whale.py
|
f39c28174404f4f3c268c11125743f22f796eebc
|
[
"Apache-2.0"
] |
permissive
|
ShakedH/openapi-generator
|
616f873dc29edf49e44c4685ebb7f46184ce62fd
|
07647b1a310410a28c95f8b4a9661c0ddeaf1db8
|
refs/heads/master
| 2022-09-08T07:40:44.550163
| 2020-05-28T13:43:38
| 2020-05-28T13:43:38
| 267,639,665
| 0
| 0
|
Apache-2.0
| 2020-05-28T16:26:44
| 2020-05-28T16:26:44
| null |
UTF-8
|
Python
| false
| false
| 6,430
|
py
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class Whale(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
@cached_property
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'class_name': (str,), # noqa: E501
'has_baleen': (bool,), # noqa: E501
'has_teeth': (bool,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'class_name': 'className', # noqa: E501
'has_baleen': 'hasBaleen', # noqa: E501
'has_teeth': 'hasTeeth', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, class_name, _check_type=True, _spec_property_naming=False, _path_to_item=(), _configuration=None, _visited_composed_classes=(), **kwargs): # noqa: E501
"""whale.Whale - a model defined in OpenAPI
Args:
class_name (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
has_baleen (bool): [optional] # noqa: E501
has_teeth (bool): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.class_name = class_name
for var_name, var_value in six.iteritems(kwargs):
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
[
"noreply@github.com"
] |
ShakedH.noreply@github.com
|
8f948f5da2515448b700d72a799435448074270c
|
7eeb873b01b1da3401d5b8a802438bcc0e22de22
|
/61_Sets/main.py
|
447df8aecab79571c7d8207257ce7002d3b8894b
|
[] |
no_license
|
Bhushan2581/Python-tuts
|
ed5f1606f23aa1d4370d4ed2e4171c25cfc1f206
|
4f2acc839874fcbc3407ba1e0dc6e2d44d6e6179
|
refs/heads/master
| 2022-07-29T22:38:17.831404
| 2020-05-17T03:48:24
| 2020-05-17T03:48:24
| 264,577,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
pythonlist = ["C", "Python", "Java"]
print(pythonlist[1])
pythonlist = ["C", "Python", "Java"]
print(pythonlist[-1])
pythonlist = ["java", "C", "C++", "PHP", "HTML", "Ruby", "C#"]
print(pythonlist[2:5])
pythonlist = ["java", "C", "C++", "PHP", "HTML", "Ruby", "C#"]
pythonlist[1] = "Laravel"
print(pythonlist)
pythonlist = ["Java", "C", "Python"]
for x in pythonlist:
print(x)
|
[
"noreply@github.com"
] |
Bhushan2581.noreply@github.com
|
3e2af68956ab395d7d6c3ee1a4238c837c4b51cc
|
b2472967910be9c12576f0f97d33bca0576a8667
|
/atcoder-old/2019/0901_abc139/d.py
|
0f4f619ee2e61c90c3c79b0e44b9d3a7b51c02c3
|
[] |
no_license
|
ykmc/contest
|
85c3d1231e553d37d1235e1b0fd2c6c23f06c1e4
|
69a73da70f7f987eb3e85da503ea6da0744544bd
|
refs/heads/master
| 2020-09-01T22:56:10.444803
| 2020-07-14T11:36:43
| 2020-07-14T11:36:43
| 217,307,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# Python3 (3.4.3)
import sys
input = sys.stdin.readline
# -------------------------------------------------------------
# function
# -------------------------------------------------------------
# -------------------------------------------------------------
# main
# -------------------------------------------------------------
N = int(input())
# i % (i+1) = i is optimal; the remainders are 1 through N-1
print(N*(N-1)//2)
|
[
"34961813+ykmc@users.noreply.github.com"
] |
34961813+ykmc@users.noreply.github.com
|
12770caef27bc3fef998f1a1f8d9898aefccdd3f
|
aee126bb9fae134662e0ca2eab4c8d7483ab3ec6
|
/server/flaskApp/templates/app.py
|
ba8c62f877a5dff068fa88fd4b7552ad2099ed36
|
[] |
no_license
|
collier-watkins/DeafDoorbell
|
3479cc5da661b10c53981a30775ec28cfcb1524e
|
c8d8601a159a1469ccec3c9884012dc5d0b872f7
|
refs/heads/master
| 2020-05-04T01:09:32.807947
| 2019-05-27T01:41:53
| 2019-05-27T01:41:53
| 178,899,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
from flask import Flask, render_template, url_for, flash, request
from forms import MessageForm
from wtforms.widgets import html_params, HTMLString
from flask_socketio import SocketIO
#import subprocess
app = Flask(__name__)
app.config['SECRET_KEY'] = '3985723043u208uj23022039rue'
socketio = SocketIO(app)
client1IP = "192.168.0.16"
#Home Page
@app.route("/", methods=['GET', 'POST']) #Related to website locations
def homePage(): #Returns data for the main home page, should be HTML data
form = MessageForm()
JoysRoom = False
UpstairsBathroom = False
if request.method == "POST":
locations = request.form.getlist('location')
if u'Upstairs Bathroom' in locations : UpstairsBathroom = True
if u'Joys Room' in locations : JoysRoom = True
if form.validate_on_submit():
#THIS IS WHAT HAPPENS WHEN THE SUBMIT BUTTON IS PRESSED
message = request.form.get("LCDMessage")
app.logger.warning('Submit happened!')
app.logger.warning(message)
app.logger.warning("Joy\'s Room: " + str(JoysRoom))
app.logger.warning("Upstairs Bathroom: " + str(UpstairsBathroom))
######Send message to LCD and do GPIO stuff here #########
#subprocess.run()
#####################
return render_template('home.html', title='Blog Posts', form=form)
def messageReceived(methods=['GET', 'POST']):
print('message was received!!!')
@socketio.on('submitHit')
def handle_submit_event(json, methods=['GET', 'POST']):
print('received my event: ' + str(json))
socketio.emit('my response', json, callback=messageReceived)
#About Page
@app.route("/about")
def aboutPage():
return "<h1>About Page</h1>"
if __name__ == '__main__':
#Run Flask Application
socketio.run(app, debug=True, host='0.0.0.0', port=80)
|
[
"collier.watkins.chc@gmail.com"
] |
collier.watkins.chc@gmail.com
|
71ca8542c4f7c61d9328341ec14b583e4d30c82d
|
e4920c4fe4290bde524e0c141189f80fddfe44b7
|
/info/migrations/0023_remove_message_recaptcha.py
|
85b4c72c01e9b2dc98df1f027f92d51e1b879720
|
[] |
no_license
|
ShahadatShuvo/Django_portfolio
|
42af2b0aa686bff08730cdb105f95d6b63adb620
|
795ed7cbb6444245af08582ea63f57a0f32679a0
|
refs/heads/master
| 2023-05-30T01:43:50.409584
| 2021-06-14T08:29:18
| 2021-06-14T08:29:18
| 376,372,632
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
# Generated by Django 2.2.16 on 2020-12-22 17:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('info', '0022_auto_20201222_1655'),
]
operations = [
migrations.RemoveField(
model_name='message',
name='recaptcha',
),
]
|
[
"shahadat@baiust.edu.bd"
] |
shahadat@baiust.edu.bd
|
d2d223dd5e9968b7af2c5e4232ccc82f887bf845
|
d3709ef033ddf881449c515936b448370a2625ee
|
/leetcode/058.Length of Last Word/common.py
|
58b6c30db0277230261076d8bd602b61eac1e818
|
[] |
no_license
|
xjhuang1993/PythonExercise
|
e7d7b12497986798184fa78a4e44764961237a56
|
444db0876295ee2dca2bd2c6103eb5fca4d643be
|
refs/heads/master
| 2020-03-19T01:03:26.988279
| 2018-06-06T01:53:35
| 2018-06-06T01:53:35
| 135,518,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
"""
Accepted
"""
def func(s):
if not s:
return 0
result_list = s.strip().split(" ")
result = len(result_list[-1])
return result
if __name__ == "__main__":
s = "Hello World"
print(func(s))
|
[
"398478432@qq.com"
] |
398478432@qq.com
|
b483194ef410722c853543abfba98d1e36b2fa5b
|
1c2ae618653041b4dc739ad550f1749f2f5eeb81
|
/James-Script/Discovery/smtp-enum.py
|
cd88f4eb98a9ea6388fd0b7210f6882883b2a028
|
[] |
no_license
|
ubriggsl/OSEC-PWK
|
b50a5b1c8902fafb217d6766a960de90008d0af4
|
64c2dc943fde87457b835c98c6d409689d20bf3c
|
refs/heads/master
| 2021-01-20T00:08:29.484029
| 2017-05-15T02:33:14
| 2017-05-15T02:33:14
| 89,084,644
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
#!/usr/bin/python
import argparse
import socket
import sys
a = argparse.ArgumentParser()
a.add_argument('user')
a.add_argument('target')
a.add_argument('-p',dest='port',default='25')
args = a.parse_args()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
connect = s.connect((args.target,int(args.port)))
banner = s.recv(1024)
print banner
s.send('VRFY '+ args.user + '\r\n')
result = s.recv(1024)
print result
except:
pass
s.close()
|
[
"james@briggsconsulting.com"
] |
james@briggsconsulting.com
|
4e99a7acd3bcf8e67dc4a408c2fc47b15569d91a
|
72f59717e1e878005e31281c0fd09a4cf0c2f69b
|
/github_2fa.py
|
d85ef49c971ea1ac160bd894b11286db624b60cc
|
[] |
no_license
|
daguy666/api
|
7627daf8f195d97119277c9e3fb219f1e93aeda7
|
c2097813564b63d4034d2f64d0ae3aeea9a220b8
|
refs/heads/master
| 2020-04-21T11:55:46.153919
| 2014-11-06T19:34:04
| 2014-11-06T19:34:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
#!/usr/bin/env python
#
#github_2fa.py
#This will detect users who are in a specified
#org that have 2fa disabled
############################################
import json
import urllib2
import getpass
import sys
pw = getpass.getpass("Please enter a token: ")
org = raw_input("Org to search: ")
org_url = "https://api.github.com/orgs/%s/members?&filter=2fa_disabled&access_token=%s" % (org, pw)
try:
request = urllib2.Request(org_url)
result = urllib2.urlopen(request)
except urllib2.HTTPError, e:
if e.code == 400:
print "Bad Request"
elif e.code == 401:
print "Invalid Password"
elif e.code == 404:
print "Invalid org"
else:
print "Call to API failed.", e
sys.exit(1)
twofa_json = json.load(result)
print "Users in the %s Org that do not have two factor auth enabled: " % org
for entry in twofa_json:
print entry['login']
|
[
"jpistonejr@gmail.com"
] |
jpistonejr@gmail.com
|
9e7202b2d72c068da23e38e2765dd64abbe128ce
|
52953b88fd65e64d13c0b8ebeae5d468305c01ea
|
/schema.py
|
cc90f9184c3d88b61fbb852d2641f19a86a0a955
|
[] |
no_license
|
antdood/EmojiCounter
|
ed77a77d032bda93e4f4cffc1145a5f74d90d7d3
|
e3aace43cf7da57eb57f9603ab9c8d2c28762310
|
refs/heads/main
| 2023-03-28T16:26:11.846775
| 2021-04-10T23:59:08
| 2021-04-10T23:59:08
| 350,259,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
CREATE TABLE emoji_usages
(
channel BIGINT UNSIGNED,
user BIGINT UNSIGNED,
emoji VARCHAR(255),
type ENUM('original', 'custom'),
time TIMESTAMP
);
|
[
"antawck@gmail.com"
] |
antawck@gmail.com
|
94bb8b2a0fb2fd8136b0743980291df09b163012
|
850001831b1fcdd4d27e328b356fc34909ca2917
|
/tests/test_map.py
|
77237924f7d6561097ffea685bfbe60e67c465bc
|
[
"BSD-3-Clause"
] |
permissive
|
yidiq7/pathos
|
b337353ccfe447866c46a4a784a7908c2f3fe31e
|
7e4fef911dc0283e245189df4683eea65bfd90f0
|
refs/heads/master
| 2022-08-24T08:43:34.009115
| 2020-05-27T12:18:21
| 2020-05-27T12:18:21
| 267,310,390
| 0
| 0
|
NOASSERTION
| 2020-05-27T12:14:50
| 2020-05-27T12:14:47
| null |
UTF-8
|
Python
| false
| false
| 2,170
|
py
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2020 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/pathos/blob/master/LICENSE
import time
verbose = False
delay = 0.01
items = 100
def busy_add(x,y, delay=0.01):
import time
for n in range(x):
x += n
for n in range(y):
y -= n
time.sleep(delay)
return x + y
def timed_pool(pool, items=100, delay=0.1, verbose=False):
_x = range(-items//2,items//2,2)
_y = range(len(_x))
_d = [delay]*len(_x)
if verbose: print(pool)
start = time.time()
res = pool.map(busy_add, _x, _y, _d)
_t = time.time() - start
if verbose: print("time to queue: %s" % _t)
start = time.time()
_sol_ = list(res)
t_ = time.time() - start
if verbose: print("time to results: %s\n" % t_)
return _sol_
class BuiltinPool(object):
def map(self, *args):
return list(map(*args))
std = timed_pool(BuiltinPool(), items, delay=0, verbose=False)
def test_serial():
from pathos.pools import SerialPool as PS
pool = PS()
res = timed_pool(pool, items, delay, verbose)
assert res == std
def test_pp():
from pathos.pools import ParallelPool as PPP
pool = PPP(servers=('localhost:5653','localhost:2414'))
res = timed_pool(pool, items, delay, verbose)
assert res == std
def test_processing():
from pathos.pools import ProcessPool as MPP
pool = MPP()
res = timed_pool(pool, items, delay, verbose)
assert res == std
def test_threading():
from pathos.pools import ThreadPool as MTP
pool = MTP()
res = timed_pool(pool, items, delay, verbose)
assert res == std
if __name__ == '__main__':
if verbose:
print("CONFIG: delay = %s" % delay)
print("CONFIG: items = %s" % items)
print("")
from pathos.helpers import freeze_support, shutdown
freeze_support()
test_serial()
test_pp()
test_processing()
test_threading()
shutdown()
|
[
"mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df"
] |
mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df
|
f76b563996fa4bd26228b5de39272a5ff2b2f5bf
|
7407b5a2bfad54bc9767c70c47d205164ffa337f
|
/models/db1.py
|
28d949a5077a8f9c4c8eccedeebe92588ac8d9c0
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
abdulhalim-cu/motor-control-system
|
9397bb191fa22fceb66e88ccdb4ea80de774eb33
|
70d276d63163269c378620a85d46db8374b1a836
|
refs/heads/master
| 2021-01-01T18:31:42.750124
| 2017-08-01T01:51:27
| 2017-08-01T01:51:27
| 98,357,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,416
|
py
|
# -*- coding: utf-8 -*-
db.define_table('Device',
Field('device_id', 'string'),
Field('device_name', 'string'),
Field('model', 'string'),
Field('location', 'string')
)
db.Device.device_id.requires = [IS_NOT_EMPTY(),IS_NOT_IN_DB(db, 'Device.device_id')]
db.Device.device_name.requires = IS_NOT_EMPTY()
db.define_table('User_Device',
Field('user_ref_id', 'reference auth_user'),
Field('device_ref_id', 'reference Device'))
db.define_table('Direction',
Field('direction_type', label='Direction'),
format="%(direction_type)s")
db.define_table('Control_Instruction',
Field('device_ref_id', 'reference Device'),
Field('onoff_flag', 'boolean', notnull=True, label='Motor ON/OFF', comment='* Check for ON & Uncheck for OFF'),
Field('volt_flag', 'string', label='Voltage'),
Field('curr_flag', 'string', label='Current'),
Field('rot_flag', 'string', label='Rotation', comment='* Insert only integer value [revolution per minute]'),
Field('dir_flag', 'reference Direction', label='Direction', requires = IS_IN_DB(db, db.Direction.id,'%(direction_type)s')),
Field('freq_flag', 'string', label='Frequency'),
Field('off_flag', 'boolean', notnull=True, label='Off')
)
db.define_table('Changes',
Field('device_ref_id', 'reference Device'),
Field('change_flag', 'string')
)
db.define_table('Status',
Field('device_ref_id', 'reference Device'),
Field('created', 'datetime'),
Field('last_ping','datetime', requires=IS_NOT_EMPTY()),
Field('server_time','datetime', requires=IS_NOT_EMPTY()))
db.define_table('Device_States',
Field('device_ref_id', 'reference Device'),
Field('on_or_off', 'boolean', notnull=True, label='ON/OFF'),
Field('voltage', 'string'),
Field('current', 'string'),
Field('rotation', 'string'),
Field('direction', 'reference Direction', requires = IS_IN_DB(db, db.Direction.id,'%(direction_type)s')),
Field('frequency', 'string'),
Field('off', 'boolean', notnull=True, label='OFF'))
|
[
"abdulhalim.cu10@gmail.com"
] |
abdulhalim.cu10@gmail.com
|
413948050ddee82de99cfae896398e6cdd3de69b
|
b252c3b7a78b37f8da3d5edca023e2bc38b368d4
|
/python_spider/learn_urllib.py
|
f35177e5ca0fc7636e025d24da4b8e810ec17bae
|
[] |
no_license
|
xianke5200/Python_spider_test
|
868447e48ae136314ba08a7c06440ba27951b201
|
7674ff2b4bcdc35248718d02245e968a5ca61add
|
refs/heads/master
| 2021-12-16T04:18:45.263926
| 2021-12-13T06:01:03
| 2021-12-13T06:01:03
| 234,503,928
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 822
|
py
|
import urllib
from urllib import request,parse
import ssl
#response = urllib.request.urlopen('http://192.168.1.153/bugfree/index.php/site/login')
#print(response.read().decode('utf-8'))
#context = ssl._create_unverified_context()
url = 'http://192.168.1.153/bugfree/index.php/site/login'
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
}
dict = {
#'return_url':'http://192.168.1.153/bugfree/index.php/bug/list/12',
'LoginForm[username]':'chenlue',
'LoginForm[password]':'chenlue',
'LoginForm[language]':'zh_cn',
'LoginForm[rememberMe]':'0'
}
data = bytes(parse.urlencode(dict),'utf-8')
req = request.Request(url,data=data,headers=headers,method='POST')
response = request.urlopen(req,data=data,timeout=1000)
print(response.read().decode('utf-8'))
|
[
"1833907216@qq.com"
] |
1833907216@qq.com
|
8c1baaf7bc0b2f574388dc5f64e36e1918a8a0b1
|
15b3d2c324748b2a9a96813fd7e0919cc4e249ae
|
/dashboard/dashboard/urls.py
|
3645e421490b14c597b18f35ad34efbc033bda4d
|
[] |
no_license
|
pzjc/dashboard-django
|
c1fb6fd7e8fa56ec10d714c0ea1607544f31f5fa
|
bb0dc34fb76cc768c9ef0d5f651a4112ddea73ec
|
refs/heads/master
| 2021-07-01T12:09:53.631999
| 2017-09-22T20:29:34
| 2017-09-22T20:29:34
| 104,480,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
"""dashboard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
[
"jz762@cornell.edu"
] |
jz762@cornell.edu
|
039ddde1464386c3829683361c65b4dcf614072b
|
fe5eb98a1fa9a1bd194e7d859fddc9fc62ed842e
|
/modified_2/run_both_multi.py
|
a58183743d8e5d737bc79c5f36c4bae074557f77
|
[] |
no_license
|
TaehoLi/Capstone
|
862d27a819c455fcfa8da598a976d3bf1e9c9570
|
9329d7910c24d32b04598f373899819694f856e4
|
refs/heads/master
| 2020-04-18T08:11:52.207623
| 2019-12-26T12:04:32
| 2019-12-26T12:04:32
| 167,387,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,279
|
py
|
# -*- coding: utf-8 -*-
import argparse
import logging
import time
import cv2
import numpy as np
import matplotlib.pyplot as plt
#matplotlib auto
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
from datetime import datetime
logger1 = logging.getLogger('TfPoseEstimator-WebCam')
logger1.setLevel(logging.DEBUG)
ch1 = logging.StreamHandler()
ch1.setLevel(logging.DEBUG)
formatter1 = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch1.setFormatter(formatter1)
logger1.addHandler(ch1)
logger2 = logging.getLogger('TfPoseEstimator-Video')
logger2.setLevel(logging.DEBUG)
ch2 = logging.StreamHandler()
ch2.setLevel(logging.DEBUG)
formatter2 = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch2.setFormatter(formatter2)
logger2.addHandler(ch2)
fps_time = 0
# codec
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# VideoWriter object
# video resolution should be 640x480(must be same)
#out1 = cv2.VideoWriter('./data/output3.avi', fourcc, 20.0, (640,480))
#out2 = cv2.VideoWriter('./data/output4.avi', fourcc, 20.0, (640,480))
### 1:Webcam / 2:Video
if __name__ == '__main__':
# 1
parser1 = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')
parser1.add_argument('--camera', type=int, default=0)
parser1.add_argument('--resize', type=str, default='432x368',
help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
parser1.add_argument('--resize-out-ratio', type=float, default=4.0,
help='if provided, resize heatmaps before they are post-processed. default=1.0')
parser1.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
parser1.add_argument('--show-process', type=bool, default=False,
help='for debug purpose, if enabled, speed for inference is dropped.')
parser1.add_argument('--video', type=str, default=None)
# 2
parser2 = argparse.ArgumentParser(description='tf-pose-estimation Video')
parser2.add_argument('--camera', type=int, default=None)
parser2.add_argument('--video', type=str, default='')
parser2.add_argument('--resize', type=str, default='432x368',
help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
parser2.add_argument('--resize-out-ratio', type=float, default=4.0,
help='if provided, resize heatmaps before they are post-processed. default=1.0')
parser2.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
parser2.add_argument('--show-process', type=bool, default=False,
help='for debug purpose, if enabled, speed for inference is dropped.')
parser2.add_argument('--showBG', type=bool, default=True, help='False to show skeleton only.')
args1 = parser1.parse_args()
args2 = parser2.parse_args()
logger1.debug('initialization %s : %s' % (args1.model, get_graph_path(args1.model)))
logger2.debug('initialization %s : %s' % (args2.model, get_graph_path(args2.model)))
w1, h1 = model_wh(args1.resize)
w2, h2 = model_wh(args2.resize)
if w1 > 0 and h1 > 0:
e1 = TfPoseEstimator(get_graph_path(args1.model), target_size=(w1, h1))
else:
e1 = TfPoseEstimator(get_graph_path(args1.model), target_size=(432, 368))
if w2 > 0 and h2 > 0:
e2 = TfPoseEstimator(get_graph_path(args2.model), target_size=(w2, h2))
else:
e2 = TfPoseEstimator(get_graph_path(args2.model), target_size=(432, 368))
cap1 = cv2.VideoCapture(args1.camera)
cap2 = cv2.VideoCapture(args2.video)
if cap2.isOpened() is False:
print("Error opening video stream or file")
# data plotting
lstX = []
lstY1 = []
lstY2 = []
threshold = 0.55 # have to change
plt.ion()
fig1 = plt.figure(num='real-time plotting1')
sf1 = fig1.add_subplot(111)
plt.title('Upper Body')
plt.xticks([0, 1500000, 3000000, 4500000, 6000000])
plt.yticks([0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
plt.axhline(y=threshold, color='r', linestyle='--', linewidth=2)
line1, = sf1.plot([0, 6000000], [0,1], 'b-')
fig2 = plt.figure(num='real-time plotting2')
sf2 = fig2.add_subplot(111)
plt.title('Lower Body')
plt.xticks([0, 1500000, 3000000, 4500000, 6000000])
plt.yticks([0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
plt.axhline(y=threshold, color='r', linestyle='--', linewidth=2)
line2, = sf2.plot([0, 6000000], [0,1], 'b-')
while cap2.isOpened(): # loop
try:
ret_val1, image1 = cap1.read()
ret_val2, image2 = cap2.read()
logger1.debug('image process+')
humans1 = e1.inference(image1, resize_to_default=(w1 > 0 and h1 > 0), upsample_size=args1.resize_out_ratio)
logger2.debug('image process+')
humans2 = e2.inference(image2, resize_to_default=(w2 > 0 and h2 > 0), upsample_size=args2.resize_out_ratio)
### 2:Video) if(--showBG=False) print skeleton
if not args2.showBG:
image2 = np.zeros(image2.shape)
###
logger1.debug('postprocess+')
a = TfPoseEstimator.get_centers(image1, humans1, imgcopy=False) #all points
image1 = TfPoseEstimator.draw_humans(image1, humans1, imgcopy=False)
logger2.debug('postprocess+')
b = TfPoseEstimator.get_centers(image2, humans2, imgcopy=False) # upper-body points
c = TfPoseEstimator.get_centers(image2, humans2, imgcopy=False) # lower-body points
image2 = TfPoseEstimator.draw_humans(image2, humans2, imgcopy=False)
"""
1) 실시간으로 동영상의 점을 불러온다 (점의 좌표를 알아야함)
2) 실시간으로 웹캠의 점을 불러온다 (점의 좌표를 알아야함)
3) 점 간의 norm(거리)을 구한다 (scalar)
4) 예를 들어 점이 18개로 고정되어 있다면 각 pair점 간의 norm을 전부 구하고
5) sum 하여 그 값을 0과 1사이로 normalization 한다 ->result
6) result를 y축 time을 x축으로 실시간 데이터 plotting
7) result가 어떤 threshold를 넘어설때 마다 warning을 cv2.putText로 출력해준다.
"""
if 0 in b:
del b[0]
if 14 in b:
del b[14]
if 15 in b:
del b[15]
if 16 in b:
del b[16]
if 17 in b:
del b[17] # exclude eye, nose, and ear points
if 0 in c:
del c[0]
if 14 in c:
del c[14]
if 15 in c:
del c[15]
if 16 in c:
del c[16]
if 17 in c:
del c[17] # exclude eye, nose, and ear points
if 8 in b:
del b[8]
if 9 in b:
del b[9]
if 10 in b:
del b[10]
if 11 in b:
del b[11]
if 12 in b:
del b[12]
if 13 in b:
del b[13] # exclude lower-body points
if 1 in c:
del c[1]
if 2 in c:
del c[2]
if 3 in c:
del c[3]
if 4 in c:
del c[4]
if 5 in c:
del c[5]
if 6 in c:
del c[6]
if 7 in c:
del c[7] # exclude upper-body points
L2_norm1 = [] # upper body
L2_norm2 = [] # lower body
L2_nonzero1 = []
L2_nonzero2 = []
for i in range(18):
try:
L2_norm1.append(np.linalg.norm(np.array(a[i])-np.array(b[i]), ord=2))
except:
L2_norm1.append(0.0)
pass
if L2_norm1[i] != 0.0:
L2_nonzero1.append(L2_norm1[i])
else:
pass
for i in range(18):
try:
L2_norm2.append(np.linalg.norm(np.array(a[i])-np.array(c[i]), ord=2))
except:
L2_norm2.append(0.0)
pass
if L2_norm2[i] != 0.0:
L2_nonzero2.append(L2_norm2[i])
else:
pass
normalize1 = []
normalize2 = []
if len(L2_nonzero1) == 0:
normalize1.append(0.0)
elif len(L2_nonzero1) == 1:
normalize1.append(0.0)
elif len(L2_nonzero1) == 2:
normalize1.append(0.0)
else:
for i in range(len(L2_nonzero1)):
normalize1.append((L2_nonzero1[i]-min(L2_nonzero1))/(max(L2_nonzero1)-min(L2_nonzero1)))
result1 = np.sum(normalize1)/len(normalize1)
if len(L2_nonzero2) == 0:
normalize2.append(0.0)
elif len(L2_nonzero2) == 1:
normalize2.append(0.0)
elif len(L2_nonzero2) == 2:
normalize2.append(0.0)
else:
for i in range(len(L2_nonzero2)):
normalize2.append((L2_nonzero2[i]-min(L2_nonzero2))/(max(L2_nonzero2)-min(L2_nonzero2)))
result2 = np.sum(normalize2)/len(normalize2)
c = datetime.now()
d = c.strftime('%S%f')
d = np.float32(d) / 10.0
lstX.append(d)
lstY1.append(result1)
lstY2.append(result2)
print("Data point:", result1, result2)
if d > 5900000: # reset the plots every minute
fig1.clf()
fig1 = plt.figure(num='real-time plotting1')
sf1 = fig1.add_subplot(111)
plt.title('Upper Body')
plt.xticks([0, 1500000, 3000000, 4500000, 6000000])
plt.yticks([0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
plt.axhline(y=threshold, color='r', linestyle='--', linewidth=2)
lstX=[]
lstY1=[]
line1, = sf1.plot([0, 6000000], [0,1], 'b-')
line1.set_data(lstX, lstY1)
fig2.clf()
fig2 = plt.figure(num='real-time plotting2')
sf2 = fig2.add_subplot(111)
plt.title('Lower Body')
plt.xticks([0, 1500000, 3000000, 4500000, 6000000])
plt.yticks([0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
plt.axhline(y=threshold, color='r', linestyle='--', linewidth=2)
lstY2=[]
line2, = sf2.plot([0, 6000000], [0,1], 'b-')
line2.set_data(lstX, lstY2)
else:
line1.set_data(lstX, lstY1)
line2.set_data(lstX, lstY2)
plt.show()
plt.pause(0.0001)
# threshold adjustment
if result1 > threshold and result2 > threshold:
#logger1.debug('show+')
#logger2.debug('show+')
cv2.putText(image1, "FPS: %f" % (1.0 / (time.time() - fps_time)),
(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.putText(image2, "FPS: %f" % (1.0 / (time.time() - fps_time)),
(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.putText(image1, "Wrong Pose", (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 2.35, (0,0,255), 5)
cv2.imshow('tf-pose-estimation Webcam', image1)
cv2.imshow('tf-pose-estimation Video', image2)
## Save the image to a file; writing frames to the VideoWriter object in sequence produces a video.
#out1.write(image1)
#out2.write(image2)
fps_time = time.time()
elif result1 > threshold:
cv2.putText(image1, "FPS: %f" % (1.0 / (time.time() - fps_time)),
(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.putText(image2, "FPS: %f" % (1.0 / (time.time() - fps_time)),
(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.putText(image1, "Mind Upper", (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 2.35, (0,0,255), 5)
cv2.imshow('tf-pose-estimation Webcam', image1)
cv2.imshow('tf-pose-estimation Video', image2)
## Save the image to a file; writing frames to the VideoWriter object in sequence produces a video.
#out1.write(image1)
#out2.write(image2)
fps_time = time.time()
elif result2 > threshold:
cv2.putText(image1, "FPS: %f" % (1.0 / (time.time() - fps_time)),
(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.putText(image2, "FPS: %f" % (1.0 / (time.time() - fps_time)),
(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.putText(image1, "Mind Lower", (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 2.35, (0,0,255), 5)
cv2.imshow('tf-pose-estimation Webcam', image1)
cv2.imshow('tf-pose-estimation Video', image2)
## Save the image to a file; writing frames to the VideoWriter object in sequence produces a video.
#out1.write(image1)
#out2.write(image2)
fps_time = time.time()
else:
#logger1.debug('show+')
#logger2.debug('show+')
cv2.putText(image1, "FPS: %f" % (1.0 / (time.time() - fps_time)),
(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.putText(image2, "FPS: %f" % (1.0 / (time.time() - fps_time)),
(10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
cv2.imshow('tf-pose-estimation Webcam', image1)
cv2.imshow('tf-pose-estimation Video', image2)
## Save the image to a file; writing frames to the VideoWriter object in sequence produces a video.
#out1.write(image1)
#out2.write(image2)
fps_time = time.time()
if cv2.waitKey(1) == 27: #ESC
break # exit
except:
print("video is over")
break
while True:
if cv2.waitKey(1) & 0xFF == ord('q'): # press Q to destroy all windows
cv2.destroyAllWindows()
break
#cv2.destroyAllWindows()
logger1.debug('finished+')
logger2.debug('finished+')
|
[
"noreply@github.com"
] |
TaehoLi.noreply@github.com
|
1c81bdca5dc92eada6417d4615dab19cc148a555
|
1f61e06271f8e9582669686457c40336235d1eee
|
/Module 1/Chapter 9/edge_detector.py
|
a7221300b04f62e04e673c71daddfa906d6eaed5
|
[
"MIT"
] |
permissive
|
PacktPublishing/Python-Real-World-Machine-Learning
|
f1747fdd73937544a243059ac7476bfbfac6d113
|
669e793cf80c35b6d2028f74bff2cd87b9458791
|
refs/heads/master
| 2023-04-13T11:48:13.459731
| 2023-01-30T09:21:10
| 2023-01-30T09:21:10
| 70,907,139
| 108
| 111
|
MIT
| 2023-04-04T14:38:41
| 2016-10-14T12:13:47
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 598
|
py
|
import sys
import cv2
import numpy as np
# Load the input image -- 'chair.jpg'
# Convert it to grayscale
input_file = sys.argv[1]
img = cv2.imread(input_file, cv2.IMREAD_GRAYSCALE)
h, w = img.shape
sobel_horizontal = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
sobel_vertical = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
laplacian = cv2.Laplacian(img, cv2.CV_64F)
canny = cv2.Canny(img, 50, 240)
cv2.imshow('Original', img)
cv2.imshow('Sobel horizontal', sobel_horizontal)
cv2.imshow('Sobel vertical', sobel_vertical)
cv2.imshow('Laplacian', laplacian)
cv2.imshow('Canny', canny)
cv2.waitKey()
|
[
"eganl@packtpub.com"
] |
eganl@packtpub.com
|
3e8640eb434a281e1b988d45b8387af02a0249b2
|
a3bdf69d3c5b8e422aae70eb21f3ae5f776e2ff9
|
/utils/excel_utils.py
|
5db791dbd899c620a96d16e483a8feaaa2a2cb18
|
[] |
no_license
|
cyssxt/data-sym-scrapy
|
6ce9d8d60869f89dc5f892984c113fdfeb52aefb
|
5fd92a42ec8a59fead315e2c63fc4d750de24c68
|
refs/heads/master
| 2021-08-23T18:41:02.036963
| 2017-12-06T02:47:39
| 2017-12-06T02:47:39
| 111,883,444
| 0
| 0
| null | 2017-11-24T06:22:27
| 2017-11-24T06:22:27
| null |
UTF-8
|
Python
| false
| false
| 2,437
|
py
|
import xlrd
from xlutils3.copy import copy
class ExcelParse(object):
"""
path:excel路径
sheet:excel中的sheet下表
head_index:excel标题头所在的行数
callback:遍历每一行的回掉
offset:内容体遍历的起始位置
limit:遍历的总记录数
descName:最终生成的excel
"""
def __init__(self, path, sheet=0, head_index=0, callback=None, offset=0, limit=None, desc_name="aaa.xls"):
data = xlrd.open_workbook(path)
self.table = data.sheet_by_index(sheet)
self.nrows = self.table.nrows
self.callback = callback
self.headIndex = head_index
self.ncols = self.table.ncols
self.headers = []
self.bodys = []
self.limit = limit
self.descTable = copy(data)
self.desc_name = desc_name
self.offset = offset
self.parse_header()
def parse(self):
for i in range(self.nrows):
row = self.table.row_values(i)
# data = self.callback and self.callback(self.table, row)
self.callback and self.callback(self.table, row)
def prase_body(self):
start = self.offset or (self.headIndex + 1)
end = (self.limit + start) if None is not self.limit else self.nrows
for bodyIndex in range(start, end):
self.callback and self.callback(self, self.table.row_values(bodyIndex), bodyIndex)
def parse_header(self):
head = self.table.row_values(self.headIndex)
for i in range(self.ncols):
self.headers.append(head[i])
"""
获取单元格的值
"""
def get_cell_value(self, row_index, header):
row = self.table.row_values(row_index)
index = self.headers.index(header)
return row[index]
"""
设置excel的单元格的值
"""
def set_cell_value(self, row_index, header, value, sheet=0):
index = self.headers.index(header)
table = self.descTable.get_sheet(sheet)
table.write(row_index, index, value)
"""
保存
"""
def save(self):
self.descTable.save(self.desc_name)
# http://browser.ihtsdotools.org/api/v1/snomed/en-edition/v20170731/descriptions?query=Headaches&limit=50&searchMode=partialMatching&lang=english&statusFilter=activeOnly&skipTo=0&returnLimit=100&normalize=true
# table, nRows = read_excel("../sym_about.xlsx")
# print nRows
|
[
"cyssxt@163.com"
] |
cyssxt@163.com
|
59f182eac7ff61fa54275583dd65186678b519c5
|
ef34e68712fb4aa9a1320c4e1e370a24de34fcb4
|
/nlu/utils/environment/authentication.py
|
31b46bde24d6ece7e8670f0fe028b52bcba4a11a
|
[
"Apache-2.0"
] |
permissive
|
milyiyo/nlu
|
dd656e77eedf2c831482edfd4ec59490b25d3954
|
d209ed11c6a84639c268f08435552248391c5573
|
refs/heads/master
| 2023-08-16T00:03:10.326392
| 2021-10-16T03:05:49
| 2021-10-16T03:05:49
| 414,223,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,823
|
py
|
from nlu.utils.environment.env_utils import *
def install_and_import_healthcare(JSL_SECRET):
""" Install Spark-NLP-Healthcare PyPI Package in current enviroment if it cannot be imported and liscense provided"""
import importlib
try:
importlib.import_module('sparknlp_jsl')
except ImportError:
import pip
print("Spark NLP Healthcare could not be imported. Installing latest spark-nlp-jsl PyPI package via pip...")
hc_version = JSL_SECRET.split('-')[0]
import pyspark
pip_major_version = int(pip.__version__.split('.')[0])
if pip_major_version in [10, 18, 19, 20]:
# for these versions pip module does not support installing, we install via OS command.
os.system(
f'pip install spark-nlp-jsl=={hc_version} --extra-index-url https://pypi.johnsnowlabs.com/{JSL_SECRET}')
else:
pip.main(['install', f'spark-nlp-jsl=={hc_version}', '--extra-index-url',
f'https://pypi.johnsnowlabs.com/{JSL_SECRET}'])
finally:
import site
from importlib import reload
reload(site)
globals()['sparknlp_jsl'] = importlib.import_module('sparknlp_jsl')
def authenticate_enviroment(SPARK_NLP_LICENSE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY):
"""Set Secret environ variables for Spark Context"""
os.environ['SPARK_NLP_LICENSE'] = SPARK_NLP_LICENSE
os.environ['AWS_ACCESS_KEY_ID'] = AWS_ACCESS_KEY_ID
os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET_ACCESS_KEY
def get_authenticated_spark(SPARK_NLP_LICENSE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, JSL_SECRET, gpu=False, ):
"""
Authenticates enviroment if not already done so and returns Spark Context with Healthcare Jar loaded
0. If no Spark-NLP-Healthcare, install it via PyPi
1. If not auth, run authenticate_enviroment()
"""
import sparknlp
authenticate_enviroment(SPARK_NLP_LICENSE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
install_and_import_healthcare(JSL_SECRET)
import sparknlp_jsl
if is_env_pyspark_2_3(): return sparknlp_jsl.start(JSL_SECRET, spark23=True, gpu=gpu)
if is_env_pyspark_2_4(): return sparknlp_jsl.start(JSL_SECRET, spark24=True, gpu=gpu)
if is_env_pyspark_3_0() or is_env_pyspark_3_1():
return sparknlp_jsl.start(JSL_SECRET, gpu=gpu, public=sparknlp.version())
print(f"Current Spark version {get_pyspark_version()} not supported!")
raise ValueError
def is_authorized_enviroment():
"""Check if auth secrets are set in enviroment"""
SPARK_NLP_LICENSE = os.getenv('SPARK_NLP_LICENSE')
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
return None not in [SPARK_NLP_LICENSE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY]
|
[
"christian.kasim.loan@gmail.com"
] |
christian.kasim.loan@gmail.com
|
97a9d2f3330873200fc4325d845a1d95fc6e784d
|
6a1fad01f41de94587c03c3904eaf087610a7422
|
/train.py
|
7499035720d29fae46c0d72b467b0cdca3c9fc9f
|
[] |
no_license
|
acids-ircam/ddsp_pytorch
|
1972c98f3d709d97df3606ef7f55cd724ac40c76
|
aaaf17d939ffbf7e6e4c848994204d07d62721a1
|
refs/heads/master
| 2021-12-12T00:48:31.743973
| 2021-09-07T07:46:51
| 2021-09-07T07:46:51
| 213,240,778
| 381
| 49
| null | 2021-11-26T21:18:46
| 2019-10-06T20:42:51
|
C
|
UTF-8
|
Python
| false
| false
| 3,338
|
py
|
import torch
from torch.utils.tensorboard import SummaryWriter
import yaml
from ddsp.model import DDSP
from effortless_config import Config
from os import path
from preprocess import Dataset
from tqdm import tqdm
from ddsp.core import multiscale_fft, safe_log, mean_std_loudness
import soundfile as sf
from einops import rearrange
from ddsp.utils import get_scheduler
import numpy as np
class args(Config):
CONFIG = "config.yaml"
NAME = "debug"
ROOT = "runs"
STEPS = 500000
BATCH = 16
START_LR = 1e-3
STOP_LR = 1e-4
DECAY_OVER = 400000
args.parse_args()
with open(args.CONFIG, "r") as config:
config = yaml.safe_load(config)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = DDSP(**config["model"]).to(device)
dataset = Dataset(config["preprocess"]["out_dir"])
dataloader = torch.utils.data.DataLoader(
dataset,
args.BATCH,
True,
drop_last=True,
)
mean_loudness, std_loudness = mean_std_loudness(dataloader)
config["data"]["mean_loudness"] = mean_loudness
config["data"]["std_loudness"] = std_loudness
writer = SummaryWriter(path.join(args.ROOT, args.NAME), flush_secs=20)
with open(path.join(args.ROOT, args.NAME, "config.yaml"), "w") as out_config:
yaml.safe_dump(config, out_config)
opt = torch.optim.Adam(model.parameters(), lr=args.START_LR)
schedule = get_scheduler(
len(dataloader),
args.START_LR,
args.STOP_LR,
args.DECAY_OVER,
)
# scheduler = torch.optim.lr_scheduler.LambdaLR(opt, schedule)
best_loss = float("inf")
mean_loss = 0
n_element = 0
step = 0
epochs = int(np.ceil(args.STEPS / len(dataloader)))
for e in tqdm(range(epochs)):
for s, p, l in dataloader:
s = s.to(device)
p = p.unsqueeze(-1).to(device)
l = l.unsqueeze(-1).to(device)
l = (l - mean_loudness) / std_loudness
y = model(p, l).squeeze(-1)
ori_stft = multiscale_fft(
s,
config["train"]["scales"],
config["train"]["overlap"],
)
rec_stft = multiscale_fft(
y,
config["train"]["scales"],
config["train"]["overlap"],
)
loss = 0
for s_x, s_y in zip(ori_stft, rec_stft):
lin_loss = (s_x - s_y).abs().mean()
log_loss = (safe_log(s_x) - safe_log(s_y)).abs().mean()
loss = loss + lin_loss + log_loss
opt.zero_grad()
loss.backward()
opt.step()
writer.add_scalar("loss", loss.item(), step)
step += 1
n_element += 1
mean_loss += (loss.item() - mean_loss) / n_element
if not e % 10:
writer.add_scalar("lr", schedule(e), e)
writer.add_scalar("reverb_decay", model.reverb.decay.item(), e)
writer.add_scalar("reverb_wet", model.reverb.wet.item(), e)
# scheduler.step()
if mean_loss < best_loss:
best_loss = mean_loss
torch.save(
model.state_dict(),
path.join(args.ROOT, args.NAME, "state.pth"),
)
mean_loss = 0
n_element = 0
audio = torch.cat([s, y], -1).reshape(-1).detach().cpu().numpy()
sf.write(
path.join(args.ROOT, args.NAME, f"eval_{e:06d}.wav"),
audio,
config["preprocess"]["sampling_rate"],
)
|
[
"caillon@ircam.fr"
] |
caillon@ircam.fr
|
e8720a42e2d433fa822311add8bf6a44faced378
|
bda32ee120fd07499fad1e5e973249ac15861200
|
/ValidSudoku.py
|
5996590c78e0207e3f98330665a34507963f42cf
|
[] |
no_license
|
congyingTech/leetcode
|
5f76d11a283115e46fdf4f295cf0279f53e692a1
|
35ff5db1ee6abcb3cf1144a9bf5420758e31e6ec
|
refs/heads/master
| 2021-01-21T04:41:20.195451
| 2016-06-16T07:03:09
| 2016-06-16T07:03:09
| 54,643,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,772
|
py
|
#-*- coding:utf-8 -*-
'''
Created on Mar 30, 2016
@author: congyingw
'''
#A Sudoku solution must satisfy three rules: each digit appears at most once per row, at most once per column, and at most once per 3x3 sub-grid.
#So we verify these three conditions one by one.
class Solution:
def isValidSudoku(self, board):
for i in range(0, 9):
            #row i (fixed row), column j, or row j, column i (fixed column)
if not self.isValidList([board[i][j] for j in range(0,9)]) or not self.isValidList([board[j][i] for j in range(0, 9)]):
return False
        #check the third rule: no duplicates inside any 3x3 sub-grid
for i in range(0, 3):
for j in range(0, 3):
if not self.isValidList([board[m][n] for m in range (3 * i, 3 * i + 3)for n in range(3 * j, 3*j + 3)]):
return False
return True
    #check whether a list is valid: drop '.', then use set() to remove duplicates and compare lengths
def isValidList(self, xs):
xs = list(filter(lambda x: x != '.', xs))
return len(set(xs)) == len(xs)
if __name__ == "__main__":
board = [[5, '.', '.', '.', '.', '.', '.', '.', '.'],
[5, 2, '.', '.', '.', '.', '.', '.', '.'],
['.', '.', 3, '.', '.', '.', '.', '.', '.'],
['.', '.', '.', 4, '.', '.', '.', '.', '.'],
['.', '.', '.', '.', 5, '.', '.', '.', '.'],
['.', '.', '.', '.', '.', 6, '.', '.', '.'],
['.', '.', '.', '.', '.', '.', 7, '.', '.'],
['.', '.', '.', '.', '.', '.', '.', 8, '.'],
['.', '.', '.', '.', '.', '.', '.', '.', 9]]
print(Solution().isValidSudoku(board))
|
[
"congyingTech@163.com"
] |
congyingTech@163.com
|
e93d618ef2a5f5ad993261c09a6a1b7b73293570
|
0fa1d839550f4bfb1d9d0860915770071422f2cd
|
/parrot.py
|
51dd4bbc1ed740272deb7e105a164b4e9cb6f887
|
[] |
no_license
|
crystalDf/Python-Crash-Course-2nd-Edition-Chapter-07-Input
|
b996d5b5bfbf20be039ac2e2314e51d6a51545a1
|
a8838fe405e4ce70e827a6ace98f3502b3a57f45
|
refs/heads/master
| 2023-06-10T08:12:07.675473
| 2021-06-20T14:45:01
| 2021-06-20T14:45:01
| 378,668,714
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
message = input("Tell me something, and I will repeat it back to you: ")
print(message)
prompt = "\nTell me something, and I will repeat it back to you:"
prompt += "\nEnter 'quit' to end the program. "
message = ""
while message != 'quit':
message = input(prompt)
if message != 'quit':
print(message)
active = True
while active:
message = input(prompt)
if message == 'quit':
active = False
else:
print(message)
|
[
"chendong333@gmail.com"
] |
chendong333@gmail.com
|
fa459c1d43ee6d8f2cdbb69fcdada36ad5305039
|
ee239ee6ead1f612fde7bec99be27c02afd11dd4
|
/sentimentanalyzer.py
|
8993e1ccb851c52fd63098f05ac298bcc7c21c68
|
[] |
no_license
|
vaitheeswarang/SentimentAnalyzer
|
fc5a597b6cf37e4b8726f4a559f46ce8fd4751e8
|
426b0e06a33a52b333a1631b66151bdde42eb715
|
refs/heads/master
| 2020-03-27T21:46:03.343604
| 2018-12-07T08:38:00
| 2018-12-07T08:38:00
| 147,174,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,898
|
py
|
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
print("*"*50)
#textt="the touch is not bad. the product is not a bad. i love this mobile fisrt. the product is awesome. it is easy to use. cost is too high to buy. camera is not good. comparing with other the quality is very bad. bad mobile. not good to say."
print("*"*50)
textt=input("Enter the review here: ")
text = textt.lower()  # lowercase the whole review (iterating over it later still yields characters)
print("*"*50)
positive=['good','happy','love','awesome']
negative=['sad','bad','not','no','wont']
"""
for line in text:
sentence = line.split('.')
for word in sentence:
li=list(word.split(' '))
#print(li)
"""
#remove punctuations
punc='''.,!#@'''
no_punc=""
for char in text:
if char not in punc:
no_punc+=char
#print(no_punc)
#remove stopwords
#stop_words=set(stopwords.words('english'))
word_tokens=word_tokenize(no_punc)
stop_words={'on','a','the','is','was','i','it','to','with','other','this'}
filtered_sentence=[]
for w in word_tokens:
if w not in stop_words:
filtered_sentence.append(w)
#filtered_sentence=list(no_punc.split(' '))
#print(filtered_sentence)
''' Calculating without Negation words '''
pos_count, neg_count, neutral_count=0,0,0
for i in range(len(filtered_sentence)):
for j in range(len(positive)):
if (filtered_sentence[i]==positive[j]):
pos_count +=1
#print("Positive Word:", filtered_sentence[i])
for k in range(len(negative)):
if (filtered_sentence[i]==negative[k]):
neg_count+=-1
#print("Negative Word:", filtered_sentence[i])
break
#else:
# neutral_count+=neutral_count
total_score = pos_count+neg_count
print("Positive Count: ", +pos_count)
print("Negative Count: ", +neg_count)
print("Total Setiment Score without calculating Negated word: ", +total_score)
print("*"*75)
#print(stop_words)
if total_score>0:
print("The content is Positive")
elif total_score<0:
print("The content is Negative")
else:
print("The content is Neutral")
print("*"*75)
''' Calculating with Negation words '''
score,nscore=0,0
negation_words=['not','never','ever','didnt','no','wont']
pos_count,neg_count,negated_word,nnegated_word=0,0,0,0
for i in range(len(filtered_sentence) - 1):  # stop one short so filtered_sentence[i+1] below never goes out of range
for k in range(len(negation_words)):
for j in range(len(positive)):
if(filtered_sentence[i]==negation_words[k]) and (filtered_sentence[i+1]==positive[j]):
#print(negation_words[k])
score=1*-1
negated_word+=1
print(filtered_sentence[i]+'_'+filtered_sentence[i+1])
for m in range(len(negation_words)):
for l in range(len(negative)):
if(filtered_sentence[i]==negation_words[m]) and (filtered_sentence[i+1]==negative[l]):
#print(negation_words[m])
nscore=1
nnegated_word+=1
print(filtered_sentence[i]+'_'+filtered_sentence[i+1])
print(filtered_sentence)
print("Total number of Negated words: ", negated_word)
print("The Total Score of Positive word with Negated_word is: ", score*negated_word)
print('\n')
print("Total number of Negative word with Negated words: ", nnegated_word)
print("The Total Score of Negative word with Negated_word is: ", nscore*nnegated_word)
print('\n')
print("The Total Sentiment Score with Negation words only: ", (score*negated_word)+(nscore*nnegated_word))
print("*"*75)
neg_total_score=(score*negated_word)+(nscore*nnegated_word)
if neg_total_score>0:
print("The content is Positive")
elif neg_total_score<0:
print("The content is Negative")
else:
print("The content is Neutral")
print("*"*75)
|
[
"noreply@github.com"
] |
vaitheeswarang.noreply@github.com
|
37ed82c45df03e22c5d1a9edd666017218ee89f1
|
c9500ad778b8521aaa85cb7fe3239989efaa4799
|
/plugins/zscaler/icon_zscaler/util/helpers.py
|
3a839a924b1fefd1411e0082e08af7540ce22557
|
[
"MIT"
] |
permissive
|
rapid7/insightconnect-plugins
|
5a6465e720f114d71b1a82fe14e42e94db104a0b
|
718d15ca36c57231bb89df0aebc53d0210db400c
|
refs/heads/master
| 2023-09-01T09:21:27.143980
| 2023-08-31T10:25:36
| 2023-08-31T10:25:36
| 190,435,635
| 61
| 60
|
MIT
| 2023-09-14T08:47:37
| 2019-06-05T17:05:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,440
|
py
|
from insightconnect_plugin_runtime.exceptions import PluginException
from re import sub, match, split
from typing import Union, Any, Dict
from icon_zscaler.util.constants import Assistance, Cause
CAMEL_CASE_REGEX = r"\b[a-z0-9]+([A-Z][a-z]+[0-9]*)*\b"
PASCAL_CASE_REGEX = r"\b[A-Z][a-z]+[0-9]*([A-Z][a-z]+[0-9]*)*\b"
CAMEL_CASE_ACRONYM_REGEX = r"\b[a-z0-9]+([A-Z]+[0-9]*)*\b"
def clean_dict(dictionary: Dict[str, Any]) -> Dict[str, Any]:
cleaned_dict = dictionary.copy()
for key, value in dictionary.items():
if isinstance(value, dict):
cleaned_dict[key] = clean_dict(value)
if cleaned_dict[key] == {}:
del cleaned_dict[key]
elif value in [None, "", 0, [], {}]:
del cleaned_dict[key]
return cleaned_dict
def remove_password_from_result(dictionary: dict) -> dict:
return {key: value for key, value in dictionary.copy().items() if key != "password"}
def prepare_department(department_api_result: list, given_department_name: str) -> dict:
for department in department_api_result:
if department.get("name") == given_department_name:
return department
raise PluginException(
cause=Cause.DEPARTMENT_NOT_FOUND,
assistance=Assistance.VERIFY_INPUT,
)
def prepare_groups(groups_api_result: list, given_groups_names: list) -> list:
result_list = []
available_names = [item.get("name") for item in groups_api_result]
for name in given_groups_names:
if name not in available_names:
raise PluginException(
cause=Cause.GROUP_NOT_FOUND,
assistance=Assistance.VERIFY_INPUT,
)
for group in groups_api_result:
for name in given_groups_names:
if name == group.get("name"):
result_list.append(group)
return result_list
def to_camel_case(provided_string: str) -> str:
if match(CAMEL_CASE_REGEX, provided_string):
return provided_string
if match(PASCAL_CASE_REGEX, provided_string):
return provided_string[0].lower() + provided_string[1:]
if match(CAMEL_CASE_ACRONYM_REGEX, provided_string):
words = split(r"(?<=[a-z0-9])(?=[A-Z])|(?<=[A-Z0-9])(?=[a-z])", provided_string)
result = "".join([w.title() for w in words])
return result[0].lower() + result[1:]
init, *temp = provided_string.split("_")
result = "".join([init.lower(), *map(str.title, temp)])
return result
def convert_dict_keys_to_camel_case(to_modify: Union[dict, list]) -> Union[dict, list]:
if isinstance(to_modify, list):
return [convert_dict_keys_to_camel_case(element) for element in to_modify]
elif isinstance(to_modify, dict):
output_dict = {}
for key, value in to_modify.items():
output_dict[to_camel_case(key)] = convert_dict_keys_to_camel_case(value)
return output_dict
else:
return to_modify
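# Editor's note, not part of the plugin: given the regexes above, a quick sanity
# check of the helpers might look like this (expected outputs are the editor's
# reading of the code, not documented behaviour):
#
#   to_camel_case("url_category")                                   # -> "urlCategory"
#   convert_dict_keys_to_camel_case({"configured_name": "Social"})  # -> {"configuredName": "Social"}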
def filter_dict_keys(dict_to_modify: dict, keys_to_keep: list) -> dict:
if not isinstance(dict_to_modify, dict):
return dict_to_modify
return {key: dict_to_modify.get(key) for key in keys_to_keep if key in dict_to_modify}
def find_custom_url_category_by_name(url_category_name: str, url_categories_list: list) -> dict:
if not url_categories_list or not url_category_name:
raise PluginException(
cause=Cause.CATEGORY_NOT_FOUND,
assistance=Assistance.VERIFY_INPUT,
)
url_category = list(
filter(lambda category: category.get("configuredName") == url_category_name, url_categories_list)
)
if url_category and url_category[0].get("id"):
return url_category[0]
else:
raise PluginException(
cause=Cause.CATEGORY_NOT_FOUND,
assistance=Assistance.VERIFY_INPUT,
)
def find_url_category_by_id(url_category_id: str, url_categories_list: list) -> dict:
if not url_categories_list or not url_category_id:
raise PluginException(
cause=Cause.CATEGORY_NOT_FOUND,
assistance=Assistance.VERIFY_INPUT,
)
url_category = list(filter(lambda category: category.get("id") == url_category_id, url_categories_list))
if url_category and url_category[0].get("id"):
return url_category[0]
else:
raise PluginException(
cause=Cause.CATEGORY_NOT_FOUND,
assistance=Assistance.VERIFY_INPUT,
)
|
[
"noreply@github.com"
] |
rapid7.noreply@github.com
|
7bfbdeac3b9d9dcaf8e91951da6cda2e2c7d6ce8
|
7daf78ed6ddc52c46b7d83db865cb6bd57a451ac
|
/spi_test.py
|
bac96a76c0db60104f1b5eb8cbb731cffd3ffb3f
|
[] |
no_license
|
bitbytebitco/radpc_ai
|
684b038fb14d117afec189e4871160fb021d2186
|
cacf80efa37acd6bf563ec27697b3fd594ed4720
|
refs/heads/master
| 2023-06-29T01:50:43.178256
| 2021-08-05T02:59:21
| 2021-08-05T02:59:21
| 365,015,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,423
|
py
|
import os
import spidev
import time
import RPi.GPIO as GPIO
# GPIO init
GPIO.setmode(GPIO.BCM)
GPIO.setup(23,GPIO.IN, pull_up_down=GPIO.PUD_UP)
# SPI init
spi_bus =0
spi_device = 0
spi = spidev.SpiDev()
spi.open(spi_bus, spi_device)
#spi.max_speed_hz = 4000
#spi.max_speed_hz = 151000
spi.max_speed_hz = 145000 # NOTE: CLOCK STUFF IS WEIRD!
spi.no_cs = False
ACK = 0xEE
BINARY = False
# TODO: replace print statements with logging
def send_receive(channel=None):
try:
print('')
print('send_receive')
print('SENDING: ACK(`{}`)'.format(hex(ACK)))
print(hex(ACK))
ack_resp = spi.xfer([ACK]) # send 0xEE to MSP to start sequence
        command = spi.readbytes(1)[0] # read a byte which will be the COMMAND FOR THE RPI
if hex(command) == "0x22":
print('Command: RESET')
spi.writebytes([0x22]) # SEND CONFIRMATION (echo command)
elif hex(command) == "0x25":
print('Command: PACKET')
if True:
fake = []
for i in range(127):
fake.append(0x00)
spi.writebytes([0x25]) # SEND CONFIRMATION (echo command)
packet = spi.xfer(fake) # read PACKET
else:
spi.writebytes([0x25])
packet = spi.readbytes(128)
#print('packet data')
#print(packet)
#print([hex(i) for i in packet])
#print("".join([chr(i) for i in packet]))
bytes_str = bytes(bytearray([i for i in packet]))
#print(str(bytes_str))
packet_count = len(os.listdir(os.path.join(os.getcwd(), "packets"))) # count files for filename usage
with open('packets/{}.txt'.format(int(packet_count)+1), 'w') as binary_file:
if not(BINARY):
binary_file.write(str(bytes_str)) # saving as a str
else:
binary_file.write(bytes_str) # saving as binary needs `wb` in open()
print('packet {} saved'.format(packet_count))
else:
print("ELSE!")
print(hex(command))
except Exception as e:
print(e)
# SETUP Interrupt
GPIO.add_event_detect(23, GPIO.RISING, callback=send_receive, bouncetime=500)
# Main Loop
while True:
pass
print('####')
print('####')
|
[
"bitbytebitco@gmail.com"
] |
bitbytebitco@gmail.com
|
74b1acca6c71d5a2a04373300690f2d2ca2e9bbd
|
1bd47bb8cffa81a8e7d578d3826a420e9750e161
|
/main.py
|
86049ea3a70e311a4e73e8ccc283d6a0a5982466
|
[] |
no_license
|
Damir10165/TableAndTree
|
24d8a5ef0afa4938ee05d833351d74d624ebe7aa
|
94fb3933066570c0ded7db5670ca1cc8abf22428
|
refs/heads/main
| 2023-08-17T10:58:39.704553
| 2021-07-16T12:43:26
| 2021-07-16T12:43:26
| 386,104,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,339
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import (QApplication, QWidget, QToolBar, QPushButton,
QMainWindow, QAction, QTextEdit, QGridLayout,
QTableView)
from PyQt5 import QtSql
import random
import sys
import os
DATABASE_NAME = 'example.db'
def Connect_DataBase():
if os.path.exists(DATABASE_NAME):
return Open_DataBase()
else:
return Create_DataBase()
def Open_DataBase():
con = QtSql.QSqlDatabase.addDatabase('QSQLITE')
con.setDatabaseName(DATABASE_NAME)
if con.open():
print("Open data base is success")
return con
else:
print("Error open data base")
return None
def Create_DataBase():
con = Open_DataBase()
if con is not None:
if Create_DataBase_Table(con):
print("Create data base is success")
return con
else:
print("Error create table")
return None
else:
print("Error open data base for create table")
return None
def Create_DataBase_Table(con):
if con.exec("CREATE TABLE Numbers (a float, b float, c float)"):
print("Create table is success")
return True
else:
print("Error create table")
return None
class Table(QTableView):
def __init__(self):
super().__init__()
self.con = Connect_DataBase()
self.model = QtSql.QSqlTableModel(self, self.con)
self.model.setTable('Numbers')
self.model.setEditStrategy(QtSql.QSqlTableModel.OnFieldChange)
self.model.select()
self.setModel(self.model)
def add_row(self):
rec = QtSql.QSqlRecord()
rec.append(QtSql.QSqlField('a'))
rec.append(QtSql.QSqlField('b'))
rec.append(QtSql.QSqlField('c'))
rec.setValue('a', float('{:.2}'.format(random.uniform(0,1))))
rec.setValue('b', float('{:.2}'.format(random.uniform(0,1))))
rec.setValue('c', float('{:.2}'.format(random.uniform(0,1))))
self.model.insertRecord(-1, rec)
class Window(QMainWindow):
    #main window
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
        #table
self.table1 = Table()
        #buttons for adding rows and columns
Action_1 = QAction('Добавить строку', self)
Action_1.triggered.connect(self.table1.add_row)
self.toolbar = self.addToolBar('Добавить строку')
self.toolbar.addAction(Action_1)
        #table and tree
window = QWidget()
Tree = QTextEdit()
grid = QGridLayout()
grid.setSpacing(5)
grid.addWidget(self.table1, 1, 0)
grid.addWidget(Tree, 1, 1)
window.setLayout(grid)
        self.setCentralWidget(window)
self.setGeometry(500, 500, 500, 500)
self.setWindowTitle("Главное окно")
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Window()
sys.exit(app.exec_())
|
[
"noreply@github.com"
] |
Damir10165.noreply@github.com
|
e3cb34e969e398b08d9c43935908f7b26d4014f0
|
000a4b227d970cdc6c8db192f4437698cb782721
|
/python/helpers/typeshed/stubs/stripe/stripe/api_resources/charge.pyi
|
2e3467e67bc5e6b90113fd9988cf281a375bde2c
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
trinhanhngoc/intellij-community
|
2eb2f66a2a3a9456e7a0c5e7be1eaba03c38815d
|
1d4a962cfda308a73e0a7ef75186aaa4b15d1e17
|
refs/heads/master
| 2022-11-03T21:50:47.859675
| 2022-10-19T16:39:57
| 2022-10-19T23:25:35
| 205,765,945
| 1
| 0
|
Apache-2.0
| 2019-09-02T02:55:15
| 2019-09-02T02:55:15
| null |
UTF-8
|
Python
| false
| false
| 851
|
pyi
|
from typing import Any
from stripe import api_requestor as api_requestor
from stripe.api_resources.abstract import (
CreateableAPIResource as CreateableAPIResource,
ListableAPIResource as ListableAPIResource,
UpdateableAPIResource as UpdateableAPIResource,
custom_method as custom_method,
)
class Charge(CreateableAPIResource, ListableAPIResource, UpdateableAPIResource):
OBJECT_NAME: str
def capture(self, idempotency_key: Any | None = ..., **params): ...
def refund(self, idempotency_key: Any | None = ..., **params): ...
def update_dispute(self, idempotency_key: Any | None = ..., **params): ...
def close_dispute(self, idempotency_key: Any | None = ..., **params): ...
def mark_as_fraudulent(self, idempotency_key: Any | None = ...): ...
def mark_as_safe(self, idempotency_key: Any | None = ...): ...
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
ecff5efbd76a2dde9d681d2784b800b6663f9127
|
b7c3511bf4293c12510f0eab72f6d2bffd05be19
|
/web_chat/chat/tests.py
|
4b658e6522c77c5c9eb85d331569b99c4f7bd68e
|
[] |
no_license
|
vramos1/backend-arquitectura
|
ab7e8831f899a038902c83c3bef89b202c432abb
|
159bad5b1e454328df6981d8935c909a8204e36d
|
refs/heads/master
| 2023-01-29T21:14:50.188507
| 2020-11-23T21:04:13
| 2020-11-23T21:04:13
| 310,602,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
from django.test import TestCase
from web_chat.chat.models import Room, Chat, Apply
from django.contrib.auth.models import User
# Create your tests here.
class ChatTest(TestCase):
def setUp(self):
self.user = User.objects.create(
is_superuser=False, password="test", username="test"
)
self.room = Room.objects.create(
name="test room", private=True, creator=self.user
)
self.room.users.add(self.user)
self.room.save()
self.message = Chat.objects.create(
username=self.user.username,
room=self.room,
message="this is a test message",
)
self.apply = Apply.objects.create(user=self.user, room=self.room)
def test_get_creator(self):
creator_username = self.room.get_creator()
self.assertEqual(creator_username, self.user.username)
def test_get_all_messages(self):
chats = self.room.get_all_messages()
self.assertEqual(chats[0].message, self.message.message)
def test_get_user_and_room(self):
username_string = self.apply.get_user_and_room()
correct_string = f"{self.user.username} => {self.room.name}"
self.assertEqual(username_string, correct_string)
|
[
"vramos1@uc.cl"
] |
vramos1@uc.cl
|
5a42411bbb622d7b2f01c712aad314bdfc8d9c6f
|
953b15ef3ff7d2a1b12d86dbf13f15af7eb8aa4a
|
/a3c_training_thread.py
|
a050c771ade48fcefdb98c1de9d1693718ecc548
|
[
"Apache-2.0"
] |
permissive
|
MightyChaos/async_transfer_rl
|
2fe213dda375f3df165dbe277aa656319997eb34
|
000601773d2e226848a49eb71cfc9d89cc6df9bb
|
refs/heads/master
| 2021-01-20T04:47:55.741247
| 2017-04-29T03:16:10
| 2017-04-29T03:16:10
| 89,735,265
| 0
| 0
| null | 2017-04-28T18:46:26
| 2017-04-28T18:46:26
| null |
UTF-8
|
Python
| false
| false
| 6,143
|
py
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import random
import time
import sys
from game_state import GameState
from game_state import ACTION_SIZE
from game_ac_network import GameACFFNetwork, GameACLSTMNetwork
from constants import GAMMA
from constants import LOCAL_T_MAX
from constants import ENTROPY_BETA
from constants import USE_LSTM
LOG_INTERVAL = 100
PERFORMANCE_LOG_INTERVAL = 1000
class A3CTrainingThread(object):
def __init__(self,
thread_index,
global_network,
initial_learning_rate,
learning_rate_input,
grad_applier,
max_global_time_step,
device):
self.thread_index = thread_index
self.learning_rate_input = learning_rate_input
self.max_global_time_step = max_global_time_step
if USE_LSTM:
self.local_network = GameACLSTMNetwork(ACTION_SIZE, thread_index, device)
else:
self.local_network = GameACFFNetwork(ACTION_SIZE, thread_index, device)
self.local_network.prepare_loss(ENTROPY_BETA)
with tf.device(device):
var_refs = [v._ref() for v in self.local_network.get_vars()]
self.gradients = tf.gradients(
self.local_network.total_loss, var_refs,
gate_gradients=False,
aggregation_method=None,
colocate_gradients_with_ops=False)
self.apply_gradients = grad_applier.apply_gradients(
global_network.get_vars(),
self.gradients )
self.sync = self.local_network.sync_from(global_network)
self.game_state = GameState(113 * thread_index)
self.local_t = 0
self.initial_learning_rate = initial_learning_rate
self.episode_reward = 0
    # variable controlling log output
self.prev_local_t = 0
def _anneal_learning_rate(self, global_time_step):
learning_rate = self.initial_learning_rate * (self.max_global_time_step - global_time_step) / self.max_global_time_step
if learning_rate < 0.0:
learning_rate = 0.0
return learning_rate
def choose_action(self, pi_values):
return np.random.choice(range(len(pi_values)), p=pi_values)
def _record_score(self, sess, summary_writer, summary_op, score_input, score, global_t):
summary_str = sess.run(summary_op, feed_dict={
score_input: score
})
summary_writer.add_summary(summary_str, global_t)
summary_writer.flush()
def set_start_time(self, start_time):
self.start_time = start_time
def process(self, sess, global_t, summary_writer, summary_op, score_input):
states = []
actions = []
rewards = []
values = []
terminal_end = False
# copy weights from shared to local
sess.run( self.sync )
start_local_t = self.local_t
if USE_LSTM:
start_lstm_state = self.local_network.lstm_state_out
# t_max times loop
for i in range(LOCAL_T_MAX):
pi_, value_ = self.local_network.run_policy_and_value(sess, self.game_state.s_t)
action = self.choose_action(pi_)
states.append(self.game_state.s_t)
actions.append(action)
values.append(value_)
if (self.thread_index == 0) and (self.local_t % LOG_INTERVAL == 0):
print("local_iter={0} pi={1} V={2}".format(i, pi_, value_))
#print(" V={}".format(value_))
# process game
self.game_state.process(action)
# receive game result
reward = self.game_state.reward
terminal = self.game_state.terminal
self.episode_reward += reward
# clip reward
rewards.append( np.clip(reward, -1, 1) )
self.local_t += 1
# s_t1 -> s_t
self.game_state.update()
if terminal:
terminal_end = True
print("score={}".format(self.episode_reward))
self._record_score(sess, summary_writer, summary_op, score_input,
self.episode_reward, global_t)
self.episode_reward = 0
self.game_state.reset()
if USE_LSTM:
self.local_network.reset_state()
break
R = 0.0
if not terminal_end:
R = self.local_network.run_value(sess, self.game_state.s_t)
actions.reverse()
states.reverse()
rewards.reverse()
values.reverse()
batch_si = []
batch_a = []
batch_td = []
batch_R = []
    # compute and accumulate gradients
for(ai, ri, si, Vi) in zip(actions, rewards, states, values):
R = ri + GAMMA * R
td = R - Vi
a = np.zeros([ACTION_SIZE])
a[ai] = 1
batch_si.append(si)
batch_a.append(a)
batch_td.append(td)
batch_R.append(R)
cur_learning_rate = self._anneal_learning_rate(global_t)
if USE_LSTM:
batch_si.reverse()
batch_a.reverse()
batch_td.reverse()
batch_R.reverse()
sess.run( self.apply_gradients,
feed_dict = {
self.local_network.s: batch_si,
self.local_network.a: batch_a,
self.local_network.td: batch_td,
self.local_network.r: batch_R,
self.local_network.initial_lstm_state: start_lstm_state,
self.local_network.step_size : [len(batch_a)],
self.learning_rate_input: cur_learning_rate } )
else:
sess.run( self.apply_gradients,
feed_dict = {
self.local_network.s: batch_si,
self.local_network.a: batch_a,
self.local_network.td: batch_td,
self.local_network.r: batch_R,
self.learning_rate_input: cur_learning_rate} )
if (self.thread_index == 0) and (self.local_t - self.prev_local_t >= PERFORMANCE_LOG_INTERVAL):
self.prev_local_t += PERFORMANCE_LOG_INTERVAL
elapsed_time = time.time() - self.start_time
steps_per_sec = global_t / elapsed_time
print("### Performance : {} STEPS in {:.0f} sec. {:.0f} STEPS/sec. {:.2f}M STEPS/hour".format(
global_t, elapsed_time, steps_per_sec, steps_per_sec * 3600 / 1000000.))
# return advanced local step size
diff_local_t = self.local_t - start_local_t
return diff_local_t
|
[
"yangluonaluna@gmail.com"
] |
yangluonaluna@gmail.com
|
ca93607b79207cc78004efd8c339bc2ab7e9d567
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5670465267826688_0/Python/yagao0o/dijkstra.py
|
96ca68f0bdc39cf5943ae362656074c360209c95
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,207
|
py
|
# author: yagoa0o
# date : 2015/04/11
class Solution():
multi_square = {'1': {'1': '1', 'i': 'i', 'j': 'j', 'k': 'k'},
'i': {'1': 'i', 'i': '-1', 'j': 'k', 'k': '-j'},
'j': {'1': 'j', 'i': '-k', 'j': '-1', 'k': 'i'},
'k': {'1': 'k', 'i': 'j', 'j': '-i', 'k': '-1'}}
def get_result(self, input_file_name, output_file_name):
infile = open(input_file_name)
outfile = open(output_file_name, "w+")
total = int(infile.readline())
# main procedure
for i in range(total):
input_parms = infile.readline().split()
l = int(input_parms[0])
x = int(input_parms[1])
characters = infile.readline()
result = 'Yes'
if x % 4 != 0 and l > 1:
#count characters
cal_result = '1'
cal_string = characters[:l] * (x % 4 + 8) if x > 12 else characters[:l] * x
got_i = False
got_j = False
for char in cal_string:
cal_result = self.multiply(cal_result, char)
if (not got_i) and cal_result == 'i':
got_i = True
if (not got_j) and got_i and cal_result == 'k':
got_j = True
if cal_result == '-1' and got_i and got_j:
result = 'YES'
else:
result = 'NO'
else:
result = 'NO'
outfile.write('Case #' + str(i + 1) + ': ' + result + '\n')
infile.close()
outfile.close()
return False
def multiply(self, a, b):
is_negative = False
is_negative = is_negative != (a[0] == '-')
is_negative = is_negative != (b[0] == '-')
result = self.multi_square[a[-1]][b[-1]]
is_negative = is_negative != (result[0] == '-')
if not is_negative:
return result[-1]
else:
return '-' + result[-1]
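# Editor's note (not in the original submission): multiply() implements the
# quaternion unit product table, so for example multiply('i', 'j') returns 'k'
# and multiply('-i', 'j') returns '-k'.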
solu = Solution()
file_name = 'C-small-attempt2'
solu.get_result(file_name + '.in', file_name + '.out')
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
7f3bf8c6c37f51045aadb727540f9a198debe4ce
|
a08670c23f1a8fed994fb781df93ce87a1ffc422
|
/web/base/__init__.py
|
658f51a797e95a51bb9111273933bf7998f7c2a2
|
[] |
no_license
|
roenfun/selenium-demo
|
962d53a459756716dea5bc81bdbc82105f619f61
|
7d5e46d8472cc22617d00df17f0fd272df19dff3
|
refs/heads/master
| 2023-03-21T13:27:49.707425
| 2021-03-15T03:23:37
| 2021-03-15T03:23:37
| 266,483,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79
|
py
|
# @File : __init__.py
# @Author : Chad
# @Time : 2020-05-23
# coding:utf-8
|
[
"Chad.long@vipshop.com"
] |
Chad.long@vipshop.com
|
0bea2a6533e2180f7d91076749f8da9dd71d235f
|
774df3c9bbb64889ae97b13363baf49709ad3f6c
|
/menwith/network.py
|
62d8a64f388a8a7199a6d71ab04d2a051c1d36ff
|
[] |
no_license
|
gmr/menwith
|
39095b8730d7ae87cd3d679bb65f3c6f1e775635
|
e03f51ccfa8db4db036f2f6e0fa640c579f1e150
|
refs/heads/master
| 2020-04-26T22:21:24.168247
| 2011-11-28T04:03:21
| 2011-11-28T04:03:21
| 173,705
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,961
|
py
|
"""
Main PCAP interface for listening on the NIC for data
"""
import logging
import pcap
from socket import ntohs, IPPROTO_TCP, IPPROTO_UDP
import struct
from . import memcache
# Ethernet constants
_ETHERTYPE_IPV4 = '\x08\x00'
# IPv4 Constants
_IPV4_BASE_HEADER_SIZE = 20 # Default IPv4 header size
_IPV4_OPTIONS_OFFSET = 20 # Bit 160
# TCP Constants
_TCP_BASE_HEADER_SIZE = 24
# Port we want to use by default
_MEMCACHED_PORT = 11211
# How many bytes to read
_SNAPSHOT_LENGTH = 65535
# Doesn't do anything in Linux
_TIMEOUT = 100
class TCPCapture(object):
def __init__(self, queue, device, port=_MEMCACHED_PORT):
"""Create a new TCPCapture object for the given device and port.
:param Queue queue: The cross-thread queue to create
:param str device: The device name (eth0, en1, etc)
:param int port: The port to listen on
:raises: ValueError
"""
self._logger = logging.getLogger('menwith.network.TCPCapture')
self._logger.debug('Setup with queue: %r', queue)
self._queue = queue
self._running = False
# Create the PCAP object
self._pcap = self._setup_libpcap(device, port)
def _char_conversion(self, value):
"""Convert the bytes to a character returning the converted string.
:param str value: The string to convert
:returns: str
"""
return ''.join(['%c' % byte for byte in value])
def _ethernet_decode(self, packet_in):
"""Extract the ethernet header, returning the ethernet header and
the remaining parts of the packet.
:param str packet_in: The full packet
:returns: tuple
"""
return (self._format_bytes(packet_in[0:6], ':'),
self._format_bytes(packet_in[6:12], ':'),
packet_in[12:14],
packet_in[14:])
def _format_bytes(self, value, delimiter=''):
"""Format a byte string returning the formatted value with the
specified delimiter.
:param str value: The byte string
:param str delimiter: The optional delimiter
:returns: str
"""
return delimiter.join(['%0.2x' % ord(byte) for byte in value])
def _ipv4_decode(self, packet_in):
"""Extract the IP header and populate a dictionary of values, returning
a the dictionary and the remaining data to extract.
:param str packet_in: The IP packet data
:returns: tuple
"""
out = {'version': struct.unpack('b', packet_in[0])[0] >> 4,
'ihl': (struct.unpack('b', packet_in[0])[0] & 0x0F) * 4,
'total_length': ntohs(struct.unpack('H', packet_in[2:4])[0]),
'identification': ntohs(struct.unpack('H', packet_in[4:6])[0]),
'flags': (ord(packet_in[6]) & 0xe0) >> 5,
'fragment_offset': (ntohs(struct.unpack('H',
packet_in[6:8])[0]) &
0x1f),
'ttl': ord(packet_in[8]),
'protocol': ord(packet_in[9]),
'checksum': ntohs(struct.unpack('H', packet_in[10:12])[0]),
'source': pcap.ntoa(struct.unpack('i', packet_in[12:16])[0]),
'destination': pcap.ntoa(struct.unpack('i',
packet_in[16:20])[0])}
# If our header size is more than 5 bytes, we have options
if out['ihl'] > _IPV4_BASE_HEADER_SIZE:
out['options'] = packet_in[_IPV4_BASE_HEADER_SIZE:out['ihl']]
else:
out['options'] = None
# Return the decoded packet
return out, packet_in[out['ihl']:]
def _process_packet(self, packet_length, packet_in, timestamp):
"""Called by libpcap's dispatch call, we receive raw data that needs
to be decoded then appended to the tcp buffer. When a full IP packet
is received, construct the TCP header dictionary.
:param int packet_length: The length of the packet received
:param str packet_in: The packet to be processed
:param float timestamp: The timestamp the packet was received
"""
# Extract the parts of the packet
dest, source, ethertype, payload = self._ethernet_decode(packet_in)
self._logger.debug(('Destination MAC Address: %s '
'Source MAC Address: %s'), dest, source)
# If we have an IPv4 ethertype, process it
if ethertype == _ETHERTYPE_IPV4:
# Process the IPv4 Header
ipv4_header, ipv4_payload = self._ipv4_decode(payload)
# Log the IPv4 Header values
self._logger.debug('IPv4 Header: %r', ipv4_header)
# Determine how to decode
if ipv4_header['protocol'] == IPPROTO_TCP:
# Decode the TCP Header
tcp_header, tcp_payload = self._tcp_decode(ipv4_payload)
# Log the TCP Header values
self._logger.debug('TCP Header: %r', tcp_header)
# Add the TCP data to the Queue for decoding
if tcp_payload:
self._queue.put(tcp_payload)
def _setup_libpcap(self, device, port):
"""Setup the pcap object and return the handle for it.
:returns: pcap.pcapObject
"""
# Validate the device
if not self._validate_device(device):
raise ValueError('Can not validate the device: %s' % device)
# Create the pcap object
pcap_object = pcap.pcapObject()
# Open the device in promiscuous mode
try:
pcap_object.open_live(device, _SNAPSHOT_LENGTH, True, _TIMEOUT)
self._logger.info('Opened %s', device)
except Exception as error:
raise OSError('Permission error opening device %s' % error)
# Set our filter up
filter = 'dst port %i' % port
# Create our pcap filter looking for ip packets for the memcached server
pcap_object.setfilter(filter, 1, 0)
self._logger.info('Filter set to: %s', filter)
# Set our operation to non-blocking
pcap_object.setnonblock(1)
# Return the handle to the pcap object
return pcap_object
def _tcp_decode(self, packet_in):
"""Extract the TCP header and populate a dictionary of values, returning
a the dictionary and the remaining data to extract.
:param str packet_in: The TCP packet data
:returns: tuple
"""
self._logger.debug('TCP Packet: %r', packet_in)
out = {'source_port': ntohs(struct.unpack('H', packet_in[0:2])[0]),
'dest_port': ntohs(struct.unpack('H', packet_in[2:4])[0]),
'data_offset': struct.unpack('B', packet_in[12])[0] >> 4}
return out, self._char_conversion(packet_in[(_TCP_BASE_HEADER_SIZE +
out['data_offset']):])
def _validate_device(self, device_name):
"""Validate the given device name as being available to the application.
While this is more hoops than just pcap.lookupdev, we can get a full
list of ip addresses we're listening for from this method.
:param str device_name: The device name to validate
:returns: Bool
"""
# Get all the devices available
devices = pcap.findalldevs()
# Iterate through the devices looking for the one we care about
for device in devices:
# Is this the droid, err device we are looking for?
if device[0] == device_name:
self._logger.debug('Validated device %s', device_name)
# Output ip addresses if there are any
if device[2]:
ip_addresses = list()
for address_info in device[2]:
ip_addresses.append(address_info[0])
self._logger.info('IP addresses to listen on: %r',
ip_addresses)
# Device validates
return True
# It was not found
return False
def process(self):
"""Start processing packets, dispatching received packets to the
TCPCapture._process_raw_data method.
Will loop as long as self._running is True
"""
# We want to process
self._running = True
# Iterate as long as we're processing
while self._running:
# Dispatch the reading of packets, as many as we can get
self._pcap.dispatch(1, self._process_packet)
def stop(self):
"""Causes the blocking listen call to stop."""
# Toggle the bool looped on in the listen method
self._running = False
# Log that the processing has been told to stop
self._logger.info('Indicated that processing of packets should stop')
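# --- Editor's usage sketch, not part of the original module ---
# Assuming a standard-library queue and a device name such as 'eth0'
# (both are placeholders), the class might be driven like this:
#
#   queue = Queue.Queue()                      # Python 2 style, matching this codebase
#   capture = TCPCapture(queue, 'eth0', port=11211)
#   capture.process()                          # blocks; call capture.stop() from another thread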
|
[
"gmr@myyearbook.com"
] |
gmr@myyearbook.com
|
b1d967e610d4020d110343b3a61c6a12bb491305
|
709522eb6c9730f2095df3d0f89b02b658e93e18
|
/run.py
|
6d7679064d87dd38d491dcc181560356ff89813e
|
[] |
no_license
|
haracewiat/MultiplayerGame
|
1bb549c4bae6655ece3f5c2386d33a2a042ac7d7
|
4896626da59c280a7a8ae22026cd33dd0818095f
|
refs/heads/master
| 2023-07-20T02:12:27.227911
| 2020-03-11T17:41:54
| 2020-03-11T17:41:54
| 246,146,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
from game import Game
if __name__ == "__main__":
print("run")
    g = Game()  # Game is imported directly above, so no module prefix is needed
g.run()
|
[
"b.haracewiat@gmail.com"
] |
b.haracewiat@gmail.com
|
d8827844e462a3b2f1b7a4bfea42cf1fa45b6f4e
|
a080b5cb90d0e274b5a7aec3adb9bfdf4d5b8cd5
|
/jpgtopng.py
|
c01fa5b2f05dab9aa03590a67748832f4389f50d
|
[] |
no_license
|
nickcodes-py/JPGtoPNG-Converter
|
df2025855c87558c054de4dee7c73885d973b77a
|
c521cdba80008a2e0d3e0572b1bb7360020642b2
|
refs/heads/master
| 2020-12-04T20:55:37.693165
| 2020-01-05T10:18:14
| 2020-01-05T10:18:14
| 231,899,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
import sys
import os
from PIL import Image, ImageFilter
#Grab Current and New Folder
image_folder = sys.argv[1]
output_folder = sys.argv[2]
#Check if New exist, or create
if not os.path.exists(output_folder):
os.makedirs(output_folder)
#Loop through folder, convert images to PNG
for filename in os.listdir(image_folder):
    img = Image.open(os.path.join(image_folder, filename))
    clean_name = os.path.splitext(filename)[0]
    img.save(os.path.join(output_folder, f'{clean_name}.png'), 'PNG') #Save to new folder with a .png extension
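# Example invocation (editor's note; the folder names are placeholders):
#   python jpgtopng.py ./holiday_photos ./holiday_photos_png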
|
[
"nikunj.dutt@outlook.com"
] |
nikunj.dutt@outlook.com
|
224730f81d5c198b3bf51777919019a771f07a63
|
71c47b4c8f35279c1f875c499ecae3c2eaf65432
|
/Module_1/homework/100NSqrt.py
|
5976441d21bd1b5d6e2749fc5d97916fb9033217
|
[] |
no_license
|
isemiguk/Python_Coursera
|
e53ead7ed966a0b11a9c43667caa00f9c4504328
|
ce7d5904a91dfadd57d9a2bb4fb413d4c23c7a3c
|
refs/heads/master
| 2022-11-06T13:56:50.926356
| 2020-07-01T19:12:22
| 2020-07-01T19:12:22
| 266,181,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36
|
py
|
a = input()
print(int(a * 100)**2)
|
[
"igorsemiguk@gmail.com"
] |
igorsemiguk@gmail.com
|
ce95dbf6fcaaaf9619bb3af852fd822f2f11193d
|
aa74d22568d80866488a321026c369a12346484f
|
/microblog/app/venv/bin/flask
|
ffb649ae118d9a9f02968aa474a905ff4e264b54
|
[] |
no_license
|
nptravis/python-studies
|
0ca5e3d804beb2b9b7e3484606753324c9151bab
|
d649ac614bf9efdce1cc50e015f9252d1eba371c
|
refs/heads/master
| 2021-09-03T13:18:27.621292
| 2018-01-09T09:43:39
| 2018-01-09T09:43:39
| 114,952,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
#!/Users/Nic/Documents/projects/cs50/python/microblog/app/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"nptravis@yahoo.com"
] |
nptravis@yahoo.com
|
|
9148b220bb576626f27c0f2cfb3cb25ebbcd7139
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/models/ms_data/datatypes/facets/byte/byte_max_inclusive001_xsd/__init__.py
|
0444a6966ba5abae09b921e38485930982799bb6
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 173
|
py
|
from output.models.ms_data.datatypes.facets.byte.byte_max_inclusive001_xsd.byte_max_inclusive001 import (
FooType,
Test,
)
__all__ = [
"FooType",
"Test",
]
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
ec4b7992e11399e05868bd6cd613c86cd686efa1
|
bf02d48baebee89a95b2c637b6d017ca300039ef
|
/tictactoe/settings.py
|
f9e2d7ba7de9bf3f09d6d21562d2bce0679fe770
|
[] |
no_license
|
maheshwars/Tic-Tac-Toe
|
c4014b9d0c833e4d0648066f319b42a60e6d589e
|
b2ea1c5f0b84f8fbd5e360e323d4030e8fc82589
|
refs/heads/master
| 2022-12-09T12:41:50.696273
| 2020-09-09T21:30:27
| 2020-09-09T21:30:27
| 294,225,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,509
|
py
|
"""
Django settings for tictactoe project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%bx5#d_6-%-3#v(5p4t=n*svppv280hmaz^g3fdiuk95s--01p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gameplay',
'player',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tictactoe.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tictactoe.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
LOGIN_REDIRECT_URL = "player_home"
LOGOUT_REDIRECT_URL = "tictactoe_welcome"
LOGIN_URL = "player_login"
CRISPY_TEMPLATE_PACK ='bootstrap3'
|
[
"noreply@github.com"
] |
maheshwars.noreply@github.com
|
8c891499686c46192eb6e8fdbff3d1ff45edece6
|
d40b1042ab0c3506a35c65889f0df73405fb66b0
|
/namespaces.py
|
639b3bac194dbcc687399605a172bbb5c8b7d97f
|
[] |
no_license
|
juanique/ucursos-rdf
|
18a7e92789eb5f035e798d12128458bd6133af57
|
93c6c78876b99ecb253e0e98144e11ec39d9903e
|
refs/heads/master
| 2020-05-31T06:24:25.110260
| 2012-05-03T03:01:52
| 2012-05-03T03:01:52
| 4,177,485
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
from rdflib import Namespace
NFO = Namespace('http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#')
NIE = Namespace('http://www.semanticdesktop.org/ontologies/2007/01/19/nie#')
RDFS = Namespace('http://www.w3.org/2000/01/rdf-schema#')
RDF = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
CLIP = Namespace('http://www.rdfclip.com/resource/')
CLIPS = Namespace('http://www.rdfclip.com/schema#')
XMLS = Namespace('http://www.w3.org/2001/XMLSchema#')
UCHILE = Namespace('http://www.rdfclip.com/resources/uchile#')
UCHILES = Namespace('http://www.rdfclip.com/schema/uchile#')
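# Editor's example (not in the original file): rdflib namespaces build URIRefs via
# item or attribute access, e.g. CLIPS['Course'] or UCHILES.Department; the term
# names used here are hypothetical.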
|
[
"juanique@ubuntu.(none)"
] |
juanique@ubuntu.(none)
|
61c9e42c66d7a8db5e32e7b93993184e39839ae7
|
69310bc4a04d4f9cbbb27a4c322c5d7d8fb3d0c3
|
/Tests/test_PopulationSample.py
|
bd4a0e308ae4df676de830ec8d97e519ca0cd2d5
|
[] |
no_license
|
jaylakhani14/statcalc
|
15ea52e787cb75cd767d6e3564fcefd7d06d7536
|
e28a233c06ff236b157494d2df31dea15c8dc328
|
refs/heads/master
| 2023-03-31T19:39:41.400843
| 2021-03-23T01:16:08
| 2021-03-23T01:16:08
| 344,951,449
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
import unittest
import numpy as np
import scipy.stats  # imported explicitly so that scipy.stats.sem below resolves
from PopulationSampling.simpleRandom import Sample
from PopulationSampling.confidence import Confidence
class MyTestCase(unittest.TestCase):
def test_simple_random_sample(self):
mylist = ["apple", "banana", "cherry"]
Sample.sample(mylist, 2)
def test_confidence_interval(self):
sample = [1, 2, 3, 4, 5, 6, 7, 8, 9]
c = 0.95
df = len(sample) - 1
sample_mean = np.mean(sample)
sample_standard_error = scipy.stats.sem(sample)
Confidence.mean_confidence_interval(c, df, sample_mean, sample_standard_error)
if __name__ == '__main__':
unittest.main()
|
[
"fjt7@njit.edu"
] |
fjt7@njit.edu
|
9b3c43e2159eccae5ed0ba80f67a626831e04c62
|
722d8f2aa69095dbdbe32ecdeebb4bcf20e2ea3c
|
/tests/test_issue10_17.py
|
e54ddd56630de1e5e811d12e7ec3f3da0d804333
|
[] |
permissive
|
clach04/pyqrcodeNG
|
1bad85bea9c2a5ca9bcda8849e35af51f8e394f3
|
8033a915eca1946537002b8b271ea2cddb4e004b
|
refs/heads/master
| 2022-04-23T00:18:36.478138
| 2020-01-21T22:28:44
| 2020-01-21T22:28:44
| 259,402,644
| 0
| 0
|
BSD-3-Clause
| 2020-04-27T17:24:36
| 2020-04-27T17:24:35
| null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
# -*- coding: utf-8 -*-
"""\
Tests against <https://github.com/mnooner256/pyqrcode/issues/17> and
<https://github.com/heuer/pyqrcode/issues/10>
Unicode issues.
"""
from __future__ import unicode_literals
import pyqrcodeng as pyqrcode
def test_issue_10_17():
qr = pyqrcode.create('John’s Pizza')
assert qr
assert 'binary' == qr.mode
if __name__ == '__main__':
import pytest
pytest.main([__file__])
|
[
"heuer@semagia.com"
] |
heuer@semagia.com
|
a6db5964253e81c368fcf13b0b31ecd3de6e3376
|
4eeb7257a706967a049ea0e020bdd8d05f987c21
|
/working/kub-nginx.py
|
f5b3cb82ed777fedc2d42f605087d169d7903549
|
[] |
no_license
|
monolive/airflow-dags
|
6e1451261f370ec9707a3226ad2ea4498d628faa
|
e1191d2193ca428f331c7ac0d09c89056893f891
|
refs/heads/master
| 2020-04-10T13:32:33.789760
| 2019-02-25T08:59:57
| 2019-02-25T08:59:57
| 161,053,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
#from airflow.contrib.operators import KubernetesOperator
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.contrib.kubernetes.secret import Secret
seven_days_ago = datetime.combine(datetime.today() - timedelta(7), datetime.min.time())
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': seven_days_ago,
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
'pool': 'kube',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
dag = DAG( 'nginx', default_args=default_args, schedule_interval=timedelta(days=1), dagrun_timeout=timedelta(minutes=5),)
start = DummyOperator(task_id='run_this_first', dag=dag)
boom = KubernetesPodOperator(namespace='airflow',
image="nginx:latest",
image_pull_policy="Always",
# cmds=["bash","-cx"],
# arguments=["echo","10"],
name="nginx",
task_id="startNginx",
is_delete_operator_pod=True,
hostnetwork=False,
dag=dag,
in_cluster=False,
)
boom.set_upstream(start)
|
[
"Olivier.Renault@vanquisbank.co.uk"
] |
Olivier.Renault@vanquisbank.co.uk
|
ae28eeb606bc9e3c7a1b0a8586dd6305705295bb
|
add83856f85c4134a524671121212e32e9a0ade2
|
/aula4.py
|
6fb7f45e441fefbeecbb28f0bed9ee88a5a5bf73
|
[] |
no_license
|
priscilasvn10/Estudando_Python
|
671e3842c7b4e4de29e86cd7abe23f307d4a2ab8
|
ffef655a9c3282ac158435b3263928cd6cb531bd
|
refs/heads/master
| 2022-11-06T20:57:46.375239
| 2020-06-11T11:54:16
| 2020-06-11T11:54:16
| 271,512,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
a = int(input('Digite o primeiro valor: '))
b = int(input('Digite o segundo valor: '))
soma = a + b
media = soma/2
print('O valor da soma é: {soma}'.format(soma=soma))
print('\n A média é {}'.format(media))
|
[
"noreply@github.com"
] |
priscilasvn10.noreply@github.com
|
b9d57dfeaf96a51267c065beaf179dce02cef79a
|
d5905c424264e9376fb80871749865140fbd3fb1
|
/MPCNN-sentence-similarity-tensorflow/embedding.py
|
cdf6713bff57f004be2f5dcc433e2611a2f190a8
|
[] |
no_license
|
eric-seekas/sentence_similarity
|
456e29ecb53e63a6f0ab804acfc6e919e1a1458c
|
647e90b1c43ab838fe857f87d3a2d5364112ff9b
|
refs/heads/master
| 2021-02-08T18:20:45.443720
| 2018-05-22T10:50:17
| 2018-05-22T10:50:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,310
|
py
|
"""
A toolkit you may find useful for mapping sentences to embeddings.
Download and unzip the standard GloVe embeddings to use this.
Skip-thoughts use unigram/bigram information from the Children Book dataset.
"""
from __future__ import print_function
import numpy as np
class Embedder(object):
""" Generic embedding interface.
Required:
* w: dict mapping tokens to indices
* g: matrix with one row per token index
* N: embedding dimensionality
"""
def map_tokens(self, tokens, ndim=2):
""" for the given list of tokens, return a list of GloVe embeddings,
or a single plain bag-of-words average embedding if ndim=1.
Unseen words (that's actually *very* rare) are mapped to 0-vectors. """
gtokens = [self.g[self.w[t]] for t in tokens if t in self.w]
if not gtokens:
return np.zeros((1, self.N)) if ndim == 2 else np.zeros(self.N)
gtokens = np.array(gtokens)
if ndim == 2:
return gtokens
else:
return gtokens.mean(axis=0)
def map_set(self, ss, ndim=2):
""" apply map_tokens on a whole set of sentences """
return [self.map_tokens(s, ndim=ndim) for s in ss]
def map_jset(self, sj):
""" for a set of sentence emb indices, get per-token embeddings """
return self.g[sj]
def pad_set(self, ss, spad, N=None):
""" Given a set of sentences transformed to per-word embeddings
(using glove.map_set()), convert them to a 3D matrix with fixed
sentence sizes - padded or trimmed to spad embeddings per sentence.
Output is a tensor of shape (len(ss), spad, N).
To determine spad, use something like
np.sort([np.shape(s) for s in s0], axis=0)[-1000]
so that typically everything fits, but you don't go to absurd lengths
        to accommodate outliers.
"""
ss2 = []
if N is None:
N = self.N
for s in ss:
if spad > s.shape[0]:
if s.ndim == 2:
s = np.vstack((s, np.zeros((spad - s.shape[0], N))))
else: # pad non-embeddings (e.g. toklabels) too
s = np.hstack((s, np.zeros(spad - s.shape[0])))
elif spad < s.shape[0]:
s = s[:spad]
ss2.append(s)
return np.array(ss2)
class GloVe(Embedder):
""" A GloVe dictionary and the associated N-dimensional vector space """
def __init__(self, N=50, glovepath='/media/jlan/E/Projects/nlp/数据集/glove.6B/glove.6B.%dd.txt'):
""" Load GloVe dictionary from the standard distributed text file.
Glovepath should contain %d, which is substituted for the embedding
dimension N. """
self.N = N
self.w = dict()
self.g = []
self.glovepath = glovepath % (N,)
# [0] must be a zero vector
self.g.append(np.zeros(self.N))
with open(self.glovepath, 'r') as f:
for line in f:
l = line.split()
word = l[0]
self.w[word] = len(self.g)
self.g.append(np.array(l[1:]).astype(float))
self.w['UKNOW'] = len(self.g)
self.g.append(np.zeros(self.N))
self.g = np.array(self.g, dtype='float32')
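# --- Editor's usage sketch, not part of the original module ---
# The GloVe path below is a placeholder for whatever local copy of the 6B
# vectors is available:
#
#   glove = GloVe(N=50, glovepath='/path/to/glove.6B.%dd.txt')
#   avg = glove.map_tokens(['hello', 'world'], ndim=1)                    # (50,) bag-of-words average
#   batch = glove.pad_set(glove.map_set([['hello', 'world']]), spad=10)   # shape (1, 10, 50)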
|
[
"cbj_love@126.com"
] |
cbj_love@126.com
|
0292b72004bd85deca84805fc86f18693d557717
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/JFLADuABfkeoz8mqN_5.py
|
418dc4c327c7b6b0b704d40cb3c93aa5c599e590
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def compare_age(self, other):
modifier = ''
if self.age < other.age:
modifier = 'older than'
elif self.age == other.age:
modifier = 'the same age as'
else:
modifier = 'younger than'
return '{n} is {m} me.'.format(n = other.name, m = modifier)
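# Editor's usage example (the names and ages are made up):
#   p1 = Person("Samuel", 24)
#   p2 = Person("Joel", 36)
#   p1.compare_age(p2)   # -> "Joel is older than me."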
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
fbc8f567fc64ca0f8626efc764da3297fb9e5918
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/tensorflow/contrib/tensor_forest/hybrid/python/models/stochastic_soft_decisions_to_data_then_nn.py
|
e916a8734c524c83772576e38418e6667fa6b0c9
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232
| 2020-01-11T13:43:10
| 2020-01-11T13:43:10
| 230,088,347
| 0
| 0
|
Apache-2.0
| 2019-12-25T10:49:15
| 2019-12-25T10:49:14
| null |
UTF-8
|
Python
| false
| false
| 1,954
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A hybrid model that samples paths when training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.contrib.tensor_forest.hybrid.python.models import hard_decisions_to_data_then_nn
from tensorflow.python.training import adagrad
class StochasticSoftDecisionsToDataThenNN(
hard_decisions_to_data_then_nn.HardDecisionsToDataThenNN):
"""A hybrid model that samples paths when training."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(StochasticSoftDecisionsToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [decisions_to_data.StochasticSoftDecisionsToDataLayer(
params, 0, device_assigner),
fully_connected.FullyConnectedLayer(
params, 1, device_assigner=device_assigner)]
|
[
"v-grniki@microsoft.com"
] |
v-grniki@microsoft.com
|
46a326cd6ade0459a980a6bbdfafd899baf525e5
|
0208cbf2f0ea3c5405f2f8e3e74367df3916b999
|
/venv/bin/easy_install-3.6
|
cb43565f07cca22102c5e0b7d32db2c448086c7c
|
[
"MIT"
] |
permissive
|
BHushanRathod/ecolibrium-assessment
|
aaa111ea7ba5e495f7deff5f2e59c2a7816dfd2e
|
b7a4f1ebb69094bcb0c2d39fc94df3cd93c73fb0
|
refs/heads/master
| 2022-05-02T14:45:36.088566
| 2021-07-12T10:20:51
| 2021-07-12T10:20:51
| 172,217,130
| 0
| 0
|
MIT
| 2022-04-22T21:01:35
| 2019-02-23T13:23:35
|
Python
|
UTF-8
|
Python
| false
| false
| 468
|
6
|
#!/Users/bhushan/Truso1.0/Experiment/ecolibrium_assessment/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
|
[
"abhushanprathod@gmail.com"
] |
abhushanprathod@gmail.com
|
25a420ee9e5512309aafd20ed676820b038209ea
|
b86608b6de44642ed29cd88bba4acbbdd31a0b04
|
/tools/agile-machine-learning-api/codes/trainer/launch_demo.py
|
8bd90b27237627cf0a348cdc757db624b062bda7
|
[
"Apache-2.0"
] |
permissive
|
MCRen88/professional-services
|
a514a926dd23e3c4ac6dadb656faed22c3d91d5d
|
d7bc3b194159ffdb149c9507890bb1fbae7a8d88
|
refs/heads/master
| 2020-12-15T16:38:17.860940
| 2020-01-06T19:29:47
| 2020-01-06T19:29:47
| 235,181,173
| 1
| 0
|
Apache-2.0
| 2020-01-20T19:26:15
| 2020-01-20T19:26:14
| null |
UTF-8
|
Python
| false
| false
| 21,978
|
py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""End to End script draft."""
import argparse
import ast
import os
import time
import numpy as np
import six
import tensorflow as tf
from tensorflow.contrib.training.python.training import hparam
from input_pipeline_dask import InputReader, BasicStats, DatasetInput
from models import CannedModel, CustomModel
from utils.metric_utils import mean_acc, mar, my_auc, rmse
from utils.optimizer_utils import Optimizer
class Config(object):
"""
Creates a run config object for training an estimator.
It can be initialized with default parameters or
with values passed by the user.
"""
def __init__(
self,
model_dir=None,
tf_random_seed=None,
save_summary_steps=100,
save_checkpoints_steps=None,
save_checkpoints_secs=120,
session_config=None,
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000,
log_step_count_steps=100,
train_distribute=None):
"""Initializes the config object.
Arguments:
model_dir : string, directory where the checkpoints are stored
tf_random_seed : integer, seed to set for random initialization
save_summary_steps : integer, number of global steps to save summaries
save_checkpoints_steps : integer, number of global steps to save checkpoints
save_checkpoints_secs : integer, number of seconds to save checkpoints
session_config : object, a config proto used to set session parameters
keep_checkpoint_max : integer, maximum number of checkpoints to be stored
keep_checkpoint_every_n_hours : integer, frequency of saving checkpoints
log_step_count_steps : integer, frequency of steps to log information
train_distribute : tf.distribute.Strategy object, distribution strategy for training
"""
self.model_dir = model_dir
self.tf_random_seed = tf_random_seed
self.save_summary_steps = save_summary_steps
self.save_checkpoints_steps = save_checkpoints_steps
self.save_checkpoints_secs = save_checkpoints_secs
self.session_config = session_config
self.keep_checkpoint_max = keep_checkpoint_max
self.keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self.log_step_count_steps = log_step_count_steps
self.train_distribute = train_distribute
def set_config(self):
"""
Sets the Run config object with the parameters parsed by the user
"""
self.RunConfig = tf.estimator.RunConfig(
model_dir=self.model_dir,
tf_random_seed=self.tf_random_seed,
save_summary_steps=self.save_summary_steps,
session_config=self.session_config,
save_checkpoints_steps=self.save_checkpoints_steps,
save_checkpoints_secs=self.save_checkpoints_secs,
keep_checkpoint_max=self.keep_checkpoint_max,
keep_checkpoint_every_n_hours=self.keep_checkpoint_every_n_hours,
log_step_count_steps=self.log_step_count_steps,
train_distribute=self.train_distribute)
def get_config(self):
"""
Get Config object with parameters parsed by the user
Returns: tf.estimator.RunConfig object for estimator training
"""
return self.RunConfig
def get_is_chief(self):
"""
Get _is_chief boolean from RunConfig object
Returns: tf.estimator.RunConfig object for estimator training
"""
return self.RunConfig._is_chief
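# Illustrative usage sketch (added for clarity; not part of the original file);
# the model directory below is a placeholder path.
#   cfg = Config(model_dir='/tmp/model')
#   cfg.set_config()
#   run_config = cfg.get_config()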
def prep_input(
csv_path,
task_type,
target_var,
na_values,
column_names,
to_drop,
gcs_path,
data_type,
name):
"""
Preprocessing function for train and eval datasets.
Arguments:
csv_path : str, path of the csv file
task_type : string, ML task at hand, following options are expected
[classification, regression, clustering]
target_var : string, Name of the dependent/target variable
na_values : string, String by which the na values are represented in the data
column_names : string, Names of the columns passed in a text file
to_drop : list, Any redundant columns which can be dropped
gcs_path : boolean, Whether the csv is stored on google cloud storage
data_type : dict, dictionary containing the data type of all columns in format
{'a': 'float', 'b': 'object', 'c': 'int' }
name : str, name of the data being passed [train, eval]
Returns:
df : dask.DataFrame object, dataframe containing cleaned data of the passed csv file
cols : list, list containing column names of the data
defaults : list, list containing defaults of the columns
mapped : dict, dictionary containing vocabulary of the categorical columns
mean : pandas.Series, pandas series containing mean values of continuous columns
std_dev : pandas.Series, pandas series containing standard deviation values of continuous columns
"""
inp = InputReader(
csv_path=csv_path,
task_type=task_type,
target_var=target_var,
na_values=na_values,
column_names=column_names,
to_drop=to_drop,
gcs_path=gcs_path,
data_type=data_type)
df, cols = inp.parse_csv_wrap()
stats = BasicStats()
df, mean, std_dev, defaults = stats.clean_data(
df=df,
target_var=inp.target_var,
task_type=task_type,
name=name)
mapped = stats.find_vocab(df=df)
mapped.pop(inp.target_var)
return df, cols, defaults, mapped, mean, std_dev
def create_deep_cols(feat_cols, name):
"""Creates embedding and indicator columns for canned DNNclassifier.
Arguments:
feat_cols : list, A list of feature column objects.
name : string, name of the task in hand
Returns:
A list of feature column objects.
"""
deep_cols = None
if name not in ['linearclassifier', 'linearregressor',
'polynomialclassifier', 'polynomialregressor']:
deep_cols = list()
for i in feat_cols:
if i.dtype == 'string':
i = tf.feature_column.indicator_column(i)
deep_cols.append(i)
return deep_cols
def none_or_str(value):
"""
Creates a nonetype argument from command line.
Arguments:
value : The keyword argument from command line
Returns:
None if the string none is found
"""
if value == 'None':
return None
return value
def convert_to_list(value):
"""
Creates a list argument from command line.
Arguments:
value : The keyword argument from command line
Returns:
None if the string 'None' is found
list if a string of space separated values is found
"""
if value == 'None':
return None
return value.split(' ')
def convert_to_dict(value):
"""
Creates a dict argument from command line.
Arguments:
value : The keyword argument from command line
Returns:
None if the string 'None' is found
dict if a dict-like string is found
"""
if value == 'None':
return None
return ast.literal_eval(value)
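# Quick sanity examples for the three converters above (illustrative only,
# not part of the original file):
#   none_or_str('None')          -> None
#   convert_to_list('a b c')     -> ['a', 'b', 'c']
#   convert_to_dict("{'a': 1}")  -> {'a': 1}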
def run_experiment(hparams):
"""
Arguments:
hparams : tf.contrib.training.HParams object, contains all the arguments
as a set of key value pairs
Sets up the experiment to be launched on cloud machine learning engine
"""
a = time.time()
_, csv_cols, csv_defaults, mapped, mean, std_dev = prep_input(
csv_path=hparams.train_csv_path,
task_type=hparams.task_type,
target_var=hparams.target_var,
na_values=hparams.na_values,
column_names=hparams.column_names,
to_drop=hparams.to_drop,
gcs_path=hparams.gcs_path,
data_type=hparams.data_type,
name='train')
_, _, _, _, _, _ = prep_input(
csv_path=hparams.eval_csv_path,
task_type=hparams.task_type,
target_var=hparams.target_var,
na_values=hparams.na_values,
column_names=hparams.column_names,
to_drop=hparams.to_drop,
gcs_path=hparams.gcs_path,
data_type=hparams.data_type,
name='eval')
data = DatasetInput(
num_epochs=hparams.num_epochs,
batch_size=hparams.batch_size,
buffer_size=hparams.buffer_size,
csv_defaults=csv_defaults,
csv_cols=csv_cols,
target_var=hparams.target_var,
task_type=hparams.task_type,
condition=hparams.condition)
feature_cols = data.create_feature_columns_wrap(
dictionary=mapped,
mean=mean,
std_dev=std_dev)
b = time.time()
tf.logging.info('Parse time is : %s', b - a)
if hparams.name == 'kmeanscluster':
def train_input():
return data.kmeans_input_fn('train')
def eval_input():
return data.kmeans_input_fn('eval')
else:
def train_input():
return data.input_fn('train')
def eval_input():
return data.input_fn('eval')
def json_serving_input_fn():
"""
Build the serving inputs.
Returns: Serving input function for JSON data
"""
inputs = {}
for feat in feature_cols:
inputs[feat.name] = tf.placeholder(
shape=[None], dtype=feat.dtype, name=feat.name)
return tf.estimator.export.ServingInputReceiver(inputs, inputs)
def parse_csv(rows_string_tensor):
"""
Takes the string input tensor and returns a dict of rank-2 tensors.
Arguments:
rows_string_tensor : tf.Tensor object, Tensor of the prediction datapoint
Returns:
features : tensor objects of features for inference
"""
columns = tf.decode_csv(
rows_string_tensor, record_defaults=csv_defaults)
features = dict(zip(csv_cols, columns))
for key, _ in six.iteritems(features):
features[key] = tf.expand_dims(features[key], -1)
return features
def csv_serving_input_fn():
"""
Build the serving inputs.
Returns: Serving input function for CSV data
"""
csv_row = tf.placeholder(
shape=[None],
dtype=tf.string)
features = parse_csv(rows_string_tensor=csv_row)
return tf.estimator.export.ServingInputReceiver(
features,
{'csv_row': csv_row})
serving_functions = {
'JSON': json_serving_input_fn,
'CSV': csv_serving_input_fn
}
config_obj = Config(
model_dir=hparams.job_dir,
tf_random_seed=hparams.seed,
save_summary_steps=hparams.save_summary_steps,
session_config=None,
save_checkpoints_secs=hparams.save_checkpoints_secs,
save_checkpoints_steps=hparams.save_checkpoints_steps,
keep_checkpoint_max=hparams.keep_checkpoint_max,
keep_checkpoint_every_n_hours=hparams.keep_checkpoint_every_n_hours,
log_step_count_steps=hparams.log_step_count_steps,
train_distribute=hparams.distribute_strategy)
config_obj.set_config()
config = config_obj.get_config()
opt = Optimizer()
def linear_optimizer():
return opt.set_opt_wrap(
hparams.lin_opt,
hparams.learning_rate,
hparams.lr_rate_decay)
def deep_optimizer():
return opt.set_opt_wrap(
hparams.deep_opt,
hparams.learning_rate,
hparams.lr_rate_decay)
def poly_optimizer():
return opt.set_opt_wrap(
hparams.poly_opt,
hparams.learning_rate,
hparams.lr_rate_decay)
deep_cols = create_deep_cols(feature_cols, hparams.name)
hidden_units = [hparams.hidden_units]
feature_names = list(csv_cols)
feature_names.remove(hparams.target_var)
if hparams.name not in ['polynomialclassifier', 'polynomialregressor']:
model = CannedModel(
model_name=hparams.name,
feature_columns=feature_cols,
deep_columns=deep_cols,
hidden_units=hidden_units,
n_classes=hparams.n_classes,
linear_optimizer=linear_optimizer,
dnn_optimizer=deep_optimizer,
activation_fn=hparams.activation_fn,
dropout=hparams.dropout,
batch_norm=hparams.batch_norm,
config=config)
else:
model = CustomModel(
model_name=hparams.name,
batch_size=hparams.batch_size,
optimizer=poly_optimizer,
model_dir=hparams.job_dir,
config=config,
feature_names=feature_names,
learning_rate=hparams.learning_rate)
def mean_acc_metric(labels, predictions):
"""
Defining mean per class accuracy metric
Arguments:
labels : labels of the data
predictions : prediction of the model
Returns: function defining mean per class accuracy metric
"""
return mean_acc(labels, predictions, hparams.n_classes)
estimator = model.build_model()
if data.task_type == 'classification' and hparams.n_classes == 2:
estimator = tf.contrib.estimator.add_metrics(estimator, my_auc)
elif hparams.n_classes > 2:
estimator = tf.contrib.estimator.add_metrics(
estimator, mean_acc_metric)
else:
estimator = tf.contrib.estimator.add_metrics(estimator, rmse)
estimator = tf.contrib.estimator.add_metrics(estimator, mar)
if hparams.early_stopping:
old_loss = np.inf
for _ in range(hparams.eval_times):
estimator.train(input_fn=train_input,
steps=hparams.train_steps // hparams.eval_times)
output = estimator.evaluate(
input_fn=eval_input, steps=hparams.eval_steps)
loss = output['loss']
if loss >= old_loss:
tf.logging.info(
'EARLY STOPPING....... LOSS SATURATED AT : %s', loss)
break
else:
old_loss = loss
else:
train_spec = tf.estimator.TrainSpec(
train_input,
hparams.train_steps)
eval_spec = tf.estimator.EvalSpec(
eval_input,
hparams.eval_steps,
throttle_secs=hparams.eval_freq)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
if config_obj.get_is_chief():
estimator.export_savedmodel(
hparams.export_dir,
serving_functions[hparams.export_format],
assets_extra={
'lime_explainer': '/tmp/lime_explainer'},
strip_default_attrs=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--train_csv_path',
type=convert_to_list,
help='CSV file path[s] either on local or GCS',
required=True)
parser.add_argument(
'--eval_csv_path',
help='CSV file path for model eval',
required=True)
parser.add_argument(
'--task_type',
help='Machine learning task at hand',
required=True)
parser.add_argument(
'--target_var',
help='Name of the target variable',
required=True)
parser.add_argument(
'--data_type',
help='schema of the input data',
type=convert_to_dict,
default=None)
parser.add_argument(
'--column_names',
type=none_or_str,
help='text file containing column names',
default=None)
parser.add_argument(
'--num_clusters',
type=int,
help='number of clusters',
default=3)
parser.add_argument(
'--to_drop',
type=convert_to_list,
help='list of columns that can be dropped',
default=None)
parser.add_argument(
'--na_values',
type=none_or_str,
help='string by which na values are represented',
default=None)
parser.add_argument(
'--condition',
type=none_or_str,
help='logic to turn the target variable into levels',
default=None)
parser.add_argument(
'--gcs_path',
help='Whether the csv is on GCS',
default=True)
parser.add_argument(
'--num_epochs',
help='number of epochs for dataset to repeat',
type=int,
default=50)
parser.add_argument(
'--batch_size',
help='batch size to train and eval the model',
type=int,
default=64)
parser.add_argument(
'--buffer_size',
help='buffer size for prefetch',
type=int,
default=64)
parser.add_argument(
'--n_classes',
help='number of levels in target var',
default=2,
type=int)
parser.add_argument(
'--train_steps',
help='number of steps to train the model',
type=int,
default=50000)
parser.add_argument(
'--eval_steps',
help='number of eval batches to run',
type=int,
default=100)
parser.add_argument(
'--job-dir',
help='directory to store model checkpoints',
type=str,
default='/temp')
parser.add_argument(
'--seed',
help='seed to set for random initialization',
default=None)
parser.add_argument(
'--save_summary_steps',
help='number of global steps to save summaries',
type=int,
default=100)
parser.add_argument(
'--save_checkpoints_steps',
help='number of global steps to save checkpoints',
type=int,
default=500)
parser.add_argument(
'--save_checkpoints_secs',
help='number of seconds after which to save checkpoints',
type=int,
default=None)
parser.add_argument(
'--keep_checkpoint_max',
help='max number of checkpoints to save',
type=int,
default=5)
parser.add_argument(
'--keep_checkpoint_every_n_hours',
help='save checkpoint frequency',
type=int,
default=10000)
parser.add_argument(
'--log_step_count_steps',
help='how frequently to log information',
type=int,
default=100)
parser.add_argument(
'--distribute_strategy',
help='distribution strategy to use for training',
type=none_or_str,
default=None)
# model params
parser.add_argument(
'--name',
help='name of the model you want to use',
required=True,
choices=['linearclassifier', 'linearregressor',
'dnnclassifier', 'dnnregresssor', 'combinedclassifier',
'combinedregressor', 'kmeanscluster'])
parser.add_argument(
'--hidden_units',
help='number of hidden units in each layer of dnn',
type=int,
default=64
)
parser.add_argument(
'--num_layers',
help='number of hidden layers',
type=int,
default=2)
parser.add_argument(
'--lin_opt',
help='optimizer to use for linear models',
type=str,
default='ftrl')
parser.add_argument(
'--deep_opt',
help='optimizer to use for NN models',
type=str,
default='adam')
parser.add_argument(
'--lr_rate_decay',
help='whether to use learning rate decay',
type=bool,
default=False)
parser.add_argument(
'--activation_fn',
help='activation fn to use for hidden units',
default=tf.nn.relu)
parser.add_argument(
'--dropout',
help='dropout rate for hidden layers',
default=None)
parser.add_argument(
'--batch_norm',
help='whether to use batch norm for hidden layers',
default=False)
parser.add_argument(
'--learning_rate',
help='learning rate for model training',
type=float,
default=0.001)
parser.add_argument(
'--eval_freq',
help='frequency in seconds to trigger evaluation run',
type=int,
default=30)
parser.add_argument(
'--eval_times',
help='early stopping criteria',
type=int,
default=10)
parser.add_argument(
'--early_stopping',
help='how to define when model training should end',
type=bool,
default=False)
parser.add_argument(
'--export_dir',
help='Directory for storing the frozen graph',
type=str,
required=True)
parser.add_argument(
'--export_format',
help='Format for the serving inputs',
type=str,
default='JSON')
parser.add_argument(
'--logging_level',
help='Logging verbosity level',
type=str,
default='INFO',
choices=['INFO', 'DEBUG', 'ERROR', 'FATAL', 'WARN'])
args = parser.parse_args()
hparams = hparam.HParams(**args.__dict__)
tf.logging.set_verbosity(hparams.logging_level)
run_experiment(hparams)
os.system('rm -r /tmp/*.csv')
|
[
"jferriero@google.com"
] |
jferriero@google.com
|
90db28e0b00420ae8ba6a75d46a68285b150f3e2
|
bd7cbd805417429af1a6636eff86857b28162525
|
/encapsulation.py
|
c4a5fe1d25ea847720f29c90dc5c902265b1d92c
|
[] |
no_license
|
Qannaf/Python
|
d92cb9bad34de2a6814520f6487d548e93bfb52c
|
b8d23cdda6f5901bd176a4274f1b8d389267dc85
|
refs/heads/main
| 2023-04-28T09:03:30.854096
| 2021-05-22T08:35:51
| 2021-05-22T08:35:51
| 356,817,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
#coding:utf-8
# here is my 12th program in Python
class Humain:
"""Class that defines a human"""
def __init__(self, nom, age): # constructor
self.nom = nom
self._age = age
def _getage(self):
# print("Direct retrieval forbidden")
if self._age<= 1:
return "{} {}".format(self._age,"an")
return "{} {}".format(self._age,"ans")
def _setage(self,nouvel_age):
if nouvel_age<0:
self._age=0
else:
self._age = nouvel_age
# property <getter>,<setter>,<deleter>,<helper>
age = property(_getage,_setage)
# main program
h1 = Humain("Qannaf",27)
print(h1.nom,"\t",h1.age)
h1.nom = "ALSAHMI"
h1.age = 1
print(h1.nom,"\t",h1.age)
print("{} {}".format(h1.nom,h1.age))
|
[
"qannafalsahmi@gmail.com"
] |
qannafalsahmi@gmail.com
|
f519f8ae0d7bee8960df8f6c8053f6c930b0db76
|
a98eab432108d65c8546c44cded7808463b844a4
|
/common/libs/UploadService.py
|
1855eb6afd457e0dd62cbeb931b35ad635c2e266
|
[] |
no_license
|
RocketWill/Qamar
|
287ec3d5ad2ec7b3435c93def2d85bbe7d94d928
|
63e41d333559baf52a9010850bc4c2713ac0b93d
|
refs/heads/master
| 2020-04-21T23:55:59.459620
| 2019-03-19T12:00:02
| 2019-03-19T12:00:02
| 169,962,064
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,359
|
py
|
# -*- coding: utf-8 -*-
from werkzeug.utils import secure_filename
from application import app, db
import os, stat, uuid
from common.libs.Helper import getCurrentDate
from common.models.Image import Image
class UploadService():
@staticmethod
def uploadByFile(file):
config_upload = app.config['UPLOAD']
resp = {'code': 200, 'msg': "操作成功", "data": {}}  # msg: "operation successful"
filename = secure_filename(file.filename)
ext = filename.rsplit(".", 1)[1]
if ext not in config_upload['ext']:
resp['code'] = -1
resp['msg'] = "不允許的擴展類型文件"  # "file extension type not allowed"
return resp
root_path = app.root_path + config_upload['prefix_path']
file_dir = getCurrentDate("%Y%m%d")
save_dir = root_path + file_dir
if not os.path.exists(save_dir):
os.mkdir(save_dir)
os.chmod(save_dir, stat.S_IRWXU | stat.S_IRGRP | stat.S_IRWXO)
filename = str(uuid.uuid4()).replace("-", "") + "." + ext
file.save("{0}/{1}".format(save_dir, filename))
model_image = Image()
model_image.file_key = file_dir + "/" + filename
model_image.created_time = getCurrentDate()
db.session.add(model_image)
db.session.commit()
resp['data'] = {
"file_key": model_image.file_key
}
return resp
|
[
"willcy1006@163.com"
] |
willcy1006@163.com
|
29ddf0ad878bb01613933dec9c0ea4d5d69f4d37
|
e88de870db4ed98a39a1e7661f4662ba9d465b84
|
/fixture/session.py
|
2ab29c2141001defd40b5ad6e7da6669a31fc3a5
|
[] |
no_license
|
TatianaMoskvina/PainPoints
|
f11c371115e21da284e3375581ff4a485b38e495
|
291782aab76bc7b7b1cbdd493a0837b25c65a92c
|
refs/heads/master
| 2020-04-13T04:02:43.248112
| 2019-01-24T05:59:02
| 2019-01-24T05:59:02
| 162,949,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,490
|
py
|
class SessionHelper:
def __init__(self, app):
self.app = app
def log_out(self):
# log out
wd = self.app.wd
wd.find_element_by_id("myProfile").click()
wd.find_element_by_link_text("Log Out").click()
def ensure_logout(self):
wd = self.app.wd
if self.is_logged_in():
self.log_out()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("myProfile")) > 0
def ensure_log_in(self, username):
wd = self.app.wd
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.log_out()
def is_logged_in_as(self, username):  # renamed to match the call in ensure_log_in
wd = self.app.wd
username_login = wd.find_element_by_css_selector("div.name").text
mass_of_username = username_login.split(' ')
for i in mass_of_username:
if i == 'CompanyOwner)':
return i
return i == "CompanyOwner)"
def log_in(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_id("Email").clear()
wd.find_element_by_id("Email").send_keys("%s" % username)
wd.find_element_by_id("Password").clear()
wd.find_element_by_id("Password").send_keys("%s" % password)
wd.find_element_by_xpath( "(.//*[normalize-space(text()) and normalize-space(.)='Password'])[1]/following::input[2]").click()
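# Illustrative usage sketch (not part of the original file); `app` is assumed
# to be the test fixture that owns the WebDriver and open_home_page():
#   session = SessionHelper(app)
#   session.log_in('user@example.com', 'secret')
#   session.ensure_logout()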
|
[
"moskvina.tania.vit@gmail.com"
] |
moskvina.tania.vit@gmail.com
|
50c51970784b45eb11a6e6b9407324632f40fbe4
|
5fc1bb8df5af751e855ccdb224a37a7afb473d6e
|
/setup.py
|
0af5652144881f9fac8ad445e1c7b171c6d9d2ed
|
[
"BSD-3-Clause"
] |
permissive
|
areagle-stripe/SimpleML
|
c316ddaba7cac8dc709c4d9030e0de1897dc007a
|
10a2167a73570bc923c202930fa75ba56d46c504
|
refs/heads/master
| 2020-03-14T14:31:29.949925
| 2018-04-09T02:37:06
| 2018-04-09T02:37:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
from setuptools import setup, find_packages
setup(
name='simpleml',
version='0.1',
description='Simplified Machine Learning',
author='Elisha Yadgaran',
author_email='ElishaY@alum.mit.edu',
license='BSD-3',
url='https://github.com/eyadgaran/SimpleML',
download_url='https://github.com/eyadgaran/SimpleML/archive/0.1.tar.gz',
packages=find_packages(),
keywords = ['machine-learning'],
install_requires=[
'sqlalchemy',
'sqlalchemy_mixins',
'psycopg2',
'scikit-learn',
'numpy'
],
zip_safe=False,
test_suite='nose.collector',
tests_require=['nose']
)
|
[
"ElishaY@alum.mit.edu"
] |
ElishaY@alum.mit.edu
|
b6d557a679a01d00df7d8e053588222bcee90375
|
4b100e0519f3362554bac7434baac61a1d08ddd2
|
/third_party/ros_aarch64/lib/python2.7/dist-packages/dynamic_reconfigure/cfg/TestConfig.py
|
8cb1e13bddbb1e0b79add5109e2837449bfbccae
|
[
"Apache-2.0"
] |
permissive
|
YapingLiao/apollo1.0
|
17002fefaf01e0ee9f79713fd436c8c3386208b6
|
6e725e8dd5013b769efa18f43e5ae675f4847fbd
|
refs/heads/master
| 2020-06-18T13:04:52.019242
| 2018-01-29T01:50:43
| 2018-01-29T01:50:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,004
|
py
|
## *********************************************************
##
## File autogenerated for the dynamic_reconfigure_test package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 246, 'name': 'Default', 'parent': 0, 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [{'upper': 'GROUP_ONE', 'lower': 'group_one', 'srcline': 123, 'name': 'Group_One', 'parent': 0, 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT::GROUP_ONE', 'field': 'DEFAULT::group_one', 'state': True, 'parentclass': 'DEFAULT', 'groups': [{'upper': 'GROUP2', 'lower': 'group2', 'srcline': 123, 'name': 'GROUP2', 'parent': 1, 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'false', 'parentname': 'Group_One', 'class': 'DEFAULT::GROUP_ONE::GROUP2', 'field': 'DEFAULT::GROUP_ONE::group2', 'state': False, 'parentclass': 'DEFAULT::GROUP_ONE', 'groups': [{'upper': 'GROUP3', 'lower': 'group3', 'srcline': 123, 'name': 'Group3', 'parent': 2, 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'GROUP2', 'class': 'DEFAULT::GROUP_ONE::GROUP2::GROUP3', 'field': 'DEFAULT::GROUP_ONE::GROUP2::group3', 'state': True, 'parentclass': 'DEFAULT::GROUP_ONE::GROUP2', 'groups': [], 'parameters': [{'srcline': 70, 'description': 'Were very far down now', 'max': 3, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'name': 'deep_var_int', 'edit_method': "{'enum_description': 'An enum to set the size.', 'enum': [{'srcline': 45, 'description': 'A small constant', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Small'}, {'srcline': 46, 'description': 'A medium value', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Medium'}, {'srcline': 47, 'description': 'A large value', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'cconsttype': 'const int', 'value': 2, 'ctype': 'int', 'type': 'int', 'name': 'Large'}, {'srcline': 48, 'description': 'An extra large value', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'cconsttype': 'const int', 'value': 3, 'ctype': 'int', 'type': 'int', 'name': 'ExtraLarge'}]}", 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 71, 'description': 'Were even farther down now!!', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'name': 'deep_var_bool', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 72, 'description': 'Were super far down now!!', 'max': inf, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'name': 'deep_var_double', 'edit_method': '', 'default': -1.0, 'level': 0, 'min': -inf, 'type': 'double'}], 'type': '', 
'id': 3}], 'parameters': [{'srcline': 65, 'description': 'A third level group parameter', 'max': inf, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'name': 'group2_double', 'edit_method': '', 'default': 3.333, 'level': 0, 'min': -inf, 'type': 'double'}, {'srcline': 66, 'description': 'A third level group parameter', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'name': 'group2_string', 'edit_method': '', 'default': 'test string', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 67, 'description': 'Something', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'name': 'some_other', 'edit_method': '', 'default': 'AAAAAAGGHH', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 68, 'description': 'A boolean', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'name': 'variable', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}], 'type': 'collapse', 'id': 2}], 'parameters': [{'srcline': 63, 'description': 'A second level group parameter', 'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'name': 'group1_int', 'edit_method': '', 'default': 2, 'level': 1, 'min': -2147483648, 'type': 'int'}], 'type': '', 'id': 1}, {'upper': 'UPPER_GROUP_2', 'lower': 'upper_group_2', 'srcline': 123, 'name': 'Upper_Group_2', 'parent': 0, 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT::UPPER_GROUP_2', 'field': 'DEFAULT::upper_group_2', 'state': True, 'parentclass': 'DEFAULT', 'groups': [], 'parameters': [{'srcline': 75, 'description': 'Some param', 'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'name': 'some_param', 'edit_method': '', 'default': 20, 'level': 0, 'min': -2147483648, 'type': 'int'}], 'type': '', 'id': 4}], 'parameters': [{'srcline': 293, 'description': 'Int enum', 'max': 3, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'int_enum_', 'edit_method': "{'enum_description': 'An enum to set the size.', 'enum': [{'srcline': 45, 'description': 'A small constant', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Small'}, {'srcline': 46, 'description': 'A medium value', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Medium'}, {'srcline': 47, 'description': 'A large value', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'cconsttype': 'const int', 'value': 2, 'ctype': 'int', 'type': 'int', 'name': 'Large'}, {'srcline': 48, 'description': 'An extra large value', 'srcfile': 
'/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/cfg/Test.cfg', 'cconsttype': 'const int', 'value': 3, 'ctype': 'int', 'type': 'int', 'name': 'ExtraLarge'}]}", 'default': 0, 'level': 1, 'min': 0, 'type': 'int'}, {'srcline': 293, 'description': 'Int parameter', 'max': 10, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'int_', 'edit_method': '', 'default': 0, 'level': 1, 'min': -10, 'type': 'int'}, {'srcline': 293, 'description': 'double parameter', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'double_', 'edit_method': '', 'default': 0.0, 'level': 2, 'min': -2.0, 'type': 'double'}, {'srcline': 293, 'description': 'double parameter without boundaries', 'max': inf, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'double_no_minmax', 'edit_method': '', 'default': 1.0, 'level': 2, 'min': -inf, 'type': 'double'}, {'srcline': 293, 'description': 'double parameter without max value', 'max': inf, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'double_no_max', 'edit_method': '', 'default': 2.0, 'level': 2, 'min': 0.0, 'type': 'double'}, {'srcline': 293, 'description': 'String parameter', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'str_', 'edit_method': '', 'default': 'foo', 'level': 4, 'min': '', 'type': 'str'}, {'srcline': 293, 'description': 'Multibyte String parameter', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'mstr_', 'edit_method': '', 'default': 'bar', 'level': 4, 'min': '', 'type': 'str'}, {'srcline': 293, 'description': 'Boolean parameter', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'bool_', 'edit_method': '', 'default': False, 'level': 8, 'min': False, 'type': 'bool'}, {'srcline': 293, 'description': 'Contains the level of the previous change', 'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'level', 'edit_method': '', 'default': 0, 'level': 16, 'min': -2147483648, 'type': 'int'}, {'srcline': 293, 'description': 'Checks against regression of #4499', 'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'int_nodefault', 'edit_method': '', 'default': 0, 'level': 0, 'min': -2147483648, 'type': 'int'}, {'srcline': 293, 'description': 'Checks against regression of https://github.com/ros/dynamic_reconfigure/issues/6', 
'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/home/ubuntu/baidu/adu-lab/apollo/modules/ros/dynamic_reconfigure/src/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'i', 'edit_method': '', 'default': 0, 'level': 0, 'min': -2147483648, 'type': 'int'}], 'type': '', 'id': 0}
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
# params = []
# params.extend(config['parameters'])
# for group in config['groups']:
# params.extend(extract_params(group))
# return params
for param in extract_params(config_description):
min[param['name']] = param['min']
max[param['name']] = param['max']
defaults[param['name']] = param['default']
level[param['name']] = param['level']
type[param['name']] = param['type']
all_level = all_level | param['level']
Test_int_const = 5
Test_double_const = 5.6
Test_str_const = 'foo'
Test_bool_const = True
Test_Small = 0
Test_Medium = 1
Test_Large = 2
Test_ExtraLarge = 3
|
[
"14552258@qq.com"
] |
14552258@qq.com
|
52ff178dec4c0fecc3a4ec0097dbff7f0a46b8ed
|
0062da4c92b6a621343347a36ee32128838c09ec
|
/hw6/hw66.py
|
d53ded6c0933a977859a413ec67f23285fc1ca50
|
[] |
no_license
|
Skotmak/Homeworks
|
d3bb39e1d6d960ec3ad1c5c74a0eb5508f1f1e15
|
071e3c3a9d1341b28dd3a4828788f422fd943367
|
refs/heads/master
| 2022-05-13T07:50:45.869093
| 2022-05-03T17:19:53
| 2022-05-03T17:19:53
| 226,465,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
class First:
color = "red"
form = "circle"
style = "cool"
def changecolor(self,newcolor):
self.color = newcolor
def changeform(self,newform):
self.form = newform
def changestyle(self,newstyle):
self.style = newstyle
class Second:
color = "red"
def changecolor(self, newcolor):
self.color = newcolor
obj1 = First()
obj2 = Second()
print (obj1.color, obj1.form, obj1.style)
print (obj2.color)
a=input("enter color: ")
obj1.changecolor(a)
if obj1.color == "green" :
obj2.changecolor("blue")
print (obj1.color)
print (obj2.color)
|
[
"belok9999@yandex.ru"
] |
belok9999@yandex.ru
|
274befd704407b98b615b5efb59b5c392ba2d396
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/sql/outbound_firewall_rule.py
|
5a8309fc8ccaf95892a167a53395faad58b8bd6a
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 7,974
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['OutboundFirewallRuleArgs', 'OutboundFirewallRule']
@pulumi.input_type
class OutboundFirewallRuleArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
server_name: pulumi.Input[str],
outbound_rule_fqdn: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a OutboundFirewallRule resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] server_name: The name of the server.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "server_name", server_name)
if outbound_rule_fqdn is not None:
pulumi.set(__self__, "outbound_rule_fqdn", outbound_rule_fqdn)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Input[str]:
"""
The name of the server.
"""
return pulumi.get(self, "server_name")
@server_name.setter
def server_name(self, value: pulumi.Input[str]):
pulumi.set(self, "server_name", value)
@property
@pulumi.getter(name="outboundRuleFqdn")
def outbound_rule_fqdn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "outbound_rule_fqdn")
@outbound_rule_fqdn.setter
def outbound_rule_fqdn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "outbound_rule_fqdn", value)
class OutboundFirewallRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
outbound_rule_fqdn: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
An Azure SQL DB Server Outbound Firewall Rule.
API Version: 2021-02-01-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] server_name: The name of the server.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: OutboundFirewallRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An Azure SQL DB Server Outbound Firewall Rule.
API Version: 2021-02-01-preview.
:param str resource_name: The name of the resource.
:param OutboundFirewallRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(OutboundFirewallRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
outbound_rule_fqdn: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = OutboundFirewallRuleArgs.__new__(OutboundFirewallRuleArgs)
__props__.__dict__["outbound_rule_fqdn"] = outbound_rule_fqdn
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if server_name is None and not opts.urn:
raise TypeError("Missing required property 'server_name'")
__props__.__dict__["server_name"] = server_name
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql:OutboundFirewallRule"), pulumi.Alias(type_="azure-native:sql/v20210201preview:OutboundFirewallRule"), pulumi.Alias(type_="azure-nextgen:sql/v20210201preview:OutboundFirewallRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(OutboundFirewallRule, __self__).__init__(
'azure-native:sql:OutboundFirewallRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'OutboundFirewallRule':
"""
Get an existing OutboundFirewallRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = OutboundFirewallRuleArgs.__new__(OutboundFirewallRuleArgs)
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
return OutboundFirewallRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The state of the outbound rule.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
94d8d69b957710067f870841aeea79b4d0747e2f
|
7dbe84760c95ee2ef15b28724bc73c173fab1f1a
|
/download2-8-1.py
|
c2c0a4db78e9230a650e421f354d1c8aceb7af0c
|
[] |
no_license
|
seil3377/python_section2
|
f0af3342df24d2317f21081d59a4f5f42e4f0d32
|
eb5af490480cd53031e93d833ed74d20e7367933
|
refs/heads/master
| 2020-03-27T13:06:33.114567
| 2018-09-18T08:11:26
| 2018-09-18T08:11:26
| 146,586,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,262
|
py
|
from bs4 import BeautifulSoup
import urllib.request as req
import urllib.parse as rep
import sys
import io
import os
import errno  # needed for the errno.EEXIST check in the folder-creation block
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')
# 403 error: fixed by installing an opener that sends User-agent header information with each request
opener = req.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
req.install_opener(opener)
# Fetch the HTML
base = "https://search.naver.com/search.naver?where=image&query="
quote = rep.quote_plus("사자")  # search query: "lion"
url = base + quote
res = req.urlopen(url)
savePath = "D:/Atom_Workspace/section2/img/"
# Create the folder & handle exceptions
try:
if not(os.path.isdir(savePath)):
os.makedirs(os.path.join(savePath))
except OSError as e:
if e.errno != errno.EEXIST:
print("폴더 만들기 실패!")
raise
soup = BeautifulSoup(res, "html.parser")
img_list = soup.select("div.img_area > a.thumb._thumb > img")
#print("test", img_list)
for i, div in enumerate(img_list,1):
print("div =", div['data-source'])
fullfilename = os.path.join(savePath, str(i) + '.jpg')  # join the directory and file name without duplicating savePath
print(fullfilename)
req.urlretrieve(div['data-source'],fullfilename)
print(i)
|
[
"seil3377@naver.com"
] |
seil3377@naver.com
|
5d7dcefc5473baa7b1829179e02b456b2dc7a467
|
1526f23c4a847768a5fc1c2ba596275d5d7ec0d5
|
/assgn2/pr7.py
|
16044b8670c8a1a1d63e463e2a9b0422df3600da
|
[] |
no_license
|
gitank007/python-assignmnet
|
d07e7d222242f13dbfe7bd5d1f7a75ddb8be9aa7
|
7b7588927a17c46dfea2b141c7ccf91b4b5f3949
|
refs/heads/master
| 2023-02-28T17:02:45.217169
| 2021-02-11T08:03:57
| 2021-02-11T08:03:57
| 333,665,413
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
''' nature of roots
step 1: if d = b^2 - 4ac = 0 ==> real and equal roots
step 2: if d > 0 then the roots are real and distinct
step 3: if d < 0 then the roots are imaginary
'''
x,y,z=[int(x) for x in input("enter the space separated values of the coefficients a, b, c of ax^2+bx+c (in that order): ").split()]
print("a={} ,b={}, c={} ".format(x,y,z))
d=(y**2)-4*x*z
print("the Value of discriminant for given equation is :",d)
if d==0:
print("The roots are real and equal ")
elif d>0:
print("The roots are real ")
else:
print("The rots are imaginary ")
|
[
"noreply@github.com"
] |
gitank007.noreply@github.com
|
290d3ea467c81eb4adcf2a72c26529c2a9e07eb4
|
1a2ca64839723ede3134a0781128b0dc0b5f6ab8
|
/ExtractFeatures/Data/jsinix/Problem10.py
|
8f8dd302f291fb441406f1d44ec06aae2c72032e
|
[] |
no_license
|
vivekaxl/LexisNexis
|
bc8ee0b92ae95a200c41bd077082212243ee248c
|
5fa3a818c3d41bd9c3eb25122e1d376c8910269c
|
refs/heads/master
| 2021-01-13T01:44:41.814348
| 2015-07-08T15:42:35
| 2015-07-08T15:42:35
| 29,705,371
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
# Question: Summation of primes
# Problem 10
# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
# Find the sum of all the primes below two million.
# Answer: 142913828922
#!/usr/bin/python
def is_prime(num):
for j in range(2,num):
if (num % j) == 0:
return False
return True
list1 = [2]  # 2 is prime but would be skipped by the odd-only loop below
for i in range(3,2000000,2):
if is_prime(i):
list1.append(i)
sum1 = 0
for j in list1:
sum1 = sum1 + j
print(sum1)
|
[
"vivekaxl@gmail.com"
] |
vivekaxl@gmail.com
|
0ac33827a9a2da4ec675edc41ce481fc5157e0f5
|
a7f43722f91b6cc75b46924daa08d856e9c4793c
|
/env/bin/pip3
|
8ad698ab09c3dd8b9c007b1e05cada42de0aee9b
|
[] |
no_license
|
bodevone/django-blog
|
b226b998d609dd6726053857fd38c5abc2c98734
|
ee4e5dbafa94b2fe18261cf229ace3c741c97203
|
refs/heads/master
| 2020-05-03T10:11:02.256327
| 2019-03-30T15:08:28
| 2019-03-30T15:08:28
| 178,573,157
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
#!/home/bodevan/Documents/coding/django-blog/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"aliby.bbb@gmail.com"
] |
aliby.bbb@gmail.com
|
|
da8b517d85a513db952f23fb60cba730404ee688
|
3c8cc407d7547f8702bfe06deb5f1c9087778ce3
|
/hiword/dataloader.py
|
4d6c1c1a4918a7d6da18e55b56ca7658f88ceae8
|
[
"Apache-2.0"
] |
permissive
|
jadbin/hiword
|
accaebbdee899e8e3ed11e024d6c488bca36c445
|
7789412747a2b6b59ee974f2a2efd57e355e3282
|
refs/heads/master
| 2023-04-06T21:57:17.260338
| 2023-03-15T16:21:49
| 2023-03-15T16:21:49
| 166,003,998
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,158
|
py
|
import os
from collections import defaultdict
from os import listdir
from os.path import join, dirname, isdir
class DictLoader:
def __init__(self):
self.dict = self._load_dict()
def word_freq(self, word):
return self.dict[word]
@staticmethod
def _load_dict():
res = defaultdict(lambda: 0)
d = join(dirname(__file__), 'data', 'dicts')
for name in os.listdir(d):
if name.endswith('.txt'):
dict_file = join(d, name)
with open(dict_file) as f:
for line in f:
s = line.split()
res[s[0]] = max(res[s[0]], int(s[1]))
return res
class IDFLoader:
def __init__(self):
self.idf = self._load_idf()
self.median_idf = sorted(self.idf.values())[len(self.idf) // 2]
def word_idf(self, word):
return self.idf.get(word, self.median_idf)
@staticmethod
def _load_idf():
idf_file = join(dirname(__file__), 'data', 'idf.txt')
res = {}
with open(idf_file) as f:
while True:
line = f.readline()
if not line:
break
s = line.split()
res[s[0]] = float(s[1])
return res
class StopwordsLoader:
def __init__(self):
self.stopwords = self._load_stopwords()
def is_stopword(self, word):
return word in self.stopwords
def remove(self, word):
self.stopwords.remove(word)
@staticmethod
def _load_stopwords():
file = join(dirname(__file__), 'data', 'stopwords')
res = set()
files = []
if isdir(file):
for fname in listdir(file):
if fname.endswith('.txt'):
files.append(join(file, fname))
else:
files.append(file)
for fname in files:
with open(fname) as f:
while True:
line = f.readline()
if not line:
break
s = line.strip()
res.add(s)
return res
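# Illustrative usage sketch (added for clarity; not part of the original file);
# assumes the bundled data files under hiword/data are present.
if __name__ == '__main__':
    stopwords = StopwordsLoader()
    idf = IDFLoader()
    print(stopwords.is_stopword('the'), idf.word_idf('example'))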
|
[
"jadbin.com@hotmail.com"
] |
jadbin.com@hotmail.com
|
0335df4074ddc3d154d3724fb2fef2df225be97f
|
76f05bd66e0585d1f0ef9c6829be936332706f2a
|
/yibanApi/__init__.py
|
30d7de0a0830f48a6a09d044d0cf34b8149d0bad
|
[
"MIT"
] |
permissive
|
vincijy/yibanApi
|
b48da529b962b176f08c9e4227727d0499836caa
|
57bf9b821fb5b5154e8df7a3f7f4bdb2245dfcbf
|
refs/heads/master
| 2020-03-13T20:03:36.363584
| 2018-05-02T10:29:24
| 2018-05-02T10:29:24
| 131,266,046
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 39
|
py
|
from .yibanApi import AccessToken, api
|
[
"awefight@qq.com"
] |
awefight@qq.com
|
8095b9c0a7bae6822334409e1ce923939046e30c
|
141b42d9d72636c869ff2ce7a2a9f7b9b24f508b
|
/myvenv/Lib/site-packages/cairosvg/colors.py
|
d77cb275f83cb564c24eb127821c4d38243acfbb
|
[
"BSD-3-Clause"
] |
permissive
|
Fa67/saleor-shop
|
105e1147e60396ddab6f006337436dcbf18e8fe1
|
76110349162c54c8bfcae61983bb59ba8fb0f778
|
refs/heads/master
| 2021-06-08T23:51:12.251457
| 2018-07-24T08:14:33
| 2018-07-24T08:14:33
| 168,561,915
| 1
| 0
|
BSD-3-Clause
| 2021-04-18T07:59:12
| 2019-01-31T17:00:39
|
Python
|
UTF-8
|
Python
| false
| false
| 11,254
|
py
|
# This file is part of CairoSVG
# Copyright © 2010-2015 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with CairoSVG. If not, see <http://www.gnu.org/licenses/>.
"""
SVG colors.
"""
import re
COLORS = {
'aliceblue': (240 / 255, 248 / 255, 255 / 255, 1),
'antiquewhite': (250 / 255, 235 / 255, 215 / 255, 1),
'aqua': (0 / 255, 255 / 255, 255 / 255, 1),
'aquamarine': (127 / 255, 255 / 255, 212 / 255, 1),
'azure': (240 / 255, 255 / 255, 255 / 255, 1),
'beige': (245 / 255, 245 / 255, 220 / 255, 1),
'bisque': (255 / 255, 228 / 255, 196 / 255, 1),
'black': (0 / 255, 0 / 255, 0 / 255, 1),
'blanchedalmond': (255 / 255, 235 / 255, 205 / 255, 1),
'blue': (0 / 255, 0 / 255, 255 / 255, 1),
'blueviolet': (138 / 255, 43 / 255, 226 / 255, 1),
'brown': (165 / 255, 42 / 255, 42 / 255, 1),
'burlywood': (222 / 255, 184 / 255, 135 / 255, 1),
'cadetblue': (95 / 255, 158 / 255, 160 / 255, 1),
'chartreuse': (127 / 255, 255 / 255, 0 / 255, 1),
'chocolate': (210 / 255, 105 / 255, 30 / 255, 1),
'coral': (255 / 255, 127 / 255, 80 / 255, 1),
'cornflowerblue': (100 / 255, 149 / 255, 237 / 255, 1),
'cornsilk': (255 / 255, 248 / 255, 220 / 255, 1),
'crimson': (220 / 255, 20 / 255, 60 / 255, 1),
'cyan': (0 / 255, 255 / 255, 255 / 255, 1),
'darkblue': (0 / 255, 0 / 255, 139 / 255, 1),
'darkcyan': (0 / 255, 139 / 255, 139 / 255, 1),
'darkgoldenrod': (184 / 255, 134 / 255, 11 / 255, 1),
'darkgray': (169 / 255, 169 / 255, 169 / 255, 1),
'darkgreen': (0 / 255, 100 / 255, 0 / 255, 1),
'darkgrey': (169 / 255, 169 / 255, 169 / 255, 1),
'darkkhaki': (189 / 255, 183 / 255, 107 / 255, 1),
'darkmagenta': (139 / 255, 0 / 255, 139 / 255, 1),
'darkolivegreen': (85 / 255, 107 / 255, 47 / 255, 1),
'darkorange': (255 / 255, 140 / 255, 0 / 255, 1),
'darkorchid': (153 / 255, 50 / 255, 204 / 255, 1),
'darkred': (139 / 255, 0 / 255, 0 / 255, 1),
'darksalmon': (233 / 255, 150 / 255, 122 / 255, 1),
'darkseagreen': (143 / 255, 188 / 255, 143 / 255, 1),
'darkslateblue': (72 / 255, 61 / 255, 139 / 255, 1),
'darkslategray': (47 / 255, 79 / 255, 79 / 255, 1),
'darkslategrey': (47 / 255, 79 / 255, 79 / 255, 1),
'darkturquoise': (0 / 255, 206 / 255, 209 / 255, 1),
'darkviolet': (148 / 255, 0 / 255, 211 / 255, 1),
'deeppink': (255 / 255, 20 / 255, 147 / 255, 1),
'deepskyblue': (0 / 255, 191 / 255, 255 / 255, 1),
'dimgray': (105 / 255, 105 / 255, 105 / 255, 1),
'dimgrey': (105 / 255, 105 / 255, 105 / 255, 1),
'dodgerblue': (30 / 255, 144 / 255, 255 / 255, 1),
'firebrick': (178 / 255, 34 / 255, 34 / 255, 1),
'floralwhite': (255 / 255, 250 / 255, 240 / 255, 1),
'forestgreen': (34 / 255, 139 / 255, 34 / 255, 1),
'fuchsia': (255 / 255, 0 / 255, 255 / 255, 1),
'gainsboro': (220 / 255, 220 / 255, 220 / 255, 1),
'ghostwhite': (248 / 255, 248 / 255, 255 / 255, 1),
'gold': (255 / 255, 215 / 255, 0 / 255, 1),
'goldenrod': (218 / 255, 165 / 255, 32 / 255, 1),
'gray': (128 / 255, 128 / 255, 128 / 255, 1),
'grey': (128 / 255, 128 / 255, 128 / 255, 1),
'green': (0 / 255, 128 / 255, 0 / 255, 1),
'greenyellow': (173 / 255, 255 / 255, 47 / 255, 1),
'honeydew': (240 / 255, 255 / 255, 240 / 255, 1),
'hotpink': (255 / 255, 105 / 255, 180 / 255, 1),
'indianred': (205 / 255, 92 / 255, 92 / 255, 1),
'indigo': (75 / 255, 0 / 255, 130 / 255, 1),
'ivory': (255 / 255, 255 / 255, 240 / 255, 1),
'khaki': (240 / 255, 230 / 255, 140 / 255, 1),
'lavender': (230 / 255, 230 / 255, 250 / 255, 1),
'lavenderblush': (255 / 255, 240 / 255, 245 / 255, 1),
'lawngreen': (124 / 255, 252 / 255, 0 / 255, 1),
'lemonchiffon': (255 / 255, 250 / 255, 205 / 255, 1),
'lightblue': (173 / 255, 216 / 255, 230 / 255, 1),
'lightcoral': (240 / 255, 128 / 255, 128 / 255, 1),
'lightcyan': (224 / 255, 255 / 255, 255 / 255, 1),
'lightgoldenrodyellow': (250 / 255, 250 / 255, 210 / 255, 1),
'lightgray': (211 / 255, 211 / 255, 211 / 255, 1),
'lightgreen': (144 / 255, 238 / 255, 144 / 255, 1),
'lightgrey': (211 / 255, 211 / 255, 211 / 255, 1),
'lightpink': (255 / 255, 182 / 255, 193 / 255, 1),
'lightsalmon': (255 / 255, 160 / 255, 122 / 255, 1),
'lightseagreen': (32 / 255, 178 / 255, 170 / 255, 1),
'lightskyblue': (135 / 255, 206 / 255, 250 / 255, 1),
'lightslategray': (119 / 255, 136 / 255, 153 / 255, 1),
'lightslategrey': (119 / 255, 136 / 255, 153 / 255, 1),
'lightsteelblue': (176 / 255, 196 / 255, 222 / 255, 1),
'lightyellow': (255 / 255, 255 / 255, 224 / 255, 1),
'lime': (0 / 255, 255 / 255, 0 / 255, 1),
'limegreen': (50 / 255, 205 / 255, 50 / 255, 1),
'linen': (250 / 255, 240 / 255, 230 / 255, 1),
'magenta': (255 / 255, 0 / 255, 255 / 255, 1),
'maroon': (128 / 255, 0 / 255, 0 / 255, 1),
'mediumaquamarine': (102 / 255, 205 / 255, 170 / 255, 1),
'mediumblue': (0 / 255, 0 / 255, 205 / 255, 1),
'mediumorchid': (186 / 255, 85 / 255, 211 / 255, 1),
'mediumpurple': (147 / 255, 112 / 255, 219 / 255, 1),
'mediumseagreen': (60 / 255, 179 / 255, 113 / 255, 1),
'mediumslateblue': (123 / 255, 104 / 255, 238 / 255, 1),
'mediumspringgreen': (0 / 255, 250 / 255, 154 / 255, 1),
'mediumturquoise': (72 / 255, 209 / 255, 204 / 255, 1),
'mediumvioletred': (199 / 255, 21 / 255, 133 / 255, 1),
'midnightblue': (25 / 255, 25 / 255, 112 / 255, 1),
'mintcream': (245 / 255, 255 / 255, 250 / 255, 1),
'mistyrose': (255 / 255, 228 / 255, 225 / 255, 1),
'moccasin': (255 / 255, 228 / 255, 181 / 255, 1),
'navajowhite': (255 / 255, 222 / 255, 173 / 255, 1),
'navy': (0 / 255, 0 / 255, 128 / 255, 1),
'oldlace': (253 / 255, 245 / 255, 230 / 255, 1),
'olive': (128 / 255, 128 / 255, 0 / 255, 1),
'olivedrab': (107 / 255, 142 / 255, 35 / 255, 1),
'orange': (255 / 255, 165 / 255, 0 / 255, 1),
'orangered': (255 / 255, 69 / 255, 0 / 255, 1),
'orchid': (218 / 255, 112 / 255, 214 / 255, 1),
'palegoldenrod': (238 / 255, 232 / 255, 170 / 255, 1),
'palegreen': (152 / 255, 251 / 255, 152 / 255, 1),
'paleturquoise': (175 / 255, 238 / 255, 238 / 255, 1),
'palevioletred': (219 / 255, 112 / 255, 147 / 255, 1),
'papayawhip': (255 / 255, 239 / 255, 213 / 255, 1),
'peachpuff': (255 / 255, 218 / 255, 185 / 255, 1),
'peru': (205 / 255, 133 / 255, 63 / 255, 1),
'pink': (255 / 255, 192 / 255, 203 / 255, 1),
'plum': (221 / 255, 160 / 255, 221 / 255, 1),
'powderblue': (176 / 255, 224 / 255, 230 / 255, 1),
'purple': (128 / 255, 0 / 255, 128 / 255, 1),
'red': (255 / 255, 0 / 255, 0 / 255, 1),
'rosybrown': (188 / 255, 143 / 255, 143 / 255, 1),
'royalblue': (65 / 255, 105 / 255, 225 / 255, 1),
'saddlebrown': (139 / 255, 69 / 255, 19 / 255, 1),
'salmon': (250 / 255, 128 / 255, 114 / 255, 1),
'sandybrown': (244 / 255, 164 / 255, 96 / 255, 1),
'seagreen': (46 / 255, 139 / 255, 87 / 255, 1),
'seashell': (255 / 255, 245 / 255, 238 / 255, 1),
'sienna': (160 / 255, 82 / 255, 45 / 255, 1),
'silver': (192 / 255, 192 / 255, 192 / 255, 1),
'skyblue': (135 / 255, 206 / 255, 235 / 255, 1),
'slateblue': (106 / 255, 90 / 255, 205 / 255, 1),
'slategray': (112 / 255, 128 / 255, 144 / 255, 1),
'slategrey': (112 / 255, 128 / 255, 144 / 255, 1),
'snow': (255 / 255, 250 / 255, 250 / 255, 1),
'springgreen': (0 / 255, 255 / 255, 127 / 255, 1),
'steelblue': (70 / 255, 130 / 255, 180 / 255, 1),
'tan': (210 / 255, 180 / 255, 140 / 255, 1),
'teal': (0 / 255, 128 / 255, 128 / 255, 1),
'thistle': (216 / 255, 191 / 255, 216 / 255, 1),
'tomato': (255 / 255, 99 / 255, 71 / 255, 1),
'turquoise': (64 / 255, 224 / 255, 208 / 255, 1),
'violet': (238 / 255, 130 / 255, 238 / 255, 1),
'wheat': (245 / 255, 222 / 255, 179 / 255, 1),
'white': (255 / 255, 255 / 255, 255 / 255, 1),
'whitesmoke': (245 / 255, 245 / 255, 245 / 255, 1),
'yellow': (255 / 255, 255 / 255, 0 / 255, 1),
'yellowgreen': (154 / 255, 205 / 255, 50 / 255, 1),
'activeborder': (0, 0, 1, 1),
'activecaption': (0, 0, 1, 1),
'appworkspace': (1, 1, 1, 1),
'background': (1, 1, 1, 1),
'buttonface': (0, 0, 0, 1),
'buttonhighlight': (0.8, 0.8, 0.8, 1),
'buttonshadow': (0.2, 0.2, 0.2, 1),
'buttontext': (0, 0, 0, 1),
'captiontext': (0, 0, 0, 1),
'graytext': (0.2, 0.2, 0.2, 1),
'highlight': (0, 0, 1, 1),
'highlighttext': (0.8, 0.8, 0.8, 1),
'inactiveborder': (0.2, 0.2, 0.2, 1),
'inactivecaption': (0.8, 0.8, 0.8, 1),
'inactivecaptiontext': (0.2, 0.2, 0.2, 1),
'infobackground': (0.8, 0.8, 0.8, 1),
'infotext': (0, 0, 0, 1),
'menu': (0.8, 0.8, 0.8, 1),
'menutext': (0.2, 0.2, 0.2, 1),
'scrollbar': (0.8, 0.8, 0.8, 1),
'threeddarkshadow': (0.2, 0.2, 0.2, 1),
'threedface': (0.8, 0.8, 0.8, 1),
'threedhighlight': (1, 1, 1, 1),
'threedlightshadow': (0.2, 0.2, 0.2, 1),
'threedshadow': (0.2, 0.2, 0.2, 1),
'window': (0.8, 0.8, 0.8, 1),
'windowframe': (0.8, 0.8, 0.8, 1),
'windowtext': (0, 0, 0, 1),
'none': (0, 0, 0, 0),
'transparent': (0, 0, 0, 0),
}
RGBA = re.compile(r'rgba\([ \n\r\t]*(.+?)[ \n\r\t]*\)')
RGB = re.compile(r'rgb\([ \n\r\t]*(.+?)[ \n\r\t]*\)')
HEX_RRGGBB = re.compile('#[0-9a-f]{6}')
HEX_RGB = re.compile('#[0-9a-f]{3}')
def color(string, opacity=1):
"""Replace ``string`` representing a color by a RGBA tuple.
See http://www.w3.org/TR/SVG/types.html#DataTypeColor
"""
if not string:
return (0, 0, 0, 0)
string = string.strip().lower()
if string in COLORS:
r, g, b, a = COLORS[string]
return (r, g, b, a * opacity)
match = RGBA.search(string)
if match:
r, g, b, a = tuple(
float(i.strip(' %')) / 100 if '%' in i else float(i) / 255
for i in match.group(1).split(','))
return (r, g, b, a * 255 * opacity)
match = RGB.search(string)
if match:
r, g, b = tuple(
float(i.strip(' %')) / 100 if '%' in i else float(i) / 255
for i in match.group(1).split(','))
return (r, g, b, opacity)
match = HEX_RRGGBB.search(string)
if match:
plain_color = tuple(
int(value, 16) / 255 for value in (
string[1:3], string[3:5], string[5:7]))
return plain_color + (opacity,)
match = HEX_RGB.search(string)
if match:
plain_color = tuple(
int(value, 16) / 15 for value in (
string[1], string[2], string[3]))
return plain_color + (opacity,)
return (0, 0, 0, 1)
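# A quick usage sketch (not part of the original module; values assume Python 3
# true division, as the literals like 127 / 255 above imply):
#   color('blue')                 -> (0.0, 0.0, 1.0, 1)
#   color('#ff0000')              -> (1.0, 0.0, 0.0, 1)
#   color('rgb(255, 0, 0)')       -> (1.0, 0.0, 0.0, 1)
#   color('rgba(0, 0, 255, 0.5)') -> (0.0, 0.0, 1.0, ~0.5)  # rgba alpha is rescaled by * 255 (up to float rounding)
#   color('unknown-name')         -> (0, 0, 0, 1)           # fallback is opaque black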
|
[
"gruzdevasch@gmail.com"
] |
gruzdevasch@gmail.com
|
1153e48fb209a9481fc4f3d804857ae10cbf9c27
|
21264264d3323c2d801042ffe3aeecb97c393251
|
/regimens/tests/test_forms.py
|
e3402a9f38a01eb7015a07c85eb153af54a1a05f
|
[
"MIT"
] |
permissive
|
michael-xander/communique-webapp
|
a5dd45b3c1019d40ca02fea587ade35dbf16c0d0
|
85b450d7f6d0313c5e5ef53a262a850b7e93c3d6
|
refs/heads/master
| 2021-05-01T15:34:06.013729
| 2016-11-19T19:15:31
| 2016-11-19T19:15:31
| 63,105,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,502
|
py
|
from django.test import TestCase
import datetime
from regimens.forms import RegimenForm
from regimens.models import Drug
from patients.models import Patient
class RegimenFormTestCase(TestCase):
"""
Test cases for the regimen create form.
"""
def setUp(self):
Patient.objects.create(other_names='Jon', last_name='Snow', sex=Patient.MALE, identifier='A001')
Drug.objects.create(name='a Drug', description='A drug description')
def test_date_validation(self):
"""
        Tests that the form invalidates submissions where the start date is greater than the end date
"""
form = RegimenForm()
self.assertFalse(form.is_bound)
self.assertFalse(form.is_valid())
current_date = datetime.date.today()
one_day = datetime.timedelta(days=1)
tomorrow = current_date + one_day
data = {}
data['patient'] = 1
data['notes'] = 'Sample notes'
data['drugs'] = (1,)
data['date_started'] = current_date
data['date_ended'] = tomorrow
self.assertTrue(data['date_started'] < data['date_ended'])
form = RegimenForm(data)
self.assertTrue(form.is_bound)
self.assertTrue(form.is_valid())
data['date_started'] = tomorrow
data['date_ended'] = current_date
self.assertFalse(data['date_started'] < data['date_ended'])
form = RegimenForm(data)
self.assertTrue(data)
self.assertFalse(form.is_valid())
|
[
"michaelkyeyune01@gmail.com"
] |
michaelkyeyune01@gmail.com
|
97c39494d2c4397abe0b8e90109bb32a2af8219e
|
6730a45a5dcf89b7e9868f0a88e47541a5f30dc6
|
/tests/task_stat/test.py
|
c7a34f168b428d9a9ce28b1724701f1d8398d95e
|
[
"BSD-2-Clause-Views",
"BSD-2-Clause"
] |
permissive
|
dontkme/rmats-turbo
|
def1ca43c88b9c22827ef9652420d71d12f0f11f
|
8a2ad659717a1ccd6dbecd593dc1370ba7c30621
|
refs/heads/master
| 2023-08-24T11:19:14.177599
| 2021-08-03T12:50:52
| 2021-08-03T12:50:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,997
|
py
|
import collections
import os.path
import shutil
import subprocess
import sys
import unittest
import tests.base_test
import tests.output_parser as output_parser
import tests.test_config
import tests.util
class Test(tests.base_test.BaseTest):
def setUp(self):
super().setUp()
self._test_base_dir = tests.test_config.TEST_BASE_DIR
self._test_dir = os.path.join(self._test_base_dir, 'task_stat')
self._generated_input_dir = os.path.join(self._test_dir,
'generated_input')
self._out_dir_all = os.path.join(self._test_dir, 'out_all')
self._tmp_dir_all = os.path.join(self._test_dir, 'tmp_all')
self._out_dir_select = os.path.join(self._test_dir, 'out_select')
self._tmp_dir_select = os.path.join(self._test_dir, 'tmp_select')
self._out_dir_just_se = os.path.join(self._test_dir, 'out_just_se')
self._tmp_dir_just_se = os.path.join(self._test_dir, 'tmp_just_se')
tests.util.recreate_dirs([
self._generated_input_dir, self._out_dir_all, self._tmp_dir_all,
self._out_dir_select, self._tmp_dir_select, self._out_dir_just_se,
self._tmp_dir_just_se,
self._command_output_dir()
])
self._read_type = 'paired'
self._read_length = 50
self._chromosome_length = 4000
self._sample_1_bams_path = os.path.join(self._generated_input_dir,
'b1.txt')
self._sample_2_bams_path = os.path.join(self._generated_input_dir,
'b2.txt')
sample_1_bam_replicate_template = os.path.join(
self._generated_input_dir, 'sample_1_rep_{}.bam')
sample_2_bam_replicate_template = os.path.join(
self._generated_input_dir, 'sample_2_rep_{}.bam')
self._sample_1_bams = self._create_sample_1_bams(
self._sample_1_bams_path, sample_1_bam_replicate_template)
self._sample_2_bams = self._create_sample_2_bams(
self._sample_2_bams_path, sample_2_bam_replicate_template)
self._gtf_path = os.path.join(self._generated_input_dir, 'test.gtf')
self._gtf = self._create_gtf_from_transcripts(
self._gtf_path, self._exons_by_transcript())
self._sub_steps = [
'statoff',
'selected_stat',
'just_se',
'deferred_stat',
]
self._sub_step = None
def test(self):
for sub_step in self._sub_steps:
self._sub_step = sub_step
self._setup_sub_step()
self._run_test()
def _command_output_dir(self):
return os.path.join(self._test_dir, 'command_output')
def _rmats_arguments(self):
if self._sub_step == 'statoff':
return [
'--gtf', self._gtf_path, '-t', self._read_type, '--readLength',
str(self._read_length), '--od', self._out_dir_all, '--tmp',
self._tmp_dir_all, '--b1', self._sample_1_bams_path, '--b2',
self._sample_2_bams_path, '--task', 'both', '--statoff'
]
if self._sub_step == 'selected_stat':
return [
'--od', self._out_dir_select, '--tmp', self._tmp_dir_select,
'--task', 'stat'
]
if self._sub_step == 'just_se':
return [
'--od', self._out_dir_just_se, '--tmp', self._tmp_dir_just_se,
'--task', 'stat'
]
if self._sub_step == 'deferred_stat':
return [
'--od', self._out_dir_all, '--tmp', self._tmp_dir_all,
'--task', 'stat'
]
return None
def _setup_sub_step(self):
if self._sub_step == 'selected_stat':
self._setup_selected_stat()
if self._sub_step == 'just_se':
self._setup_just_se()
def _setup_selected_stat(self):
self._prepare_stat_inputs(self._out_dir_select, self._out_dir_all, [1],
[0, 3])
def _setup_just_se(self):
orig_from_gtf = os.path.join(self._out_dir_all, 'fromGTF.SE.txt')
new_from_gtf = os.path.join(self._out_dir_just_se, 'fromGTF.SE.txt')
shutil.copy(orig_from_gtf, new_from_gtf)
orig_raw = os.path.join(self._out_dir_all, 'JC.raw.input.SE.txt')
new_raw = os.path.join(self._out_dir_just_se, 'JC.raw.input.SE.txt')
shutil.copy(orig_raw, new_raw)
def _create_sample_1_bams(self, sample_1_bams_path,
sample_1_replicate_template):
rep_1_bam_path = sample_1_replicate_template.format(1)
rep_1_bam = self._create_bam_from_paired_read_coords(
rep_1_bam_path, self._chromosome_length, self._read_length,
self._paired_read_coords_1_1())
rep_2_bam_path = sample_1_replicate_template.format(2)
rep_2_bam = self._create_bam_from_paired_read_coords(
rep_2_bam_path, self._chromosome_length, self._read_length,
self._paired_read_coords_1_2())
sample_1_bams = [rep_1_bam, rep_2_bam]
self._write_bams(sample_1_bams, sample_1_bams_path)
return sample_1_bams
def _create_sample_2_bams(self, sample_2_bams_path,
sample_2_replicate_template):
rep_1_bam_path = sample_2_replicate_template.format(1)
rep_1_bam = self._create_bam_from_paired_read_coords(
rep_1_bam_path, self._chromosome_length, self._read_length,
self._paired_read_coords_2_1())
rep_2_bam_path = sample_2_replicate_template.format(2)
rep_2_bam = self._create_bam_from_paired_read_coords(
rep_2_bam_path, self._chromosome_length, self._read_length,
self._paired_read_coords_2_2())
sample_2_bams = [rep_1_bam, rep_2_bam]
self._write_bams(sample_2_bams, sample_2_bams_path)
return sample_2_bams
def _exons_by_transcript(self):
return [
[(1, 100), (201, 300), (401, 500)], # SE 1
[(601, 700), (801, 900), (1001, 1100)], # SE 2
[(1201, 1300), (1401, 1500), (1801, 1900)], # MXE
[(1201, 1300), (1601, 1700), (1801, 1900)], # MXE
[(2001, 2100), (2301, 2400)], # A5SS
[(2001, 2200), (2301, 2400)], # A5SS
[(2501, 2600), (2701, 2900)], # A3SS
[(2501, 2600), (2801, 2900)], # A3SS
[(3001, 3100), (3201, 3300)], # RI
[(3001, 3300)], # RI
]
def _include_read_SE_1(self):
return ([[81, 100], [201, 300]], [[201, 300]])
def _skip_read_SE_1(self):
return ([[81, 100], [401, 500]], [[401, 500]])
def _include_read_SE_2(self):
return ([[681, 700], [801, 900]], [[801, 900]])
def _skip_read_SE_2(self):
return ([[681, 700], [1001, 1100]], [[1001, 1100]])
def _se_reads_from_counts(self, i1, s1, i2, s2):
return i1 * [self._include_read_SE_1()] + s1 * [
self._skip_read_SE_1()
] + i2 * [self._include_read_SE_2()] + s2 * [self._skip_read_SE_2()]
def _mxe_read_1(self):
return ([[1281, 1300], [1401, 1500]], [[1401, 1500]])
def _mxe_read_2(self):
return ([[1281, 1300], [1601, 1700]], [[1601, 1700]])
def _a5ss_read_1(self):
return ([[2081, 2100], [2301, 2400]], [[2301, 2400]])
def _a5ss_read_2(self):
return ([[2181, 2200], [2301, 2400]], [[2301, 2400]])
def _a3ss_read_1(self):
return ([[2581, 2600], [2701, 2900]], [[2701, 2900]])
def _a3ss_read_2(self):
return ([[2581, 2600], [2801, 2900]], [[2801, 2900]])
def _ri_read_1(self):
return ([[3081, 3100], [3201, 3300]], [[3201, 3300]])
def _ri_read_2(self):
return ([[3081, 3300]], [[3001, 3220]])
def _other_reads(self):
return [
self._mxe_read_1(),
self._mxe_read_2(),
self._a5ss_read_1(),
self._a5ss_read_2(),
self._a3ss_read_1(),
self._a3ss_read_2(),
self._ri_read_1(),
self._ri_read_2()
]
def _paired_read_coords_1_1(self):
return self._se_reads_from_counts(10, 10, 10, 0) + self._other_reads()
def _paired_read_coords_1_2(self):
return self._se_reads_from_counts(15, 5, 10, 0) + self._other_reads()
def _paired_read_coords_2_1(self):
return self._se_reads_from_counts(10, 0, 10, 10) + self._other_reads()
def _paired_read_coords_2_2(self):
return self._se_reads_from_counts(10, 0, 15, 5) + self._other_reads()
def _prepare_stat_inputs(self, new_out_dir, old_out_dir, group_1_indices,
group_2_indices):
command = [
sys.executable, tests.test_config.PREPARE_STAT_INPUTS,
'--new-output-dir', new_out_dir, '--old-output-dir', old_out_dir,
'--group-1-indices', ','.join([str(x) for x in group_1_indices]),
'--group-2-indices', ','.join([str(x) for x in group_2_indices])
]
subprocess.run(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
def _check_results(self):
if self._sub_step == 'statoff':
self._check_results_statoff()
elif self._sub_step == 'selected_stat':
self._check_results_selected_stat()
elif self._sub_step == 'just_se':
self._check_results_just_se()
elif self._sub_step == 'deferred_stat':
self._check_results_deferred_stat()
else:
self.fail('unexpected sub_step: {}'.format(self._sub_step))
def _read_floats(self, floats_str):
float_strs = floats_str.split(',')
floats = list()
for float_str in float_strs:
try:
floats.append(float(float_str))
except ValueError as e:
self.fail('could not parse {} as float from {}: {}'.format(
float_str, floats_str, e))
return floats
def _check_results_statoff(self):
self._check_no_error_results()
se_mats_jc_path = os.path.join(self._out_dir_all, 'SE.MATS.JC.txt')
se_mats_jc_header, se_mats_jc_rows, error = output_parser.parse_mats_jc(
se_mats_jc_path)
self.assertFalse(error)
self._check_se_mats_jc_header(se_mats_jc_header)
self.assertEqual(len(se_mats_jc_rows), 2)
for row in se_mats_jc_rows:
self.assertIn(row['exonStart_0base'], ['200', '800'])
if row['exonStart_0base'] == '200':
self.assertEqual(row['IJC_SAMPLE_1'], '10,15')
self.assertEqual(row['SJC_SAMPLE_1'], '10,5')
self.assertEqual(row['IJC_SAMPLE_2'], '10,10')
self.assertEqual(row['SJC_SAMPLE_2'], '0,0')
self.assertEqual(row['PValue'], 'NA')
self.assertEqual(row['FDR'], 'NA')
self.assertEqual(self._read_floats(row['IncLevel1']),
[0.333, 0.6])
self.assertEqual(self._read_floats(row['IncLevel2']),
[1.0, 1.0])
self.assertEqual(float(row['IncLevelDifference']), -0.533)
elif row['exonStart_0base'] == '800':
self.assertEqual(row['IJC_SAMPLE_1'], '10,10')
self.assertEqual(row['SJC_SAMPLE_1'], '0,0')
self.assertEqual(row['IJC_SAMPLE_2'], '10,15')
self.assertEqual(row['SJC_SAMPLE_2'], '10,5')
self.assertEqual(row['PValue'], 'NA')
self.assertEqual(row['FDR'], 'NA')
self.assertEqual(self._read_floats(row['IncLevel1']),
[1.0, 1.0])
self.assertEqual(self._read_floats(row['IncLevel2']),
[0.333, 0.6])
self.assertEqual(float(row['IncLevelDifference']), 0.533)
se_mats_jcec_path = os.path.join(self._out_dir_all, 'SE.MATS.JCEC.txt')
se_mats_jcec_header, se_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(se_mats_jcec_path))
self.assertFalse(error)
self._check_se_mats_jcec_header(se_mats_jcec_header)
self.assertEqual(len(se_mats_jcec_rows), 2)
for row in se_mats_jcec_rows:
self.assertIn(row['exonStart_0base'], ['200', '800'])
if row['exonStart_0base'] == '200':
self.assertEqual(row['IJC_SAMPLE_1'], '20,30')
self.assertEqual(row['SJC_SAMPLE_1'], '10,5')
self.assertEqual(row['IJC_SAMPLE_2'], '20,20')
self.assertEqual(row['SJC_SAMPLE_2'], '0,0')
self.assertEqual(row['PValue'], 'NA')
self.assertEqual(row['FDR'], 'NA')
self.assertEqual(self._read_floats(row['IncLevel1']),
[0.397, 0.664])
self.assertEqual(self._read_floats(row['IncLevel2']),
[1.0, 1.0])
self.assertEqual(float(row['IncLevelDifference']), -0.47)
mxe_mats_jc_path = os.path.join(self._out_dir_all, 'MXE.MATS.JC.txt')
mxe_mats_jc_header, mxe_mats_jc_rows, error = (
output_parser.parse_mats_jc(mxe_mats_jc_path))
self.assertFalse(error)
self._check_mxe_mats_jc_header(mxe_mats_jc_header)
self.assertEqual(len(mxe_mats_jc_rows), 1)
self.assertEqual(mxe_mats_jc_rows[0]['FDR'], 'NA')
mxe_mats_jcec_path = os.path.join(self._out_dir_all,
'MXE.MATS.JCEC.txt')
mxe_mats_jcec_header, mxe_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(mxe_mats_jcec_path))
self.assertFalse(error)
self._check_mxe_mats_jcec_header(mxe_mats_jcec_header)
self.assertEqual(len(mxe_mats_jcec_rows), 1)
self.assertEqual(mxe_mats_jcec_rows[0]['FDR'], 'NA')
a5ss_mats_jc_path = os.path.join(self._out_dir_all, 'A5SS.MATS.JC.txt')
a5ss_mats_jc_header, a5ss_mats_jc_rows, error = (
output_parser.parse_mats_jc(a5ss_mats_jc_path))
self.assertFalse(error)
self._check_a35ss_mats_jc_header(a5ss_mats_jc_header)
self.assertEqual(len(a5ss_mats_jc_rows), 1)
self.assertEqual(a5ss_mats_jc_rows[0]['FDR'], 'NA')
a5ss_mats_jcec_path = os.path.join(self._out_dir_all,
'A5SS.MATS.JCEC.txt')
a5ss_mats_jcec_header, a5ss_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(a5ss_mats_jcec_path))
self.assertFalse(error)
self._check_a35ss_mats_jcec_header(a5ss_mats_jcec_header)
self.assertEqual(len(a5ss_mats_jcec_rows), 1)
self.assertEqual(a5ss_mats_jcec_rows[0]['FDR'], 'NA')
a3ss_mats_jc_path = os.path.join(self._out_dir_all, 'A3SS.MATS.JC.txt')
a3ss_mats_jc_header, a3ss_mats_jc_rows, error = (
output_parser.parse_mats_jc(a3ss_mats_jc_path))
self.assertFalse(error)
self._check_a35ss_mats_jc_header(a3ss_mats_jc_header)
self.assertEqual(len(a3ss_mats_jc_rows), 1)
self.assertEqual(a3ss_mats_jc_rows[0]['FDR'], 'NA')
a3ss_mats_jcec_path = os.path.join(self._out_dir_all,
'A3SS.MATS.JCEC.txt')
a3ss_mats_jcec_header, a3ss_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(a3ss_mats_jcec_path))
self.assertFalse(error)
self._check_a35ss_mats_jcec_header(a3ss_mats_jcec_header)
self.assertEqual(len(a3ss_mats_jcec_rows), 1)
self.assertEqual(a3ss_mats_jcec_rows[0]['FDR'], 'NA')
ri_mats_jc_path = os.path.join(self._out_dir_all, 'RI.MATS.JC.txt')
ri_mats_jc_header, ri_mats_jc_rows, error = output_parser.parse_mats_jc(
ri_mats_jc_path)
self.assertFalse(error)
self._check_ri_mats_jc_header(ri_mats_jc_header)
self.assertEqual(len(ri_mats_jc_rows), 1)
self.assertEqual(ri_mats_jc_rows[0]['FDR'], 'NA')
ri_mats_jcec_path = os.path.join(self._out_dir_all, 'RI.MATS.JCEC.txt')
ri_mats_jcec_header, ri_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(ri_mats_jcec_path))
self.assertFalse(error)
self._check_ri_mats_jcec_header(ri_mats_jcec_header)
self.assertEqual(len(ri_mats_jcec_rows), 1)
self.assertEqual(ri_mats_jcec_rows[0]['FDR'], 'NA')
def _check_results_selected_stat(self):
self._check_no_error_results()
se_mats_jc_path = os.path.join(self._out_dir_select, 'SE.MATS.JC.txt')
se_mats_jc_header, se_mats_jc_rows, error = output_parser.parse_mats_jc(
se_mats_jc_path)
self.assertFalse(error)
self._check_se_mats_jc_header(se_mats_jc_header)
self.assertEqual(len(se_mats_jc_rows), 2)
for row in se_mats_jc_rows:
self.assertIn(row['exonStart_0base'], ['200', '800'])
if row['exonStart_0base'] == '200':
self.assertEqual(row['IJC_SAMPLE_1'], '15')
self.assertEqual(row['SJC_SAMPLE_1'], '5')
self.assertEqual(row['IJC_SAMPLE_2'], '10,10')
self.assertEqual(row['SJC_SAMPLE_2'], '10,0')
tests.util.assert_within_bounds(self, float(row['PValue']), 0,
1)
tests.util.assert_within_bounds(self, float(row['FDR']), 0, 1)
self.assertEqual(self._read_floats(row['IncLevel1']), [0.6])
self.assertEqual(self._read_floats(row['IncLevel2']),
[0.333, 1.0])
self.assertEqual(float(row['IncLevelDifference']), -0.067)
elif row['exonStart_0base'] == '800':
self.assertEqual(row['IJC_SAMPLE_1'], '10')
self.assertEqual(row['SJC_SAMPLE_1'], '0')
self.assertEqual(row['IJC_SAMPLE_2'], '10,15')
self.assertEqual(row['SJC_SAMPLE_2'], '0,5')
tests.util.assert_within_bounds(self, float(row['PValue']), 0,
1)
tests.util.assert_within_bounds(self, float(row['FDR']), 0, 1)
self.assertEqual(self._read_floats(row['IncLevel1']), [1.0])
self.assertEqual(self._read_floats(row['IncLevel2']),
[1.0, 0.6])
self.assertEqual(float(row['IncLevelDifference']), 0.2)
se_mats_jcec_path = os.path.join(self._out_dir_select,
'SE.MATS.JCEC.txt')
se_mats_jcec_header, se_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(se_mats_jcec_path))
self.assertFalse(error)
self._check_se_mats_jcec_header(se_mats_jcec_header)
self.assertEqual(len(se_mats_jcec_rows), 2)
for row in se_mats_jcec_rows:
self.assertIn(row['exonStart_0base'], ['200', '800'])
if row['exonStart_0base'] == '200':
self.assertEqual(row['IJC_SAMPLE_1'], '30')
self.assertEqual(row['SJC_SAMPLE_1'], '5')
self.assertEqual(row['IJC_SAMPLE_2'], '20,20')
self.assertEqual(row['SJC_SAMPLE_2'], '10,0')
tests.util.assert_within_bounds(self, float(row['PValue']), 0,
1)
tests.util.assert_within_bounds(self, float(row['FDR']), 0, 1)
self.assertEqual(self._read_floats(row['IncLevel1']), [0.664])
self.assertEqual(self._read_floats(row['IncLevel2']),
[0.397, 1.0])
self.assertEqual(float(row['IncLevelDifference']), -0.034)
mxe_mats_jc_path = os.path.join(self._out_dir_select,
'MXE.MATS.JC.txt')
mxe_mats_jc_header, mxe_mats_jc_rows, error = (
output_parser.parse_mats_jc(mxe_mats_jc_path))
self.assertFalse(error)
self._check_mxe_mats_jc_header(mxe_mats_jc_header)
self.assertEqual(len(mxe_mats_jc_rows), 1)
tests.util.assert_within_bounds(self,
float(mxe_mats_jc_rows[0]['FDR']), 0,
1)
mxe_mats_jcec_path = os.path.join(self._out_dir_select,
'MXE.MATS.JCEC.txt')
mxe_mats_jcec_header, mxe_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(mxe_mats_jcec_path))
self.assertFalse(error)
self._check_mxe_mats_jcec_header(mxe_mats_jcec_header)
self.assertEqual(len(mxe_mats_jcec_rows), 1)
tests.util.assert_within_bounds(self,
float(mxe_mats_jcec_rows[0]['FDR']), 0,
1)
a5ss_mats_jc_path = os.path.join(self._out_dir_select,
'A5SS.MATS.JC.txt')
a5ss_mats_jc_header, a5ss_mats_jc_rows, error = (
output_parser.parse_mats_jc(a5ss_mats_jc_path))
self.assertFalse(error)
self._check_a35ss_mats_jc_header(a5ss_mats_jc_header)
self.assertEqual(len(a5ss_mats_jc_rows), 1)
tests.util.assert_within_bounds(self,
float(a5ss_mats_jc_rows[0]['FDR']), 0,
1)
a5ss_mats_jcec_path = os.path.join(self._out_dir_select,
'A5SS.MATS.JCEC.txt')
a5ss_mats_jcec_header, a5ss_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(a5ss_mats_jcec_path))
self.assertFalse(error)
self._check_a35ss_mats_jcec_header(a5ss_mats_jcec_header)
self.assertEqual(len(a5ss_mats_jcec_rows), 1)
tests.util.assert_within_bounds(self,
float(a5ss_mats_jcec_rows[0]['FDR']),
0, 1)
a3ss_mats_jc_path = os.path.join(self._out_dir_select,
'A3SS.MATS.JC.txt')
a3ss_mats_jc_header, a3ss_mats_jc_rows, error = (
output_parser.parse_mats_jc(a3ss_mats_jc_path))
self.assertFalse(error)
self._check_a35ss_mats_jc_header(a3ss_mats_jc_header)
self.assertEqual(len(a3ss_mats_jc_rows), 1)
tests.util.assert_within_bounds(self,
float(a3ss_mats_jc_rows[0]['FDR']), 0,
1)
a3ss_mats_jcec_path = os.path.join(self._out_dir_select,
'A3SS.MATS.JCEC.txt')
a3ss_mats_jcec_header, a3ss_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(a3ss_mats_jcec_path))
self.assertFalse(error)
self._check_a35ss_mats_jcec_header(a3ss_mats_jcec_header)
self.assertEqual(len(a3ss_mats_jcec_rows), 1)
tests.util.assert_within_bounds(self,
float(a3ss_mats_jcec_rows[0]['FDR']),
0, 1)
ri_mats_jc_path = os.path.join(self._out_dir_select, 'RI.MATS.JC.txt')
ri_mats_jc_header, ri_mats_jc_rows, error = output_parser.parse_mats_jc(
ri_mats_jc_path)
self.assertFalse(error)
self._check_ri_mats_jc_header(ri_mats_jc_header)
self.assertEqual(len(ri_mats_jc_rows), 1)
tests.util.assert_within_bounds(self, float(ri_mats_jc_rows[0]['FDR']),
0, 1)
ri_mats_jcec_path = os.path.join(self._out_dir_select,
'RI.MATS.JCEC.txt')
ri_mats_jcec_header, ri_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(ri_mats_jcec_path))
self.assertFalse(error)
self._check_ri_mats_jcec_header(ri_mats_jcec_header)
self.assertEqual(len(ri_mats_jcec_rows), 1)
tests.util.assert_within_bounds(self,
float(ri_mats_jcec_rows[0]['FDR']), 0,
1)
def _check_results_just_se(self):
self.assertEqual(self._rmats_return_code, 0)
command_stderr_file_name = self._get_stderr_file_name()
with open(command_stderr_file_name, 'rt') as err_f_h:
err_lines = err_f_h.readlines()
self.assertEqual(len(err_lines), 9)
unable_to_produce_lines = collections.defaultdict(int)
for err_line in err_lines:
self.assertIn('Unable to produce final output files for', err_line)
if ' SE ' in err_line:
unable_to_produce_lines['SE'] += 1
if ' MXE ' in err_line:
unable_to_produce_lines['MXE'] += 1
if ' A5SS ' in err_line:
unable_to_produce_lines['A5SS'] += 1
if ' A3SS ' in err_line:
unable_to_produce_lines['A3SS'] += 1
if ' RI ' in err_line:
unable_to_produce_lines['RI'] += 1
self.assertEqual(unable_to_produce_lines, {
'SE': 1,
'MXE': 2,
'A5SS': 2,
'A3SS': 2,
'RI': 2
})
se_mats_jc_path = os.path.join(self._out_dir_just_se, 'SE.MATS.JC.txt')
se_mats_jc_header, se_mats_jc_rows, error = output_parser.parse_mats_jc(
se_mats_jc_path)
self.assertFalse(error)
self._check_se_mats_jc_header(se_mats_jc_header)
self.assertEqual(len(se_mats_jc_rows), 2)
for row in se_mats_jc_rows:
self.assertIn(row['exonStart_0base'], ['200', '800'])
if row['exonStart_0base'] == '200':
self.assertEqual(row['IJC_SAMPLE_1'], '10,15')
self.assertEqual(row['SJC_SAMPLE_1'], '10,5')
self.assertEqual(row['IJC_SAMPLE_2'], '10,10')
self.assertEqual(row['SJC_SAMPLE_2'], '0,0')
tests.util.assert_within_bounds(self, float(row['PValue']), 0,
1)
tests.util.assert_within_bounds(self, float(row['FDR']), 0, 1)
self.assertEqual(self._read_floats(row['IncLevel1']),
[0.333, 0.6])
self.assertEqual(self._read_floats(row['IncLevel2']),
[1.0, 1.0])
self.assertEqual(float(row['IncLevelDifference']), -0.533)
se_mats_jcec_path = os.path.join(self._out_dir_just_se,
'SE.MATS.JCEC.txt')
self.assertFalse(os.path.exists(se_mats_jcec_path))
mxe_mats_jc_path = os.path.join(self._out_dir_just_se,
'MXE.MATS.JC.txt')
self.assertFalse(os.path.exists(mxe_mats_jc_path))
mxe_mats_jcec_path = os.path.join(self._out_dir_just_se,
'MXE.MATS.JCEC.txt')
self.assertFalse(os.path.exists(mxe_mats_jcec_path))
a5ss_mats_jc_path = os.path.join(self._out_dir_just_se,
'A5SS.MATS.JC.txt')
self.assertFalse(os.path.exists(a5ss_mats_jc_path))
a5ss_mats_jcec_path = os.path.join(self._out_dir_just_se,
'A5SS.MATS.JCEC.txt')
self.assertFalse(os.path.exists(a5ss_mats_jcec_path))
a3ss_mats_jc_path = os.path.join(self._out_dir_just_se,
'A3SS.MATS.JC.txt')
self.assertFalse(os.path.exists(a3ss_mats_jc_path))
a3ss_mats_jcec_path = os.path.join(self._out_dir_just_se,
'A3SS.MATS.JCEC.txt')
self.assertFalse(os.path.exists(a3ss_mats_jcec_path))
ri_mats_jc_path = os.path.join(self._out_dir_just_se, 'RI.MATS.JC.txt')
self.assertFalse(os.path.exists(ri_mats_jc_path))
ri_mats_jcec_path = os.path.join(self._out_dir_just_se,
'RI.MATS.JCEC.txt')
self.assertFalse(os.path.exists(ri_mats_jcec_path))
def _check_results_deferred_stat(self):
self._check_no_error_results()
se_mats_jc_path = os.path.join(self._out_dir_all, 'SE.MATS.JC.txt')
se_mats_jc_header, se_mats_jc_rows, error = output_parser.parse_mats_jc(
se_mats_jc_path)
self.assertFalse(error)
self._check_se_mats_jc_header(se_mats_jc_header)
self.assertEqual(len(se_mats_jc_rows), 2)
for row in se_mats_jc_rows:
self.assertIn(row['exonStart_0base'], ['200', '800'])
if row['exonStart_0base'] == '200':
self.assertEqual(row['IJC_SAMPLE_1'], '10,15')
self.assertEqual(row['SJC_SAMPLE_1'], '10,5')
self.assertEqual(row['IJC_SAMPLE_2'], '10,10')
self.assertEqual(row['SJC_SAMPLE_2'], '0,0')
tests.util.assert_within_bounds(self, float(row['PValue']), 0,
1)
tests.util.assert_within_bounds(self, float(row['FDR']), 0, 1)
self.assertEqual(self._read_floats(row['IncLevel1']),
[0.333, 0.6])
self.assertEqual(self._read_floats(row['IncLevel2']),
[1.0, 1.0])
self.assertEqual(float(row['IncLevelDifference']), -0.533)
elif row['exonStart_0base'] == '800':
self.assertEqual(row['IJC_SAMPLE_1'], '10,10')
self.assertEqual(row['SJC_SAMPLE_1'], '0,0')
self.assertEqual(row['IJC_SAMPLE_2'], '10,15')
self.assertEqual(row['SJC_SAMPLE_2'], '10,5')
tests.util.assert_within_bounds(self, float(row['PValue']), 0,
1)
tests.util.assert_within_bounds(self, float(row['FDR']), 0, 1)
self.assertEqual(self._read_floats(row['IncLevel1']),
[1.0, 1.0])
self.assertEqual(self._read_floats(row['IncLevel2']),
[0.333, 0.6])
self.assertEqual(float(row['IncLevelDifference']), 0.533)
se_mats_jcec_path = os.path.join(self._out_dir_all, 'SE.MATS.JCEC.txt')
se_mats_jcec_header, se_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(se_mats_jcec_path))
self.assertFalse(error)
self._check_se_mats_jcec_header(se_mats_jcec_header)
self.assertEqual(len(se_mats_jcec_rows), 2)
for row in se_mats_jcec_rows:
self.assertIn(row['exonStart_0base'], ['200', '800'])
if row['exonStart_0base'] == '200':
self.assertEqual(row['IJC_SAMPLE_1'], '20,30')
self.assertEqual(row['SJC_SAMPLE_1'], '10,5')
self.assertEqual(row['IJC_SAMPLE_2'], '20,20')
self.assertEqual(row['SJC_SAMPLE_2'], '0,0')
tests.util.assert_within_bounds(self, float(row['PValue']), 0,
1)
tests.util.assert_within_bounds(self, float(row['FDR']), 0, 1)
self.assertEqual(self._read_floats(row['IncLevel1']),
[0.397, 0.664])
self.assertEqual(self._read_floats(row['IncLevel2']),
[1.0, 1.0])
self.assertEqual(float(row['IncLevelDifference']), -0.47)
mxe_mats_jc_path = os.path.join(self._out_dir_all, 'MXE.MATS.JC.txt')
mxe_mats_jc_header, mxe_mats_jc_rows, error = (
output_parser.parse_mats_jc(mxe_mats_jc_path))
self.assertFalse(error)
self._check_mxe_mats_jc_header(mxe_mats_jc_header)
self.assertEqual(len(mxe_mats_jc_rows), 1)
tests.util.assert_within_bounds(self,
float(mxe_mats_jc_rows[0]['FDR']), 0,
1)
mxe_mats_jcec_path = os.path.join(self._out_dir_all,
'MXE.MATS.JCEC.txt')
mxe_mats_jcec_header, mxe_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(mxe_mats_jcec_path))
self.assertFalse(error)
self._check_mxe_mats_jcec_header(mxe_mats_jcec_header)
self.assertEqual(len(mxe_mats_jcec_rows), 1)
tests.util.assert_within_bounds(self,
float(mxe_mats_jcec_rows[0]['FDR']), 0,
1)
a5ss_mats_jc_path = os.path.join(self._out_dir_all, 'A5SS.MATS.JC.txt')
a5ss_mats_jc_header, a5ss_mats_jc_rows, error = (
output_parser.parse_mats_jc(a5ss_mats_jc_path))
self.assertFalse(error)
self._check_a35ss_mats_jc_header(a5ss_mats_jc_header)
self.assertEqual(len(a5ss_mats_jc_rows), 1)
tests.util.assert_within_bounds(self,
float(a5ss_mats_jc_rows[0]['FDR']), 0,
1)
a5ss_mats_jcec_path = os.path.join(self._out_dir_all,
'A5SS.MATS.JCEC.txt')
a5ss_mats_jcec_header, a5ss_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(a5ss_mats_jcec_path))
self.assertFalse(error)
self._check_a35ss_mats_jcec_header(a5ss_mats_jcec_header)
self.assertEqual(len(a5ss_mats_jcec_rows), 1)
tests.util.assert_within_bounds(self,
float(a5ss_mats_jcec_rows[0]['FDR']),
0, 1)
a3ss_mats_jc_path = os.path.join(self._out_dir_all, 'A3SS.MATS.JC.txt')
a3ss_mats_jc_header, a3ss_mats_jc_rows, error = (
output_parser.parse_mats_jc(a3ss_mats_jc_path))
self.assertFalse(error)
self._check_a35ss_mats_jc_header(a3ss_mats_jc_header)
self.assertEqual(len(a3ss_mats_jc_rows), 1)
tests.util.assert_within_bounds(self,
float(a3ss_mats_jc_rows[0]['FDR']), 0,
1)
a3ss_mats_jcec_path = os.path.join(self._out_dir_all,
'A3SS.MATS.JCEC.txt')
a3ss_mats_jcec_header, a3ss_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(a3ss_mats_jcec_path))
self.assertFalse(error)
self._check_a35ss_mats_jcec_header(a3ss_mats_jcec_header)
self.assertEqual(len(a3ss_mats_jcec_rows), 1)
tests.util.assert_within_bounds(self,
float(a3ss_mats_jcec_rows[0]['FDR']),
0, 1)
ri_mats_jc_path = os.path.join(self._out_dir_all, 'RI.MATS.JC.txt')
ri_mats_jc_header, ri_mats_jc_rows, error = output_parser.parse_mats_jc(
ri_mats_jc_path)
self.assertFalse(error)
self._check_ri_mats_jc_header(ri_mats_jc_header)
self.assertEqual(len(ri_mats_jc_rows), 1)
tests.util.assert_within_bounds(self, float(ri_mats_jc_rows[0]['FDR']),
0, 1)
ri_mats_jcec_path = os.path.join(self._out_dir_all, 'RI.MATS.JCEC.txt')
ri_mats_jcec_header, ri_mats_jcec_rows, error = (
output_parser.parse_mats_jcec(ri_mats_jcec_path))
self.assertFalse(error)
self._check_ri_mats_jcec_header(ri_mats_jcec_header)
self.assertEqual(len(ri_mats_jcec_rows), 1)
tests.util.assert_within_bounds(self,
float(ri_mats_jcec_rows[0]['FDR']), 0,
1)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"noreply@github.com"
] |
dontkme.noreply@github.com
|
16bdf1311ec0dd7bb5b4d66680fad7469efdf356
|
2f4b95f4b17337f02c21f33f7aa2ce11476898ff
|
/superlists/lists/tests/test_models.py
|
17334b2a932fbd11564264621a19bfdffdf5f4b6
|
[] |
no_license
|
Wowip/EjerciciosTDD
|
6c891d03250462408dbb25d5d36004c092d6dbe2
|
940c0fcae6a95998c1c013a35f92e659167de67b
|
refs/heads/master
| 2020-08-01T02:13:03.887136
| 2016-11-20T19:18:49
| 2016-11-20T19:18:49
| 73,586,488
| 0
| 0
| null | 2016-11-18T06:59:20
| 2016-11-13T00:21:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from lists.models import Item, List
class ItemModelTest(TestCase):
def test_default_text(self):
item = Item()
self.assertEqual(item.text, '')
def test_item_is_related_to_list(self):
list_ = List.objects.create()
item = Item()
item.list = list_
item.save()
self.assertIn(item, list_.item_set.all())
def test_cannot_save_empty_list_items(self):
list_ = List.objects.create()
item = Item(list=list_, text='')
with self.assertRaises(ValidationError):
item.save()
item.full_clean()
def test_duplicate_items_are_invalid(self):
list_ = List.objects.create()
Item.objects.create(list=list_, text='bla')
with self.assertRaises(ValidationError):
item = Item(list=list_, text='bla')
item.full_clean()
def test_CAN_save_same_item_to_different_lists(self):
list1 = List.objects.create()
list2 = List.objects.create()
Item.objects.create(list=list1, text='bla')
item = Item(list=list2, text='bla')
item.full_clean() # should not raise
def test_list_ordering(self):
list1 = List.objects.create()
item1 = Item.objects.create(list=list1, text='i1')
item2 = Item.objects.create(list=list1, text='item 2')
item3 = Item.objects.create(list=list1, text='3')
self.assertEqual(
list(Item.objects.all()),
[item1, item2, item3]
)
def test_string_representation(self):
item = Item(text='some text')
self.assertEqual(str(item), 'some text')
class ListModelTest(TestCase):
def test_get_absolute_url(self):
list_ = List.objects.create()
self.assertEqual(list_.get_absolute_url(), '/lists/%d/' % (list_.id,))
|
[
"arturoinosencio@gmail.com"
] |
arturoinosencio@gmail.com
|
abdf25273170a0e464ee6c988a08c42a21bbd8b0
|
f3a1629a46f5c3cbf7314c54fc36be3156146517
|
/venv/bin/sqlformat
|
f01aa4e0e6285b25c7aec0571805920953267bed
|
[] |
no_license
|
AlexsandroMO/qualiy_applications
|
ec4cdbcbacc9f403d7d34ca9573af44df9c9230a
|
08656c8368f10d54e5b9c8e4a758989239224dc6
|
refs/heads/main
| 2023-01-13T03:50:21.779274
| 2020-11-14T19:05:20
| 2020-11-14T19:05:20
| 305,471,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
#!/Users/alexsandromonteiro/Desktop/Prog_Python/qualiy_applications/venv/bin/python3.8
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"sandrobass@hotmail.com"
] |
sandrobass@hotmail.com
|
|
9193f6aad6ce7c106a99eac8bfc6eb8b4001266e
|
5f6fd8232989e9d033506c84083b909117d0086b
|
/thread/thread_listen_coding - 副本.py
|
bb7a3b4a7b13fe86e6a603f4739d428959d85913
|
[] |
no_license
|
seryte/int_python_pro
|
a4a9fd085c21adf6161bc8173f77b6b4c191adb2
|
fc0a1bc8d64f01d07cc63690f4bad04c7a20b07f
|
refs/heads/master
| 2020-03-09T06:02:35.139890
| 2018-05-16T03:42:20
| 2018-05-16T03:42:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-04-17 20:04:07
# @Author : Your Name (you@example.org)
# @Link : http://example.org
# @Version : $Id$
import threading,time
def listen(name,now):
print("listening:%s %s" %(name,now))
time.sleep(2)
print("listening 执行结束 %s"%time.strftime("%Y-%m-%d %H:%M:%S"))
def coding(name,now):
print("coding:%s %s"%(name,now))
time.sleep(5)
print("coding 执行结束 %s"%time.strftime("%Y-%m-%d %H:%M:%S"))
print("单线程调用:")
listen("如果没有如果",time.strftime("%Y-%m-%d %H:%M:%S"))
coding("python", time.strftime("%Y-%m-%d %H:%M:%S"))
thread = []
t1 = threading.Thread(target=listen, args=("说散就散",time.strftime("%Y-%m-%d %H:%M:%S")))
t2 = threading.Thread(target=coding, args=("codename",time.strftime("%Y-%m-%d %H:%M:%S")))
thread.append(t1)
thread.append(t2)
# print(thread)
print("\n多线程调用:")
# t1.start()
# t2.start()
for i in thread:
    i.setDaemon(True)
    i.start()
# join after all threads have started, so they run concurrently instead of one by one
for i in thread:
    i.join()
|
[
"382643558@qq.com"
] |
382643558@qq.com
|
5540e36a51a6dfcea256b55d35fa3fa736c86e2e
|
e96bab82cc398aec30b8969288def8403c033207
|
/36_Data types function.py
|
dcceb7b9db3682f5ce76e3d3a2bec556373faceb
|
[] |
no_license
|
shiyanshirani/w3s-150-questions
|
125c7df920d6cc04e71232f0d994673aaa0d8d15
|
3d532ab7a70767d905f86423185e2abbee6d7c2b
|
refs/heads/master
| 2022-11-05T03:30:19.738858
| 2020-06-27T14:17:50
| 2020-06-27T14:17:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
def typesfunction(a, b):
    if type(a)==int and type(b)==int:
        print(a+b)
    else:
        print("Different types")
typesfunction("google", 5)
typesfunction(111,0)
|
[
"shiyan99@gmail.com"
] |
shiyan99@gmail.com
|
db0ec3865a75078674e752309dad2296cd1bbd26
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/19_数学/计算几何/直线/practice/面试题 16.14. 最佳直线 copy.py
|
9a95efaf08f17ae51ca4cf2e9edb0dae3b4f6f05
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,033
|
py
|
from typing import List
from collections import defaultdict
def gcd(a, b):
    return b if a == 0 else gcd(b % a, a)  # Euclid's algorithm
class Solution:
def bestLine(self, points: List[List[int]]) -> List[int]:
n = len(points)
res = []
maxCount = 0
for i in range(n):
x1, y1 = points[i]
counter = defaultdict(int)
firstpair = defaultdict(list)
for j in range(i + 1, n):
x2, y2 = points[j]
A, B = (y2 - y1), (x2 - x1)
if B == 0:
key = (0, 0)
else:
gcd_ = gcd(A, B)
key = (A / gcd_, B / gcd_)
counter[key] += 1
firstpair.setdefault(key, [i, j])
                if counter[key] > maxCount:  # update only when strictly more points share this slope
maxCount = counter[key]
res = firstpair[key]
return res
print(
Solution().bestLine(
[
[-24272, -29606],
[-37644, -4251],
[2691, -22513],
[-14592, -33765],
[-21858, 28550],
[-22264, 41303],
[-6960, 12785],
[-39133, -41833],
[25151, -26643],
[-19416, 28550],
[-17420, 22270],
[-8793, 16457],
[-4303, -25680],
[-14405, 26607],
[-49083, -26336],
[22629, 20544],
[-23939, -25038],
[-40441, -26962],
[-29484, -30503],
[-32927, -18287],
[-13312, -22513],
[15026, 12965],
[-16361, -23282],
[7296, -15750],
[-11690, -21723],
[-34850, -25928],
[-14933, -16169],
[23459, -9358],
[-45719, -13202],
[-26868, 28550],
[4627, 16457],
[-7296, -27760],
[-32230, 8174],
[-28233, -8627],
[-26520, 28550],
[5515, -26001],
[-16766, 28550],
[21888, -3740],
[1251, 28550],
[15333, -26322],
[-27677, -19790],
[20311, 7075],
[-10751, 16457],
[-47762, -44638],
[20991, 24942],
[-19056, -11105],
[-26639, 28550],
[-19862, 16457],
[-27506, -4251],
[-20172, -5440],
[-33757, -24717],
[-9411, -17379],
[12493, 29906],
[0, -21755],
[-36885, -16192],
[-38195, -40088],
[-40079, 7667],
[-29294, -34032],
[-55968, 23947],
[-22724, -22513],
[20362, -11530],
[-11817, -23957],
[-33742, 5259],
[-10350, -4251],
[-11690, -22513],
[-20241, -22513],
]
)
)
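# A small sanity-check example (not from the original file): for the collinear points
# (0,0), (1,1), (2,2) plus an outlier (3,4), the densest line is found from anchor 0 and
# the first pair of indices on that line is returned.
# Solution().bestLine([[0, 0], [1, 1], [2, 2], [3, 4]])  # -> [0, 1]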
|
[
"lmt2818088@gmail.com"
] |
lmt2818088@gmail.com
|
f2d017f5816dc45eeaf51e3c46e9eea79e6f4cc8
|
56fd010ffa40a8496a61ca4974ab136c55add8f6
|
/Bruit_TrafficRoutier/Zone_bruit.py
|
3635ac3c5a55fd3a7bc5b31513c5c08989a2ec68
|
[] |
no_license
|
BarAmina/Python_ArcPy
|
1f185894888acf1d8cab20d17b3cbd65b09a82e9
|
ffe5db519ddf60838096b27acb283c3fb44f1ac6
|
refs/heads/master
| 2020-08-04T22:25:03.955430
| 2019-10-11T21:34:23
| 2019-10-11T21:34:23
| 212,297,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,602
|
py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Script name : Zone_bruit.py
# Purpose     : Generate road-traffic noise exposure zones
#-------------------------------------------------------------------------------
# Import arcpy module
import arcpy
# Configure the geoprocessing environment
arcpy.env.overwriteOutput = True
# Input datasets (script parameters)
iris = arcpy.GetParameterAsText(0)
rue = arcpy.GetParameterAsText(1)
# Intermediate datasets
rueBuffer = ur"{0}_buffer".format(rue)
irisClip = ur"{0}_clip".format(iris)
arcpy.AddMessage(u"Calcul du nombre d'habitants exposés au bruit...")
# Add a "Surface_complète" field to the IRIS feature class
arcpy.AddField_management(iris, u"Surface_complète", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Populate the field with the full area of each IRIS
arcpy.CalculateField_management(iris, u"Surface_complète", "[Shape_Area]", "VB", "")
arcpy.AddMessage(u"=> Calcul de la surface totale des IRIS OK")
# Add a "Distance" field to the streets feature class
arcpy.AddField_management(rue, "Distance", "SHORT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Populate the field with a noise range, in metres, that varies with the importance of the street
arcpy.CalculateField_management(rue, "Distance", "Calc_Imp( !IMPORTANCE! )", "PYTHON", "def Calc_Imp(imp):\\n if imp == '1':\\n return 100\\n elif imp == '2':\\n return 50\\n elif imp == '3':\\n return 20\\n else:\\n return 0")
arcpy.AddMessage(u"=> Calcul de la portée du bruit de chaque rue OK")
# Create buffer zones around the streets using the computed distances
arcpy.Buffer_analysis(rue, rueBuffer, "Distance", "FULL", "ROUND", "ALL", "", "PLANAR")
arcpy.AddMessage(u"=> Création des zones d'exposition au bruit OK")
# Clip the IRIS features to the noise exposure zones
arcpy.Clip_analysis(iris, rueBuffer, irisClip, "")
arcpy.AddMessage(u"=> Découpage des IRIS suivant les zones d'exposition au bruit OK")
# Add a "Pop_bruit" field to the analysis feature class
arcpy.AddField_management(irisClip, "Pop_bruit", "SHORT", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
# Populate the field with the IRIS population prorated by the clipped area
arcpy.CalculateField_management(irisClip, "Pop_bruit", "[P07_POP] / [Surface_complète] * [Shape_Area]", "VB", "")
arcpy.AddMessage(u"Calcul du nombre d'habitants exposés au bruit OK")
|
[
"noreply@github.com"
] |
BarAmina.noreply@github.com
|
c4501bbe8f20b230b42ebcc48b2dc670340da586
|
573307ca913f2bc73bcc498efee67cf61fc29800
|
/lessThanList.py
|
dbe067a8601c65e27c6c6b186513e280875254e8
|
[] |
no_license
|
JstD/PPL_FP
|
553d7af369f19a98e7c20d2a37216f253d01a2aa
|
e45e6d7412e60528d1faef9f9ccf09acdb57d30a
|
refs/heads/master
| 2023-03-29T20:12:34.713100
| 2021-04-03T07:00:39
| 2021-04-03T07:00:39
| 354,223,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 92
|
py
|
def lessThan(lst,num):
    return [x for x in lst if x<num]
print(lessThan([1,2,3,4,5],4))
|
[
"dung.truong2000@hcmut.edu.vn"
] |
dung.truong2000@hcmut.edu.vn
|
72f4df5dc0c06e943c09f1516ec2cc3850c46820
|
a6ceb9e0429f02275159d95142e515b01a6d47b5
|
/bmi3d/open_hdf_tables.py
|
ca29b86e76d62db2388d3ccfc665e2a6865cca1f
|
[] |
no_license
|
m-nolan/aopy_dev
|
0e699236ea0b82deed1781cf8dcecbe9ecc56c27
|
794200222e0ac73dc98cf2adc28c63b8e4f18040
|
refs/heads/main
| 2023-04-02T13:15:21.098852
| 2021-03-30T20:01:39
| 2021-03-30T20:01:39
| 344,970,001
| 0
| 0
| null | 2021-03-25T23:11:58
| 2021-03-06T00:35:16
|
Python
|
UTF-8
|
Python
| false
| false
| 566
|
py
|
import tables
import numpy
import matplotlib.pyplot as plt
#replace this with your hdf filename
#fname = 'c:\\Users\\Si Jia\\AppData\\Local\\Temp\\tmp9fswwtwp.h5'
fname = '/tmp/tmpdcbqn2zo.h5'
hdffile = tables.open_file(fname,'r')  # read-only
print(hdffile)
#get table information
# more methods refer to this
# https://www.pytables.org/usersguide/libref/structured_storage.html#tables.Table
table = hdffile.root.task
print(table.description)
#look at cursor trajectory
cursor_coor = table.col('cursor')
plt.plot(cursor_coor[:,0], cursor_coor[:,2])
plt.show()
|
[
"manolan@uw.edu"
] |
manolan@uw.edu
|
fcc0647cba72938ea38d2263d8274cac61efb22f
|
e29713b4e094b17ee6e0fdc88624825cd649abc4
|
/Python Codes 2/phone_letters.py
|
48a727c375ee1bb9e2ec702d9ca9f5aa6f0cae22
|
[] |
no_license
|
debolina-ca/my-projects
|
272fca1c961cc68b85e28226a2afc6d4929ba26e
|
b8c433d5a35645da8ad2fcb28238a40daa18bc9b
|
refs/heads/master
| 2020-12-27T18:42:06.248234
| 2020-02-03T19:31:31
| 2020-02-03T19:31:31
| 238,008,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
# Phone Letters
phone_letters = [
"1 = ''", "2 = 'ABC'", "3 = 'DEF'",
"4 = 'GHI'", "5 = 'JKL'", "6 = 'MNO'",
"7 = 'PQRS'", "8 = 'TUV'", "9 = 'WXYZ'",
"*", "0 = ' '", "#"
]
print(phone_letters[0:3])
print(phone_letters[3:6])
print(phone_letters[6:9])
print(phone_letters[9:12])
|
[
"noreply@github.com"
] |
debolina-ca.noreply@github.com
|
6831b0fbb7a6dadcaef55a6df4497df57ec91df1
|
2b45cbccd03fb09be78b2241d05beeae171a2e18
|
/字节跳动测试开发工程师面试准备/reverseList.py
|
58fa0f6880134d2ea79618baafedb614640d8d8f
|
[] |
no_license
|
MaoningGuan/LeetCode
|
c90f78ce87a8116458a86c49dbe32e172036f7b4
|
62419b49000e79962bcdc99cd98afd2fb82ea345
|
refs/heads/master
| 2023-01-03T14:52:04.278708
| 2020-11-01T12:15:41
| 2020-11-01T12:15:41
| 282,859,997
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,567
|
py
|
# -*- coding: utf-8 -*-
"""
206. Reverse Linked List
Reverse a singly linked list.
Example:
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
Follow-up:
A linked list can be reversed either iteratively or recursively. Can you implement both?
"""
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def generateList(l: list) -> ListNode:
prenode = ListNode(0)
lastnode = prenode
for val in l:
lastnode.next = ListNode(val)
lastnode = lastnode.next
return prenode.next
def printList(l: ListNode):
while l:
print("%d, " % (l.val), end='')
l = l.next
print('')
class Solution:
def reverseList_recursive(self, head: ListNode) -> ListNode:
"""
        Recursive solution
:type head: ListNode
:rtype: ListNode
"""
        # Base case: the current node is None or its next node is None
if (head == None or head.next == None):
return head
        # cur is the last node of the original list
cur = self.reverseList_recursive(head.next)
        # Easier to follow with a diagram:
        # if the list is 1->2->3->4->5, cur is 5 at this point,
        # head is 4, head.next is 5 and head.next.next is None,
        # so head.next.next = head rewires 5 -> 4
head.next.next = head
        # Set head.next to None to avoid a cycle (e.g. 5 -> 4 -> 3 -> 2 <-> 1)
# if head.val == 1:
# head.next = None
head.next = None
        # Every level of the recursion returns cur, i.e. the last node (the new head)
return cur
def reverseList_iterate(self, head: ListNode) -> ListNode:
"""
        Iterative solution
:type head: ListNode
:rtype: ListNode
"""
        # Use two pointers, pre and cur; pre starts as None
pre = None
cur = head
        # Traverse the list; the body of the while loop could be a single line,
        # but it is written out step by step for clarity
while cur:
# pre, cur.next, cur = cur, pre, cur.next
            # Remember the next node
tmp = cur.next
            # Point the current node back to pre
cur.next = pre
            # Advance both pre and cur by one node
pre = cur
cur = tmp
return pre
if __name__ == '__main__':
solution = Solution()
nodes = [1, 2, 3, 4, 5]
linked_list = generateList(nodes)
reversed_link = solution.reverseList_iterate(linked_list)
printList(reversed_link)
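    # The recursive variant can be checked the same way (sketch, not in the original file;
    # a fresh list is built because the previous one has already been reversed in place):
    # linked_list = generateList(nodes)
    # printList(solution.reverseList_recursive(linked_list))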
|
[
"1812711281@qq.com"
] |
1812711281@qq.com
|
935fa502303057323606ce4a66327440a1ef0018
|
f150f2dce61cdbaaa244e118c9e81e83a4886458
|
/eigenpro/training.py
|
5b296f7d919e0d707bb159e6248c2a240aa00f39
|
[
"MIT"
] |
permissive
|
johannespitz/kernel-overfitting
|
e060ca00fef7f7e563616dcc68909737a4449b5a
|
a654aaa49afc302d4e96a99cafebed0f5bbbd825
|
refs/heads/master
| 2020-05-23T22:34:07.844753
| 2020-04-24T19:50:19
| 2020-04-24T19:50:19
| 186,976,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,093
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[28]:
'''
Modified version of https://github.com/EigenPro/EigenPro-tensorflow
in particular run_expr.py
'''
from __future__ import print_function
import keras
import numpy as np
import time
import warnings
from dataclasses import dataclass
from scipy.stats import bernoulli
from distutils.version import StrictVersion
from keras.layers import Dense, Input
from keras.models import Model
from keras import backend as K
from eigenpro import kernels
from eigenpro import mnist
from eigenpro import ciphar
from eigenpro import synthetic
from eigenpro import utils
from eigenpro.backend_extra import hasGPU
from eigenpro.layers import KernelEmbedding, RFF
from eigenpro.optimizers import PSGD, SGD
assert StrictVersion(keras.__version__) >= StrictVersion('2.0.8'), "Requires Keras (>=2.0.8)."
if StrictVersion(keras.__version__) > StrictVersion('2.0.8'):
warnings.warn('\n\nEigenPro-tensorflow has been tested with Keras 2.0.8. '
'If the\ncurrent version (%s) fails, '
'switch to 2.0.8 by command,\n\n'
'\tpip install Keras==2.0.8\n\n' %(keras.__version__), Warning)
assert keras.backend.backend() == u'tensorflow', "Requires Tensorflow (>=1.2.1)."
assert hasGPU(), "Requires GPU."
# In[29]:
'''
Modified version of https://github.com/EigenPro/EigenPro-tensorflow
in particular kernels.py
'''
def D2(X, Y):
XX = np.sum(np.square(X), axis = 1, keepdims=True)
if X is Y:
YY = XX
else:
YY = np.sum(np.square(Y), axis = 1, keepdims=True)
XY = np.dot(X, np.transpose(Y))
d2 = np.reshape(XX, (np.shape(X)[0], 1)) + np.reshape(YY, (1, np.shape(Y)[0])) - 2 * XY
return d2
def Gaussian(X, Y, s):
assert s > 0
d2 = D2(X, Y)
gamma = np.float32(1. / (2 * s ** 2))
G = np.exp(-gamma * np.clip(d2, 0, None))
return G
def Laplace(X, Y, s):
assert s > 0
d2 = np.clip(D2(X, Y), 0, None)
d = np.sqrt(d2)
G = np.exp(- d / s)
return G
# In[30]:
def add_noise(y, noise):
n, dim = y.shape
y = np.argmax(y, axis=1)
change = np.array(bernoulli.rvs(noise / 100, size=n))
change = change * np.random.randint(dim, size=n)
y = np.mod(y + change, dim)
if noise == 100:
y = np.random.randint(dim, size=n)
return keras.utils.to_categorical(y, dim)
def my_norm(alpha, K):
cross_norm = alpha.T.dot(K).dot(alpha)
return np.sum(np.sqrt(np.diag(cross_norm)))
# In[38]:
def training(data_set_dict, kernel_dict, size_list, noise_list, MAXEPOCH=100):
trainers = {}
for dataset_name, ((x_train_full, y_train_full), (x_test_full, y_test_full)) in data_set_dict.items():
_, num_classes = y_train_full.shape
for kernel_name, (kernel_sgd, kernel_inv) in kernel_dict.items():
for size in size_list:
for noise in noise_list:
name = 'D:' + dataset_name + ' K:' + kernel_name + ' S:' + str(size) + ' N:' + str(noise)
print(name)
trainer = {'dataset': dataset_name, 'kernel': kernel_name, 'size': size, 'noise': noise}
x_train = x_train_full[0:size]
x_test = x_test_full
y_train = add_noise(y_train_full[0:size], noise)
y_test = add_noise(y_test_full, noise)
# Set the hyper-parameters.
bs = 256 # size of the mini-batch
M = min(size, 5000) # (EigenPro) subsample size
k = min(size - 1, 160) # (EigenPro) top-k eigensystem
n, D = x_train.shape # (n_sample, n_feature)
# Calculate step size and (Primal) EigenPro preconditioner.
kf, scale, s0 = utils.asm_eigenpro_f(
x_train, kernel_sgd, M, k, 1, in_rkhs=True)
eta = np.float32(1.5 / s0) # 1.5 / s0
eta = eta * num_classes # correction due to mse loss
input_shape = (D+1,) # n_feature, (sample) index
ix = Input(shape=input_shape, dtype='float32', name='indexed-feat-')
x, index = utils.separate_index(ix) # features, sample_id
kfeat = KernelEmbedding(kernel_sgd, x_train, input_shape=(D,))(x)
# Assemble kernel EigenPro trainer.
y = Dense(num_classes, input_shape=(n,),
activation='linear',
kernel_initializer='zeros',
use_bias=False,
name='trainable')(kfeat)
model = Model(ix, y)
model.compile(loss='mse', optimizer=PSGD(pred_t=y,
index_t=index,
eta=scale*eta,
eigenpro_f=lambda g: kf(g, kfeat)),
metrics=['accuracy'])
model.summary(print_fn=print)
print()
initial_epoch=0
np.random.seed(1) # Keras uses numpy random number generator
train_ts = 0 # training time in seconds
print("Stochastic Gradient Descent")
for epoch in range(1, MAXEPOCH + 1):
start = time.time()
model.fit(
utils.add_index(x_train), y_train,
batch_size=bs, epochs=epoch, verbose=0,
validation_data=(utils.add_index(x_test), y_test),
initial_epoch=initial_epoch)
train_ts += time.time() - start
tr_score = model.evaluate(utils.add_index(x_train), y_train, verbose=0)
te_score = model.evaluate(utils.add_index(x_test), y_test, verbose=0)
initial_epoch = epoch
if tr_score[1] == 1.0:
trainer['sgd_ce'] = 1 - te_score[1]
trainer['iterations'] = epoch
print("train error: %.2f%%\ttest error: %.2f%% (%d epochs, %.2f seconds)" %
((1 - tr_score[1]) * 100, (1 - te_score[1]) * 100, epoch, train_ts))
print("Zero Train Error")
print()
break
if epoch == MAXEPOCH:
trainer['sgd_ce'] = 1 - te_score[1]
trainer['iterations'] = 999999
print("train error: %.2f%%\ttest error: %.2f%% (%d epochs, %.2f seconds)" %
((1 - tr_score[1]) * 100, (1 - te_score[1]) * 100, epoch, train_ts))
print("Did not reach Zero Train Error")
print()
break
if epoch % 5 == 1:
print("train error: %.2f%%\ttest error: %.2f%% (%d epochs, %.2f seconds)" %
((1 - tr_score[1]) * 100, (1 - te_score[1]) * 100, epoch, train_ts))
alpah_sgd = np.array(model.get_layer("trainable").get_weights()[0])
del model
utils.reset()
# linear system
K_train = kernel_inv(x_train, x_train)
if size <= 20000:
alpha = np.linalg.solve(K_train, y_train)
## this was a test -> alpha and the trainable layer are interchangable
# alpha = model.get_layer("trainable").get_weights()[0]
K_test = kernel_inv(x_train, x_test)
pred = K_test.T.dot(alpha)
miss_count = np.count_nonzero(np.argmax(pred, axis=1) - np.argmax(y_test, axis=1))
miss_rate = miss_count / y_test.shape[0]
trainer['inv_ce'] = miss_rate
print("Linear Interpolation")
print("Classification Error = " + str(miss_rate))
print()
trainer['inv_norm'] = my_norm(alpha, K_train)
trainer['sgd_norm'] = my_norm(alpah_sgd, K_train)
trainers[name] = trainer
K_train = None
K_test = None
utils.reset()
print()
print()
print(trainers)
print()
print()
print("Done")
return trainers
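# Usage sketch (assumptions, not from the original file). `training` expects:
#   data_set_dict: {name: ((x_train, y_train), (x_test, y_test))} with one-hot label arrays,
#   kernel_dict:   {name: (kernel_for_sgd, kernel_for_inverse)}, e.g. a Keras-backend kernel
#                  from eigenpro.kernels for the SGD model and the numpy Gaussian/Laplace
#                  defined above for the linear-system solve,
#   size_list / noise_list: training-set sizes and label-noise percentages to sweep.
# results = training(data_set_dict, kernel_dict, size_list=[1000, 5000], noise_list=[0, 10])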
|
[
"johannes.pitz@tum.de"
] |
johannes.pitz@tum.de
|
3c8054330d2faa9eac755a30f468ca9a147bf06a
|
673f1a46e00cd3239e459138d9e592ce34dc5b26
|
/demo/migrations/0001_initial.py
|
90360a97867d16368e68e66a47ba3821b8256616
|
[] |
no_license
|
jw1174184386/rolemanage
|
59db121dfc91fb557d036c8066f7328a9df3c87a
|
e0f844b6890c70702d3c4333741336570b6a384f
|
refs/heads/master
| 2020-04-15T12:13:19.330381
| 2019-01-08T14:24:08
| 2019-01-08T14:24:08
| 164,665,320
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,849
|
py
|
# Generated by Django 2.1.2 on 2019-01-08 03:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Action',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='动作名')),
('action', models.CharField(max_length=50, verbose_name='具体动作')),
],
options={
'verbose_name': '动作表',
'verbose_name_plural': '动作表',
},
),
migrations.CreateModel(
name='Menu',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='菜单名')),
('parent', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.Menu')),
],
options={
'verbose_name': '菜单表',
'verbose_name_plural': '菜单表',
},
),
migrations.CreateModel(
name='Permission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='权限名称')),
('url', models.CharField(max_length=200, verbose_name='权限URL')),
('menu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.Menu')),
],
options={
'verbose_name': '权限表',
'verbose_name_plural': '权限表',
},
),
migrations.CreateModel(
name='PermissionAction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.Action', verbose_name='动作')),
('permission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.Permission', verbose_name='权限')),
],
options={
'verbose_name': '权限动作中间表',
'verbose_name_plural': '权限动作中间表',
},
),
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='角色名称')),
],
options={
'verbose_name': '角色表',
'verbose_name_plural': '角色表',
},
),
migrations.CreateModel(
name='RolePermissionAction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('permission_action', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.PermissionAction')),
('role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.Role')),
],
options={
'verbose_name': '角色和权限动作中间表',
'verbose_name_plural': '角色和权限动作中间表',
},
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=50, verbose_name='用户名')),
('password', models.CharField(max_length=32, verbose_name='密码')),
],
options={
'verbose_name': '用户表',
'verbose_name_plural': '用户表',
},
),
migrations.CreateModel(
name='UserRole',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.Role')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='demo.User')),
],
options={
'verbose_name': '用户和角色之间的中间表',
'verbose_name_plural': '用户和角色之间的中间表',
},
),
]
|
[
"jw1174184386@163.com"
] |
jw1174184386@163.com
|
29274b90fb731a1e2a67393d3e011a6ce556f727
|
965a462e251e705dee793cc2ab9ef2acaa1f2b1f
|
/SCons_zipit.py
|
752f39714f7581bb78d9b78fe6771c68640fcc24
|
[] |
no_license
|
Embedded-Systems-Spring-2020/lab4
|
786e4dca964c6294b606ae136ce151ef26d64e6b
|
916539dbe90fd11e869fa20c6233359d20f640b9
|
refs/heads/master
| 2021-01-02T12:55:17.007775
| 2020-02-24T17:03:27
| 2020-02-24T17:03:27
| 239,633,582
| 0
| 1
| null | 2020-02-24T05:06:52
| 2020-02-10T23:15:58
|
C
|
UTF-8
|
Python
| false
| false
| 2,355
|
py
|
# .. "Copyright (c) 2008 Robert B. Reese, Bryan A. Jones, J. W. Bruce ("AUTHORS")"
# All rights reserved.
# (R. Reese, reese_AT_ece.msstate.edu, Mississippi State University)
# (B. A. Jones, bjones_AT_ece.msstate.edu, Mississippi State University)
# (J. W. Bruce, jwbruce_AT_ece.msstate.edu, Mississippi State University)
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement is
# hereby granted, provided that the above copyright notice, the following
# two paragraphs and the authors appear in all copies of this software.
#
# IN NO EVENT SHALL THE "AUTHORS" BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE "AUTHORS"
# HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE "AUTHORS" SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE "AUTHORS" HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# Please maintain this header in its entirety when copying/modifying
# these files.
#
# ***************************************************************
# SCons_zipit.py - Build docs then create a .zip for distribution
# ***************************************************************
# Bring in path for SCons use.
import os
env = Environment(ENV = {'PATH' : os.environ['PATH']})
# Update docs.
env.Execute('doxygen')
# Copy updated CodeChat docs into Doxygen output.
env.Execute('sphinx-build -d _build\\doctrees . _build\\html')
env.Execute(Delete('docs/sphinx', must_exist = 0))
env.Execute(Copy('docs/sphinx', '_build/html'))
# Define a single target to build the zip file.
zip_file = '../pic24_code_examples.zip'
env.Default(env.Zip(zip_file, [
'readme.txt',
'standard_header.txt',
'bin',
'bootloader',
'docs',
'hex',
'chap03',
'chap04',
'chap06',
'chap07',
'chap08',
'chap09',
'chap10',
'chap11',
'chap12',
'chap13',
'BUILD_DIR',
'esos',
'lib/lkr',
'lib/src',
'lib/include',
'explorer16_100p',
'util' ]))
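
# Typical invocation (an assumption, not stated in this script): run SCons against
# this file explicitly from the project root, e.g. "scons -f SCons_zipit.py", so the
# doc build steps and the Zip target above are all executed in one pass.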
|
[
"49951619+ryan-shoemake@users.noreply.github.com"
] |
49951619+ryan-shoemake@users.noreply.github.com
|
e5159573514325ddff9f0569957cbbf720a52f0e
|
a34ec07c3464369a88e68c9006fa1115f5b61e5f
|
/B_HashTable/Basic/L1_2593_Find_Score_of_an_Array_After_Marking_All_Elements.py
|
c21970e05cffcfd0aa4f1a71aaa041f071a3ed8a
|
[] |
no_license
|
824zzy/Leetcode
|
9220f2fb13e03d601d2b471b5cfa0c2364dbdf41
|
93b7f4448a366a709214c271a570c3399f5fc4d3
|
refs/heads/master
| 2023-06-27T02:53:51.812177
| 2023-06-16T16:25:39
| 2023-06-16T16:25:39
| 69,733,624
| 14
| 3
| null | 2022-05-25T06:48:38
| 2016-10-01T10:56:07
|
Python
|
UTF-8
|
Python
| false
| false
| 493
|
py
|
""" https://leetcode.com/problems/find-score-of-an-array-after-marking-all-elements/
Implement the problem description using a seen array
"""
from header import *
class Solution:
def findScore(self, A: List[int]) -> int:
seen = [0]*(len(A)+2)
ans = 0
for i, x in sorted(enumerate(A), key=lambda x: x[1]):
if not seen[i]:
ans += x
seen[i] = 1
seen[i-1] = 1
seen[i+1] = 1
return ans
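
# Minimal smoke test added for illustration; the input list and the expected score
# are assumptions worked out by hand from the marking rule above, not part of the
# original solution.
if __name__ == "__main__":
    # 1 (index 1), 2 (index 5) and 4 (index 3) get added: 1 + 2 + 4 = 7
    print(Solution().findScore([2, 1, 3, 4, 5, 2]))  # expected output: 7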
|
[
"zhengyuan.zhu@mavs.uta.edu"
] |
zhengyuan.zhu@mavs.uta.edu
|
574a978e3031c00ce0d37a59ee379800c4f2d854
|
a3b306df800059a5b74975793251a28b8a5f49c7
|
/Graphs/LX-2/molecule_otsu = False/BioImageXD-1.0/ITK/lib/InsightToolkit/WrapITK/Configuration/Languages/SwigInterface/pygccxml-1.0.0/pygccxml/parser/scanner.py
|
967c877a080cf5e421a7027aab7eb78b513ab7b5
|
[
"BSL-1.0"
] |
permissive
|
giacomo21/Image-analysis
|
dc17ba2b6eb53f48963fad931568576fda4e1349
|
ea8bafa073de5090bd8f83fb4f5ca16669d0211f
|
refs/heads/master
| 2016-09-06T21:42:13.530256
| 2013-07-22T09:35:56
| 2013-07-22T09:35:56
| 11,384,784
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,301
|
py
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
import types
import pprint
import xml.sax
import warnings
import xml.sax.handler
from pygccxml.declarations import *
from pygccxml import utils
##convention
#XML_NN - XML Node Name
#XML_AN - XML Attribute Name
#also those constants are sorted for easy searching.
XML_AN_ABSTRACT = "abstract"
XML_AN_ACCESS = "access"
XML_AN_ALIGN = "align"
XML_AN_ARTIFICIAL = "artificial"
XML_AN_ATTRIBUTES = "attributes"
XML_AN_BASE_TYPE = "basetype"
XML_AN_BASES = "bases"
XML_AN_BITS = "bits"
XML_AN_CONST = "const"
XML_AN_CONTEXT = "context"
XML_AN_CVS_REVISION = "cvs_revision"
XML_AN_DEFAULT = "default"
XML_AN_DEMANGLED = "demangled"
XML_AN_EXTERN = "extern"
XML_AN_FILE = "file"
XML_AN_ID = "id"
XML_AN_INCOMPLETE = "incomplete"
XML_AN_INIT = "init"
XML_AN_LINE = "line"
XML_AN_MANGLED = "mangled"
XML_AN_MAX = "max"
XML_AN_MEMBERS = "members"
XML_AN_MUTABLE = "mutable"
XML_AN_NAME = "name"
XML_AN_OFFSET = "offset"
XML_AN_PURE_VIRTUAL = "pure_virtual"
XML_AN_RESTRICT = "restrict"
XML_AN_RETURNS = "returns"
XML_AN_SIZE = "size"
XML_AN_STATIC = "static"
XML_AN_THROW = "throw"
XML_AN_TYPE = "type"
XML_AN_VIRTUAL = "virtual"
XML_AN_VOLATILE = "volatile"
XML_NN_ARGUMENT = "Argument"
XML_NN_ARRAY_TYPE = "ArrayType"
XML_NN_CASTING_OPERATOR = "Converter"
XML_NN_CLASS = "Class"
XML_NN_CONSTRUCTOR = "Constructor"
XML_NN_CV_QUALIFIED_TYPE = "CvQualifiedType"
XML_NN_DESTRUCTOR = "Destructor"
XML_NN_ELLIPSIS = "Ellipsis"
XML_NN_ENUMERATION = "Enumeration"
XML_NN_ENUMERATION_VALUE = "EnumValue"
XML_NN_FIELD = "Field"
XML_NN_FILE = "File"
XML_NN_FUNCTION = "Function"
XML_NN_FUNCTION_TYPE = "FunctionType"
XML_NN_FUNDAMENTAL_TYPE = "FundamentalType"
XML_NN_FREE_OPERATOR = "OperatorFunction"
XML_NN_GCC_XML = "GCC_XML"
XML_NN_MEMBER_OPERATOR = "OperatorMethod"
XML_NN_METHOD = "Method"
XML_NN_METHOD_TYPE = "MethodType"
XML_NN_NAMESPACE = "Namespace"
XML_NN_OFFSET_TYPE = "OffsetType"
XML_NN_POINTER_TYPE = "PointerType"
XML_NN_REFERENCE_TYPE = "ReferenceType"
XML_NN_ROOT = "GCC_XML"
XML_NN_STRUCT = "Struct"
XML_NN_TYPEDEF = "Typedef"
XML_NN_UNION = "Union"
XML_NN_VARIABLE = "Variable"
class scanner_t( xml.sax.handler.ContentHandler ):
def __init__(self, gccxml_file, decl_factory, *args ):
xml.sax.handler.ContentHandler.__init__(self, *args )
self.logger = utils.loggers.gccxml
self.gccxml_file = gccxml_file
#defining parsing tables
self.__readers = {
XML_NN_FILE : self.__read_file
, XML_NN_NAMESPACE : self.__read_namespace
, XML_NN_ENUMERATION : self.__read_enumeration
, XML_NN_ENUMERATION_VALUE : self.__read_enumeration_value
, XML_NN_ARRAY_TYPE : self.__read_array_type
, XML_NN_CV_QUALIFIED_TYPE : self.__read_cv_qualified_type
, XML_NN_POINTER_TYPE : self.__read_pointer_type
, XML_NN_REFERENCE_TYPE : self.__read_reference_type
, XML_NN_FUNDAMENTAL_TYPE : self.__read_fundamental_type
, XML_NN_ARGUMENT : self.__read_argument
, XML_NN_FUNCTION_TYPE : self.__read_function_type
, XML_NN_METHOD_TYPE : self.__read_method_type
, XML_NN_OFFSET_TYPE : self.__read_offset_type
, XML_NN_TYPEDEF : self.__read_typedef
, XML_NN_VARIABLE : self.__read_variable
, XML_NN_CLASS : self.__read_class
, XML_NN_STRUCT : self.__read_struct
, XML_NN_UNION : self.__read_union
, XML_NN_FIELD : self.__read_field
, XML_NN_CASTING_OPERATOR : self.__read_casting_operator
, XML_NN_CONSTRUCTOR : self.__read_constructor
, XML_NN_DESTRUCTOR : self.__read_destructor
, XML_NN_FUNCTION : self.__read_function
, XML_NN_FREE_OPERATOR : self.__read_free_operator
, XML_NN_MEMBER_OPERATOR : self.__read_member_operator
, XML_NN_METHOD : self.__read_method
, XML_NN_GCC_XML : self.__read_version
, XML_NN_ELLIPSIS : self.__read_ellipsis
}
self.deep_declarations = [
XML_NN_CASTING_OPERATOR
, XML_NN_CONSTRUCTOR
, XML_NN_DESTRUCTOR
, XML_NN_ENUMERATION
, XML_NN_FILE
, XML_NN_FUNCTION
, XML_NN_FREE_OPERATOR
, XML_NN_MEMBER_OPERATOR
, XML_NN_METHOD
, XML_NN_FUNCTION_TYPE
, XML_NN_METHOD_TYPE
]
assert isinstance( decl_factory, decl_factory_t )
self.__decl_factory = decl_factory
#mapping from id -> decl
self.__declarations = {}
#list of all read declarations
self.__calldefs = []
#list of enums I need later
self.__enums = []
#mapping from id -> type
self.__types = {}
#mapping from id -> file
self.__files = {}
#mapping between decl id -> access
self.__access = {}
#current object under construction
self.__inst = None
#mapping from id to members
self.__members = {}
self.__compiler = None
def read( self ):
xml.sax.parse( self.gccxml_file, self )
def endDocument( self ):
#updating membership
members_mapping = {}
for gccxml_id, members in self.__members.iteritems():
decl = self.__declarations.get( gccxml_id, None )
if not decl or not isinstance( decl, scopedef_t):
continue
members_mapping[ id( decl ) ] = members
self.__members = members_mapping
def declarations(self):
return self.__declarations
def calldefs( self ):
return self.__calldefs
def enums(self):
return self.__enums
def types(self):
return self.__types
def files(self):
return self.__files
def access(self):
return self.__access
def members(self):
return self.__members
def startElementNS(self, name, qname, attrs):
return self.startElement( name[1], attrs )
def endElementNS(self, name, qname):
return self.endElement( name[1] )
def startElement(self, name, attrs):
try:
if name not in self.__readers:
return
obj = self.__readers[name]( attrs )
if not obj:
return #it means that we worked on internals
#for example EnumValue of function argument
if name in self.deep_declarations:
self.__inst = obj
self.__read_access( attrs )
element_id = attrs.get(XML_AN_ID, None)
if isinstance( obj, declaration_t ):
obj.compiler = self.__compiler
self.__update_membership( attrs )
self.__declarations[ element_id ] = obj
if not isinstance( obj, namespace_t ):
self.__read_location( obj, attrs )
if isinstance( obj, class_t):
self.__read_bases( obj, attrs )
self.__read_artificial(obj, attrs)
self.__read_mangled( obj, attrs)
self.__read_demangled( obj, attrs)
self.__read_attributes(obj, attrs)
elif isinstance( obj, type_t ):
self.__types[ element_id ] = obj
self.__read_byte_size(obj, attrs)
self.__read_byte_align(obj, attrs)
elif isinstance( obj, types.StringTypes ):
self.__files[ element_id ] = obj
else:
self.logger.warning( 'Unknown object type has been found.'
+ ' Please report this bug to pygccxml development team.' )
except Exception, error:
            msg = 'error occurred while parsing element with name "%s" and attrs "%s".'
msg = msg + os.linesep + 'Error: %s.' % str( error )
self.logger.error( msg % ( name, pprint.pformat( attrs.keys() ) ) )
raise
def endElement(self, name):
if name in self.deep_declarations:
self.__inst = None
def __read_location(self, decl, attrs):
decl.location = location_t( file_name=attrs[XML_AN_FILE], line=int(attrs[XML_AN_LINE]))
def __update_membership(self, attrs):
parent = attrs.get( XML_AN_CONTEXT, None )
if not parent:
return
if not self.__members.has_key( parent ):
self.__members[ parent ] = []
self.__members[parent].append( attrs[XML_AN_ID] )
def __read_members(self, decl, attrs ):
decl.declarations = attrs.get(XML_AN_MEMBERS, "")
def __read_bases(self, decl, attrs ):
decl.bases = attrs.get( XML_AN_BASES, "" )
def __read_artificial( self, decl, attrs ):
decl.is_artificial = attrs.get( XML_AN_ARTIFICIAL, False )
def __read_mangled( self, decl, attrs ):
decl.mangled = attrs.get( XML_AN_MANGLED, None )
def __read_demangled( self, decl, attrs ):
decl.demangled = attrs.get( XML_AN_DEMANGLED, None )
def __read_attributes( self, decl, attrs ):
decl.attributes = attrs.get( XML_AN_ATTRIBUTES, None )
def __read_access( self, attrs ):
self.__access[ attrs[XML_AN_ID] ] = attrs.get( XML_AN_ACCESS, ACCESS_TYPES.PUBLIC )
def __read_byte_size (self, decl, attrs):
"Using duck typing to set the size instead of in constructor"
size = attrs.get(XML_AN_SIZE, 0)
decl.byte_size = int(size)/8 # Make sure the size is in bytes instead of bits
def __read_byte_offset (self, decl, attrs):
"Using duck typing to set the offset instead of in constructor"
offset = attrs.get(XML_AN_OFFSET, 0)
        decl.byte_offset = int(offset)/8 # Make sure the offset is in bytes instead of bits
def __read_byte_align (self, decl, attrs):
"Using duck typing to set the alignment"
align = attrs.get(XML_AN_ALIGN, 0)
        decl.byte_align = int(align)/8 # Make sure the alignment is in bytes instead of bits
def __read_root(self, attrs):
pass
def __read_file( self, attrs ):
return attrs.get( XML_AN_NAME, '' )
def __read_namespace(self, attrs):
ns_name = attrs.get( XML_AN_NAME, '' )
if '.' in ns_name:
#if '.' in namespace then this is mangled namespace -> in c++ namespace{...}
            #that is almost true: gcc mangles the name using the top-level file name.
            #almost all file names contain '.'
ns_name = ''
return self.__decl_factory.create_namespace( name=ns_name )
def __read_enumeration(self, attrs):
enum_name = attrs.get( XML_AN_NAME, '' )
if '$_' in enum_name or '._' in enum_name:
#it means that this is unnamed enum. in c++ enum{ x };
enum_name = ''
decl = self.__decl_factory.create_enumeration( name=enum_name )
self.__read_byte_size(decl, attrs)
self.__read_byte_align(decl, attrs)
self.__enums.append( decl )
return decl
def __read_enumeration_value( self, attrs ):
name = attrs.get( XML_AN_NAME, '' )
num = int(attrs[XML_AN_INIT])
self.__inst.append_value(name, num)
def __guess_int_value( self, value_as_str ):
#returns instance of int or None
        #if gcc compiled the code, then it is correct!
numeric_suffix_letters = 'UuLlFf'
for s in numeric_suffix_letters:
value_as_str = value_as_str.replace( s, '' )
try:
return int( value_as_str )
except ValueError:
try:
return int( value_as_str, 16 )
except ValueError:
return None
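    # Worked examples for __guess_int_value (assumed literals traced by hand through
    # the code above, not taken from real gccxml output):
    #   "10ul"        -> 'u'/'l' suffixes stripped -> int("10")               -> 10
    #   "0x1A"        -> int() fails, int(..., 16) succeeds                   -> 26
    #   "sizeof(int)" -> both conversions fail                                -> None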
def __read_array_type( self, attrs ):
type_ = attrs[ XML_AN_TYPE ]
size = self.__guess_int_value( attrs.get(XML_AN_MAX, '' ) )
if size is None:
size = array_t.SIZE_UNKNOWN
msg = 'unable to find out array size from expression "%s"' % attrs[ XML_AN_MAX ]
# warning is absolutely useless without much clue
# warnings.warn( msg )
return array_t( type_, size + 1 )
def __read_cv_qualified_type( self, attrs ):
if attrs.has_key( XML_AN_CONST ):
return const_t( attrs[XML_AN_TYPE] )
elif attrs.has_key( XML_AN_VOLATILE ):
return volatile_t( attrs[XML_AN_TYPE] )
elif attrs.has_key( XML_AN_RESTRICT ):
return restrict_t( attrs[XML_AN_TYPE] )
else:
assert 0
def __read_pointer_type( self, attrs ):
return pointer_t( attrs[XML_AN_TYPE] )
def __read_reference_type( self, attrs ):
return reference_t( attrs[XML_AN_TYPE] )
def __read_fundamental_type(self, attrs ):
try:
return FUNDAMENTAL_TYPES[ attrs.get( XML_AN_NAME, '' ) ]
except KeyError:
raise RuntimeError( "pygccxml error: unable to find fundamental type with name '%s'."
% attrs.get( XML_AN_NAME, '' ) )
def __read_offset_type( self,attrs ):
base = attrs[ XML_AN_BASE_TYPE ]
type_ = attrs[ XML_AN_TYPE ]
if '0.9' in self.__compiler:
return pointer_t( member_variable_type_t( class_inst=base, variable_type=type_ ) )
else:
return member_variable_type_t( class_inst=base, variable_type=type_ )
def __read_argument( self, attrs ):
if isinstance( self.__inst, calldef_type_t ):
self.__inst.arguments_types.append( attrs[XML_AN_TYPE] )
else:
argument = argument_t()
argument.name = attrs.get( XML_AN_NAME, 'arg%d' % len(self.__inst.arguments) )
argument.type = attrs[XML_AN_TYPE]
argument.default_value = attrs.get( XML_AN_DEFAULT, None )
self.__read_attributes( argument, attrs )
if argument.default_value == '<gccxml-cast-expr>':
argument.default_value = None
self.__inst.arguments.append( argument )
def __read_ellipsis( self, attrs ):
if isinstance( self.__inst, calldef_type_t ):
self.__inst.arguments_types.append( '...' )
else:
argument = argument_t( type='...' )
self.__inst.arguments.append( argument )
def __read_calldef( self, calldef, attrs, is_declaration ):
#destructor for example doesn't have return type
calldef.return_type = attrs.get( XML_AN_RETURNS, None )
if is_declaration:
self.__calldefs.append( calldef )
calldef.name = attrs.get(XML_AN_NAME, '')
calldef.has_extern = attrs.get( XML_AN_EXTERN, False )
throw_stmt = attrs.get( XML_AN_THROW, None )
if None is throw_stmt:
calldef.does_throw = True
calldef.exceptions = []
elif "" == throw_stmt:
calldef.does_throw = False
calldef.exceptions = []
else:
calldef.does_throw = True
calldef.exceptions = throw_stmt.split()
def __read_member_function( self, calldef, attrs, is_declaration ):
self.__read_calldef( calldef, attrs, is_declaration )
calldef.has_const = attrs.get( XML_AN_CONST, False )
if is_declaration:
calldef.has_static = attrs.get( XML_AN_STATIC, False )
if attrs.has_key( XML_AN_PURE_VIRTUAL ):
calldef.virtuality = VIRTUALITY_TYPES.PURE_VIRTUAL
elif attrs.has_key( XML_AN_VIRTUAL ):
calldef.virtuality = VIRTUALITY_TYPES.VIRTUAL
else:
calldef.virtuality = VIRTUALITY_TYPES.NOT_VIRTUAL
else:
calldef.class_inst = attrs[XML_AN_BASE_TYPE]
def __read_function_type(self, attrs):
answer = free_function_type_t()
self.__read_calldef( answer, attrs, False )
return answer
def __read_method_type(self, attrs):
answer = member_function_type_t()
self.__read_member_function( answer, attrs, False )
return answer
def __read_typedef(self, attrs ):
return self.__decl_factory.create_typedef( name=attrs.get( XML_AN_NAME, '' ), type=attrs[XML_AN_TYPE])
def __read_variable(self, attrs ):
type_qualifiers = type_qualifiers_t()
type_qualifiers.has_mutable = attrs.get(XML_AN_MUTABLE, False)
type_qualifiers.has_static = attrs.get(XML_AN_EXTERN, False)
bits = attrs.get( XML_AN_BITS, None )
if bits:
bits = int( bits )
decl = self.__decl_factory.create_variable( name=attrs.get( XML_AN_NAME, '' )
, type=attrs[XML_AN_TYPE]
, type_qualifiers=type_qualifiers
, value=attrs.get( XML_AN_INIT, None )
, bits=bits)
self.__read_byte_offset(decl, attrs)
return decl
    __read_field = __read_variable #just a synonym
def __read_class_impl(self, class_type, attrs):
decl = None
name = attrs.get(XML_AN_NAME, '')
if '$' in name or '.' in name:
name = ''
if attrs.has_key( XML_AN_INCOMPLETE ):
decl = self.__decl_factory.create_class_declaration(name=name)
else:
decl = self.__decl_factory.create_class( name=name, class_type=class_type )
if attrs.get( XML_AN_ABSTRACT, False ):
decl.is_abstract = True
else:
decl.is_abstract = False
self.__read_byte_size(decl, attrs)
self.__read_byte_align(decl, attrs)
return decl
def __read_class( self, attrs ):
return self.__read_class_impl( CLASS_TYPES.CLASS, attrs )
def __read_struct( self, attrs ):
return self.__read_class_impl( CLASS_TYPES.STRUCT, attrs )
def __read_union( self, attrs ):
return self.__read_class_impl( CLASS_TYPES.UNION, attrs )
def __read_casting_operator(self, attrs ):
operator = self.__decl_factory.create_casting_operator()
self.__read_member_function( operator, attrs, True )
return operator
def __read_constructor( self, attrs ):
constructor = self.__decl_factory.create_constructor()
self.__read_member_function( constructor, attrs, True )
return constructor
def __read_function(self, attrs):
gfunction = self.__decl_factory.create_free_function()
self.__read_calldef( gfunction, attrs, True )
return gfunction
def __read_method(self, attrs):
mfunction = self.__decl_factory.create_member_function()
self.__read_member_function( mfunction, attrs, True )
return mfunction
def __read_destructor(self, attrs):
destructor = self.__decl_factory.create_destructor()
self.__read_member_function( destructor, attrs, True )
destructor.name = '~' + destructor.name
return destructor
def __read_free_operator(self, attrs ):
operator = self.__decl_factory.create_free_operator()
self.__read_member_function( operator, attrs, True )
if 'new' in operator.name or 'delete' in operator.name:
operator.name = 'operator ' + operator.name
else:
operator.name = 'operator' + operator.name
return operator
def __read_member_operator(self, attrs):
operator = self.__decl_factory.create_member_operator()
self.__read_member_function( operator, attrs, True )
if 'new' in operator.name or 'delete' in operator.name:
operator.name = 'operator ' + operator.name
else:
operator.name = 'operator' + operator.name
return operator
def __read_version(self, attrs):
logger = utils.loggers.cxx_parser
version = float( attrs.get(XML_AN_CVS_REVISION, 0.6) )
if version is None:
logger.info ( 'GCCXML version - 0.6' )
self.__compiler = compilers.GCC_XML_06
elif version <= 1.114:
logger.info ( 'GCCXML version - 0.7' )
self.__compiler = compilers.GCC_XML_07
elif version in ( 1.115, 1.116, 1.117, 1.118, 1.119, 1.120, 1.121 ):
logger.info ( 'GCCXML version - 0.9 BUGGY' )
self.__compiler = compilers.GCC_XML_09_BUGGY
else:
logger.info ( 'GCCXML version - 0.9' )
self.__compiler = compilers.GCC_XML_09
|
[
"fede.anne95@hotmail.it"
] |
fede.anne95@hotmail.it
|
d1d2cb8fa62f4abf11d0f3c031e100adb3008d82
|
6ed01f4503fc9de234a561c945adff7cf4b1c81b
|
/ncar_lib/incites_authors/incites_people_reporter.py
|
24328f4c760673ff83a6c2cbdde1365ae3f9d3f8
|
[] |
no_license
|
ostwald/python-lib
|
b851943c913a68424a05ce3c7b42878ff9519f68
|
9acd97ffaa2f57b3e9e632e1b75016549beb29e5
|
refs/heads/master
| 2021-10-28T06:33:34.156095
| 2021-10-21T23:54:49
| 2021-10-21T23:54:49
| 69,060,616
| 0
| 1
| null | 2018-06-21T16:05:30
| 2016-09-23T21:04:46
|
Roff
|
UTF-8
|
Python
| false
| false
| 5,465
|
py
|
"""
"""
import os, sys
from UserList import UserList
from find_person import PersonFinder, resolveFullName
from data_filter import FilteredAuthorData
from HyperText.HTML40 import *
from html import HtmlDocument
class InCitesAuthorInfo:
def __init__ (self, id, finder, matches):
"""
names are derived from inCitesAuthor data
matches come from peopleDB
"""
self.id = id
# self.inCitesName = fullname
for attr in ['fullname', 'firstName', 'middleName', 'lastName', 'note']:
setattr (self, attr, getattr (finder, attr))
self.matches = matches
self.numMatches = len(matches)
class SimpleReporter (UserList):
def __init__ (self):
self.data = []
self.recordsToReport = 10
self.errors = []
self.notes = []
self.people = FilteredAuthorData().people # UniquePeople
# self.report()
self.getAuthorMatchInfo()
def getAuthorMatchInfo (self):
person_counter = 1
max_count = self.recordsToReport or len(self.people.keys())
for fullname in self.people.keys()[:max_count]:
try:
finder = PersonFinder (fullname)
candidates = finder.candidates
id = 'author-%d' % person_counter
person_counter += 1
# print 'processing authorInfo for ' + fullname
authorInfo = InCitesAuthorInfo (id, finder, candidates)
self.append(authorInfo)
except KeyError, e:
print 'ERROR', e
self.errors.append(fullname + ": " + str(e))
def report(self):
for authorInfo in self:
try:
if authorInfo.numMatches == 1:
continue
if authorInfo.note:
self.notes.append(authorInfo.note)
## print '\n%s (%d)' % (fullname, size)
print "\n%d candidates found for '%s' (%s | %s)" % \
(len(authorInfo.matches),
authorInfo.fullname,
authorInfo.lastName,
authorInfo.firstName)
for person in authorInfo.matches:
print '- ', person
except Exception, e:
self.errors.append(authorInfo.fullname + ": " + str(e))
def showErrors (self):
if self.errors:
print '\nNames that could not be parsed'
for error in self.errors:
print error
else:
print '\nAll names were parsed'
def showNotes(self):
if self.notes:
print '\nNotes'
            for note in self.notes:
print note
else:
print '\nNo notes generated'
class HtmlReporter (SimpleReporter):
results_columns = ['numMatches', 'inCitesName', 'peopleDBlink']
def __init__ (self):
SimpleReporter.__init__ (self)
self.htmlDoc = None
print '%d authorInfo instances' % len(self.data)
def asHtmlDoc (self):
if self.htmlDoc is None:
mockup_link = Href ("../reporter-mockup.html", 'to Mockup')
reportTable = self.makeReportHtml()
javascript = [
'javascript/prototype.js',
'javascript/scriptaculous-js-1.9.0/scriptaculous.js',
'javascript/toggler.js',
'javascript/decorate_upids.js'
]
self.htmlDoc = HtmlDocument(mockup_link,
reportTable,
title="inCites Author Reporter",
stylesheet="styles.css",
javascript=javascript)
return self.htmlDoc
def getInCitesAuthorInfo (self, authorInfo):
"""
make the html for a inCitesAuthor & its matches
"""
print 'getInCitesAuthorInfo with ' + authorInfo.fullname
id = authorInfo.id
togglerClass = authorInfo.matches and "toggler" or ""
toggler = DIV (id=id, klass=togglerClass)
if authorInfo.numMatches > 0:
togglerLnkClass = "inCitesAuthor togglerClosed"
else:
togglerLnkClass = "inCitesAuthor noTogglerClosed"
# print "%s %s" % (authorInfo.inCitesName, authorInfo.numMatches)
togglerLnk = DIV(id='toggler-lnk-'+id, klass=togglerLnkClass)
toggler.append(togglerLnk)
authorTable = TABLE(klass="authorTable")
togglerLnk.append(authorTable)
matchesContent = '(%d matches)' % authorInfo.numMatches
authorTable.append(
TR(
TD (authorInfo.fullname, klass="author"),
TD (matchesContent, klass="matches")
)
)
if authorInfo.numMatches > 0:
togglerCon = DIV(id='toggler-con-'+id, style="display:none")
toggler.append(togglerCon)
matchTable = TABLE(klass="matchTable")
togglerCon.append(matchTable)
for match in authorInfo.matches:
match_row = TR (klass="peopleDBmatch", id=match.upid)
match_row.append (
TD (match.getName(), klass="match-name"),
TD (match.upid, klass="match-upid"))
matchTable.append(match_row);
return toggler
def makeReportHtml (self):
report = DIV (id='reportTable')
person_counter = 1
max_count = self.recordsToReport or len(self.people.keys())
for authorInfo in self:
# print authorInfo.fullname, authorInfo.numMatches
try:
if authorInfo.numMatches == 1:
continue
if authorInfo.note:
self.notes.append(authorInfo.note)
# print 'processing authorInfo for ' + fullname
report.append (HR (klass="divider"))
report.append(self.getInCitesAuthorInfo (authorInfo))
except Exception, e:
self.errors.append(authorInfo.fullname + ": " + str(e))
return report
def writeHtmlDoc (self, path=None):
path = path or "report_html/INCITES_REPORT.html"
fp = open (path, 'w')
fp.write (self.asHtmlDoc().__str__())
fp.close()
print "wrote to " + path
if __name__ == '__main__':
# findPeople()
if 0:
reporter = SimpleReporter()
reporter.report()
if 1:
reporter = HtmlReporter()
print reporter.asHtmlDoc()
# reporter.writeHtmlDoc()
reporter.showErrors()
reporter.showNotes()
# print reporter.asHtmlDoc()
|
[
"ostwald@ucar.edu"
] |
ostwald@ucar.edu
|
f6a2b705d388a7c343310c3da3e8ed2698926621
|
14e3ecd6a5bfc4cba3b990d561a3d6db70f43430
|
/UDA/main.py
|
37d29c6ca6da7c86a831b725c3200b77e446e89d
|
[] |
no_license
|
zuiwufenghua/Multi-Cell_LSTM
|
61d487645e1c2d3f93f95bda2e8f46fabd8a3b3a
|
c4938222a559d93aa8377d4e1a46a27aff86457c
|
refs/heads/master
| 2023-01-23T06:20:56.883335
| 2020-12-02T09:41:32
| 2020-12-02T09:41:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,861
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import sys
import math
import argparse
import random
import torch
import gc
import torch.nn as nn
import torch.optim as optim
import numpy as np
from utils.metric import get_ner_fmeasure
from model.seqlabel import SeqLabel
from model.sentclassifier import SentClassifier
from utils.data import Data
import os
try:
import cPickle as pickle
except ImportError:
import pickle
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
seed_num = 42
random.seed(seed_num)
torch.manual_seed(seed_num)
np.random.seed(seed_num)
CAT = ['PER', 'ORG', 'LOC', 'MISC']
POSITION = ['I', 'B', 'E', 'S']
LABEL_INDEX = ['O'] + ["{}-{}".format(position, cat) for cat in CAT for position in POSITION]
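# For reference, the comprehension above expands to 17 tags:
# ['O', 'I-PER', 'B-PER', 'E-PER', 'S-PER', 'I-ORG', 'B-ORG', 'E-ORG', 'S-ORG',
#  'I-LOC', 'B-LOC', 'E-LOC', 'S-LOC', 'I-MISC', 'B-MISC', 'E-MISC', 'S-MISC']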
def data_initialization(data):
data.initial_feature_alphabets()
if data.task == 'NER':
for tag in LABEL_INDEX:
data.label_alphabet.add(tag)
data.entity_type.append('O')
for entity_name in CAT:
data.entity_type.append(entity_name)
for entity in data.entity_type:
data.entity_alphabet.add(entity)
for pos_name in POSITION:
data.position_type.append(pos_name)
data.build_alphabet(data.train_dir)
data.build_alphabet(data.dev_dir)
data.build_alphabet(data.test_dir)
data.build_alphabet_raw(data.raw_data_dir)
data.fix_alphabet()
for i in range(data.label_alphabet.size()-1):
print(data.label_alphabet.instances[i])
data.build_entity_dict(data.entity_dict_dir)
def predict_check(pred_variable, gold_variable, mask_variable, sentence_classification=False):
"""
input:
pred_variable (batch_size, sent_len): pred tag result, in numpy format
gold_variable (batch_size, sent_len): gold result variable
mask_variable (batch_size, sent_len): mask variable
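        A tiny worked example (assumed toy tensors, for illustration only):
            pred = [[1, 2, 0]], gold = [[1, 0, 0]], mask = [[1, 1, 0]]
            -> overlaped = [[True, False, True]], so right_token = 1 and total_token = 2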
"""
pred = pred_variable.cpu().data.numpy()
gold = gold_variable.cpu().data.numpy()
mask = mask_variable.cpu().data.numpy()
overlaped = (pred == gold)
if sentence_classification:
# print(overlaped)
# print(overlaped*pred)
right_token = np.sum(overlaped)
total_token = overlaped.shape[0] ## =batch_size
else:
right_token = np.sum(overlaped * mask)
total_token = mask.sum()
# print("right: %s, total: %s"%(right_token, total_token))
return right_token, total_token
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover, sentence_classification=False):
"""
input:
pred_variable (batch_size, sent_len): pred tag result
gold_variable (batch_size, sent_len): gold result variable
mask_variable (batch_size, sent_len): mask variable
"""
pred_variable = pred_variable[word_recover]
gold_variable = gold_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = gold_variable.size(0)
if sentence_classification:
pred_tag = pred_variable.cpu().data.numpy().tolist()
gold_tag = gold_variable.cpu().data.numpy().tolist()
pred_label = [label_alphabet.get_instance(pred) for pred in pred_tag]
gold_label = [label_alphabet.get_instance(gold) for gold in gold_tag]
else:
seq_len = gold_variable.size(1)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
gold_tag = gold_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
gold_label = []
for idx in range(batch_size):
pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0]
assert(len(pred)==len(gold))
pred_label.append(pred)
gold_label.append(gold)
return pred_label, gold_label
def recover_nbest_label(pred_variable, mask_variable, label_alphabet, word_recover):
"""
input:
pred_variable (batch_size, sent_len, nbest): pred tag result
mask_variable (batch_size, sent_len): mask variable
word_recover (batch_size)
output:
nbest_pred_label list: [batch_size, nbest, each_seq_len]
"""
# exit(0)
pred_variable = pred_variable[word_recover]
mask_variable = mask_variable[word_recover]
batch_size = pred_variable.size(0)
seq_len = pred_variable.size(1)
nbest = pred_variable.size(2)
mask = mask_variable.cpu().data.numpy()
pred_tag = pred_variable.cpu().data.numpy()
batch_size = mask.shape[0]
pred_label = []
for idx in range(batch_size):
pred = []
for idz in range(nbest):
each_pred = [label_alphabet.get_instance(pred_tag[idx][idy][idz]) for idy in range(seq_len) if mask[idx][idy] != 0]
pred.append(each_pred)
pred_label.append(pred)
return pred_label
def lr_decay(optimizer, epoch, decay_rate, init_lr):
lr = init_lr/(1+decay_rate*epoch)
print(" Learning rate is set as:", lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
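# Worked example for lr_decay (assumed values, just to illustrate the formula above):
#   init_lr = 0.015, decay_rate = 0.05, epoch = 10  ->  lr = 0.015 / (1 + 0.5) = 0.01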
def evaluate(data, model, name, nbest=None):
if name == "train":
instances = data.train_Ids
elif name == "dev":
instances = data.dev_Ids
elif name == 'test':
instances = data.test_Ids
elif name == 'raw':
instances = data.raw_Ids
else:
print("Error: wrong evaluate name,", name)
exit(1)
right_token = 0
whole_token = 0
nbest_pred_results = []
pred_scores = []
pred_results = []
gold_results = []
gold_entity_results = []
pred_entity_results = []
gold_probs_results = []
pred_probs_results = []
## set model in eval model
model.eval()
batch_size = data.HP_batch_size
start_time = time.time()
train_num = len(instances)
total_batch = train_num//batch_size+1
for batch_id in range(total_batch):
start = batch_id*batch_size
end = (batch_id+1)*batch_size
if end > train_num:
end = train_num
instance = instances[start:end]
if not instance:
continue
original_words_batch, batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, batch_entity, lm_seq_tensor, mask = batchify_with_label(instance, data.HP_gpu, False, data.sentence_classification)
if nbest and not data.sentence_classification:
scores, nbest_tag_seq, entity_seq, atten_probs_seq = model.decode_nbest(original_words_batch, batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask, nbest)
nbest_pred_result = recover_nbest_label(nbest_tag_seq, mask, data.label_alphabet, batch_wordrecover)
nbest_pred_results += nbest_pred_result
pred_scores += scores[batch_wordrecover].cpu().data.numpy().tolist()
            ## select the best sequence to evaluate
tag_seq = nbest_tag_seq[:,:,0]
else:
tag_seq, entity_seq, atten_probs_seq = model(original_words_batch, batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask)
# print("tag:",tag_seq)
pred_entity, gold_entity = recover_label(entity_seq, batch_entity, mask, data.entity_alphabet, batch_wordrecover, data.sentence_classification)
pred_entity_results += pred_entity
gold_entity_results += gold_entity
pred_probs, gold_probs = recover_label(atten_probs_seq, batch_entity, mask, data.entity_alphabet, batch_wordrecover, data.sentence_classification)
pred_probs_results += pred_probs
gold_probs_results += gold_probs
pred_label, gold_label = recover_label(tag_seq, batch_label, mask, data.label_alphabet, batch_wordrecover, data.sentence_classification)
pred_results += pred_label
gold_results += gold_label
decode_time = time.time() - start_time
speed = len(instances)/decode_time
print("word acc:")
acc, p, r, f = get_ner_fmeasure(gold_results, pred_results, data.tagScheme)
print("entity acc:")
entity_acc, _, _, _ = get_ner_fmeasure(gold_entity_results, pred_entity_results, "entity predict")
print("probs acc:")
probs_acc, _, _, _ = get_ner_fmeasure(gold_probs_results, pred_probs_results, "probs predict")
if nbest and not data.sentence_classification:
return speed, acc, p, r, f, nbest_pred_results, pred_scores
return speed, acc, p, r, f, pred_results, pred_scores
def batchify_with_label(input_batch_list, gpu, if_train=True, sentence_classification=False):
if sentence_classification:
return batchify_sentence_classification_with_label(input_batch_list, gpu, if_train)
else:
return batchify_sequence_labeling_with_label(input_batch_list, gpu, if_train)
def batchify_sequence_labeling_with_label(input_batch_list, gpu, if_train=True):
"""
        input: list of sentences of various lengths; each element is [words, features, chars, labels, entities, original_words]
            words: word ids for one sentence. (batch_size, sent_len)
            features: feature ids for one sentence. (batch_size, sent_len, feature_num)
            chars: char ids for one sentence, various length. (batch_size, sent_len, each_word_length)
            labels: label ids for one sentence. (batch_size, sent_len)
            entities: entity ids for one sentence. (batch_size, sent_len)
            original_words: the raw word strings for one sentence
output:
zero padding for word and char, with their batch length
word_seq_tensor: (batch_size, max_sent_len) Variable
feature_seq_tensors: [(batch_size, max_sent_len),...] list of Variable
word_seq_lengths: (batch_size,1) Tensor
char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable
char_seq_lengths: (batch_size*max_sent_len,1) Tensor
char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order
label_seq_tensor: (batch_size, max_sent_len)
mask: (batch_size, max_sent_len)
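        Example of one input element (assumed toy ids, not real vocabulary indices):
            words = [4, 17, 9]; features = [[1], [2], [1]]; chars = [[5, 6], [7], [8, 9]]
            labels = [0, 3, 0]; entities = [0, 1, 0]; original_words = ['EU', 'rejects', 'call']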
"""
device = torch.device('cuda' if gpu and torch.cuda.is_available() else 'cpu')
batch_size = len(input_batch_list)
words = [sent[0] for sent in input_batch_list]
features = [np.asarray(sent[1]) for sent in input_batch_list]
feature_num = len(features[0][0])
chars = [sent[2] for sent in input_batch_list]
labels = [sent[3] for sent in input_batch_list]
entities = [sent[4] for sent in input_batch_list]
original_words = [sent[5] for sent in input_batch_list]
word_seq_lengths = torch.LongTensor(list(map(len, words)))
max_seq_len = word_seq_lengths.max().item()
word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
label_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
entity_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
lm_forward_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
lm_backward_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad=if_train).long()
feature_seq_tensors = []
for idx in range(feature_num):
feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len),requires_grad = if_train).long())
mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).byte()
for idx, (seq, label, entity, seqlen) in enumerate(zip(words, labels, entities, word_seq_lengths)):
seqlen = seqlen.item()
word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
if seqlen > 1:
lm_forward_seq_tensor[idx, 0: seqlen - 1] = word_seq_tensor[idx, 1: seqlen]
lm_forward_seq_tensor[idx, seqlen - 1] = torch.LongTensor([1]) # unk word
lm_backward_seq_tensor[idx, 1: seqlen] = word_seq_tensor[idx, 0: seqlen - 1]
lm_backward_seq_tensor[idx, 0] = torch.LongTensor([1]) # unk word
else:
lm_forward_seq_tensor[idx, 0] = torch.LongTensor([1]) # unk word
lm_backward_seq_tensor[idx, 0] = torch.LongTensor([1]) # unk word
label_seq_tensor[idx, :seqlen] = torch.LongTensor(label)
entity_seq_tensor[idx, :seqlen] = torch.LongTensor(entity)
mask[idx, :seqlen] = torch.Tensor([1]*seqlen)
for idy in range(feature_num):
feature_seq_tensors[idy][idx,:seqlen] = torch.LongTensor(features[idx][:,idy])
word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)
word_seq_lengths = word_seq_lengths.to(device)
word_seq_tensor = word_seq_tensor[word_perm_idx].to(device)
# reorder sentence index
new_original_words = [] # list[list[word]]
for i in word_perm_idx:
new_original_words.append(original_words[i])
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx].to(device)
lm_forward_seq_tensor = lm_forward_seq_tensor[word_perm_idx].to(device)
lm_backward_seq_tensor = lm_backward_seq_tensor[word_perm_idx].to(device)
label_seq_tensor = label_seq_tensor[word_perm_idx].to(device)
entity_seq_tensor = entity_seq_tensor[word_perm_idx].to(device)
mask = mask[word_perm_idx].to(device)
### deal with char
# pad_chars (batch_size, max_seq_len)
pad_chars = [chars[idx] + [[0]] * (max_seq_len-len(chars[idx])) for idx in range(len(chars))]
length_list = [list(map(len, pad_char)) for pad_char in pad_chars]
max_word_len = max(map(max, length_list))
char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad = if_train).long()
char_seq_lengths = torch.LongTensor(length_list)
for idx, (seq, seqlen) in enumerate(zip(pad_chars, char_seq_lengths)):
for idy, (word, wordlen) in enumerate(zip(seq, seqlen)):
# print len(word), wordlen
char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)
char_seq_tensor = char_seq_tensor[word_perm_idx].view(batch_size*max_seq_len,-1)
char_seq_lengths = char_seq_lengths[word_perm_idx].view(batch_size*max_seq_len,)
char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True)
char_seq_tensor = char_seq_tensor[char_perm_idx].to(device)
_, char_seq_recover = char_perm_idx.sort(0, descending=False)
char_seq_recover = char_seq_recover.to(device)
_, word_seq_recover = word_perm_idx.sort(0, descending=False)
word_seq_recover = word_seq_recover.to(device)
lm_seq_tensor = [lm_forward_seq_tensor, lm_backward_seq_tensor]
return new_original_words, word_seq_tensor,feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, entity_seq_tensor, lm_seq_tensor, mask
def batchify_sentence_classification_with_label(input_batch_list, gpu, if_train=True):
"""
input: list of words, chars and labels, various length. [[words, features, chars, labels],[words, features, chars,labels],...]
words: word ids for one sentence. (batch_size, sent_len)
            features: feature ids for one sentence. (batch_size, feature_num); each sentence has one set of features
            chars: char ids for one sentence, various length. (batch_size, sent_len, each_word_length)
            labels: label ids for one sentence. (batch_size,); each sentence has one label
output:
zero padding for word and char, with their batch length
word_seq_tensor: (batch_size, max_sent_len) Variable
feature_seq_tensors: [(batch_size,), ... ] list of Variable
word_seq_lengths: (batch_size,1) Tensor
char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable
char_seq_lengths: (batch_size*max_sent_len,1) Tensor
char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order
label_seq_tensor: (batch_size, )
mask: (batch_size, max_sent_len)
"""
device = torch.device('cuda' if gpu and torch.cuda.is_available() else 'cpu')
batch_size = len(input_batch_list)
words = [sent[0] for sent in input_batch_list]
features = [np.asarray(sent[1]) for sent in input_batch_list]
feature_num = len(features[0])
chars = [sent[2] for sent in input_batch_list]
labels = [sent[3] for sent in input_batch_list]
word_seq_lengths = torch.LongTensor(list(map(len, words)))
max_seq_len = word_seq_lengths.max().item()
word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()
label_seq_tensor = torch.zeros((batch_size, ), requires_grad = if_train).long()
feature_seq_tensors = []
for idx in range(feature_num):
feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len),requires_grad = if_train).long())
mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).byte()
label_seq_tensor = torch.LongTensor(labels)
# exit(0)
for idx, (seq, seqlen) in enumerate(zip(words, word_seq_lengths)):
seqlen = seqlen.item()
word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
mask[idx, :seqlen] = torch.Tensor([1]*seqlen)
for idy in range(feature_num):
feature_seq_tensors[idy][idx,:seqlen] = torch.LongTensor(features[idx][:,idy])
word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)
word_seq_lengths = word_seq_lengths.to(device)
word_seq_tensor = word_seq_tensor[word_perm_idx].to(device)
for idx in range(feature_num):
feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx].to(device)
label_seq_tensor = label_seq_tensor[word_perm_idx].to(device)
mask = mask[word_perm_idx].to(device)
### deal with char
# pad_chars (batch_size, max_seq_len)
pad_chars = [chars[idx] + [[0]] * (max_seq_len-len(chars[idx])) for idx in range(len(chars))]
length_list = [list(map(len, pad_char)) for pad_char in pad_chars]
max_word_len = max(map(max, length_list))
char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad = if_train).long()
char_seq_lengths = torch.LongTensor(length_list)
for idx, (seq, seqlen) in enumerate(zip(pad_chars, char_seq_lengths)):
for idy, (word, wordlen) in enumerate(zip(seq, seqlen)):
# print len(word), wordlen
char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)
char_seq_tensor = char_seq_tensor[word_perm_idx].view(batch_size*max_seq_len,-1)
char_seq_lengths = char_seq_lengths[word_perm_idx].view(batch_size*max_seq_len,)
char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True)
char_seq_tensor = char_seq_tensor[char_perm_idx].to(device)
char_seq_tensor = char_seq_tensor.to(device)
_, char_seq_recover = char_perm_idx.sort(0, descending=False)
char_seq_recover = char_seq_recover.to(device)
_, word_seq_recover = word_perm_idx.sort(0, descending=False)
word_seq_recover = word_seq_recover.to(device)
return word_seq_tensor, feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask
def train(data):
print("Training model...")
device = torch.device('cuda' if torch.cuda.is_available() and data.HP_gpu else 'cpu')
data.show_data_summary()
save_data_name = data.model_dir +".dset"
data.save(save_data_name)
if data.sentence_classification:
model = SentClassifier(data).to(device)
else:
model = SeqLabel(data).to(device)
for name, param in model.named_parameters():
if param.requires_grad:
print(name)
## compute model parameter num
n_all_param = sum([p.nelement() for p in model.parameters()])
n_emb_param = sum([p.nelement() for p in (model.word_hidden.wordrep.word_embedding.weight, model.word_hidden.wordrep.char_feature.char_embeddings.weight, model._LM_softmax.softmax_w, model._LM_softmax.softmax_b)])
print("all parameters=%s, emb parameters=%s, other parameters=%s" % (n_all_param, n_emb_param, n_all_param-n_emb_param))
## not update the word embedding
#model.word_hidden.wordrep.word_embedding.weight.requires_grad = False
if data.optimizer.lower() == "sgd":
optimizer = optim.SGD(model.parameters(), lr=data.HP_lr, momentum=data.HP_momentum,weight_decay=data.HP_l2)
elif data.optimizer.lower() == "adagrad":
optimizer = optim.Adagrad(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "adadelta":
optimizer = optim.Adadelta(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "rmsprop":
optimizer = optim.RMSprop(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
elif data.optimizer.lower() == "adam":
optimizer = optim.Adam(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
else:
print("Optimizer illegal: %s"%(data.optimizer))
exit(1)
best_dev = -10
test_f = []
dev_f = []
best_epoch = 0
# data.HP_iteration = 1
## start training
for idx in range(data.HP_iteration):
epoch_start = time.time()
temp_start = epoch_start
print("Epoch: %s/%s" %(idx,data.HP_iteration))
if data.optimizer == "SGD":
optimizer = lr_decay(optimizer, idx, data.HP_lr_decay, data.HP_lr)
instance_count = 0
sample_id = 0
sample_loss = 0
total_loss = 0
total_perplexity = 0
right_entity = 0
right_atten_probs = 0
right_token = 0
whole_token = 0
random.shuffle(data.train_Ids)
random.shuffle(data.raw_data_Ids)
print("Shuffle: first input word list:", data.train_Ids[0][0])
## set model in train model
model.train()
model.zero_grad()
batch_size = data.HP_batch_size
batch_id = 0
train_num = len(data.train_Ids)
raw_data_num = len(data.raw_data_Ids)
total_batch = train_num//batch_size+1
raw_batch_size = raw_data_num//total_batch
for batch_id in range(total_batch):
start = batch_id*batch_size
end = (batch_id+1)*batch_size
if end > train_num:
end = train_num
instance = data.train_Ids[start:end]
start_raw = batch_id * raw_batch_size
end_raw = (batch_id+1) * raw_batch_size
if end_raw > raw_data_num:
end_raw = raw_data_num
instance_raw = data.raw_data_Ids[start_raw:end_raw]
if not instance:
continue
instance_count += 1
instances = [instance, instance_raw]
loss_ = 0.0
for mode_idx, mode in enumerate(['train', 'raw']):
                if len(instances[mode_idx]) < 1:
continue
# print(instance[1])
original_words_batch, batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, batch_entity, lm_seq_tensor, mask = batchify_with_label(instances[mode_idx], data.HP_gpu, True, data.sentence_classification)
if mode == 'train':
loss, tag_seq, entity_seq, atten_probs_seq = model.calculate_loss(original_words_batch, batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, batch_label, batch_entity, mask)
right, whole = predict_check(tag_seq, batch_label, mask, data.sentence_classification)
entity_right, entity_whole = predict_check(entity_seq, batch_entity, mask, data.sentence_classification)
atten_probs_right, atten_probs_whole = predict_check(atten_probs_seq, batch_entity, mask, data.sentence_classification)
right_token += right
whole_token += whole
right_entity += entity_right
right_atten_probs += atten_probs_right
elif mode == 'raw':
loss, perplexity, _,_ = model.raw_loss(original_words_batch, batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, batch_label, batch_entity, lm_seq_tensor, mask)
total_perplexity += perplexity.item()
loss_ += loss
sample_loss += loss_.item()
total_loss += loss_.item()
loss_.backward()
optimizer.step()
model.zero_grad()
LM_perplex = math.exp(total_perplexity / total_batch)
temp_time = time.time()
temp_cost = temp_time - temp_start
print(" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f"%(end, temp_cost, sample_loss, right_token, whole_token, (right_token+0.)/whole_token))
print(" total perplexity: %.4f" % (LM_perplex))
print(" entity acc: %.4f"%((right_entity+0.)/whole_token))
print(" atten probs acc: %.4f" % ((right_atten_probs + 0.) / whole_token))
epoch_finish = time.time()
epoch_cost = epoch_finish - epoch_start
print("Epoch: %s training finished. Time: %.2fs, speed: %.2fst/s, total loss: %s"%(idx, epoch_cost, train_num/epoch_cost, total_loss))
print("totalloss:", total_loss)
if total_loss > 1e8 or str(total_loss) == "nan":
print("ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....")
exit(1)
# continue
speed, acc, p, r, f, _,_ = evaluate(data, model, "dev")
dev_finish = time.time()
dev_cost = dev_finish - epoch_finish
if data.seg:
current_score = f
print("Dev: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(dev_cost, speed, acc, p, r, f))
else:
current_score = acc
print("Dev: time: %.2fs speed: %.2fst/s; acc: %.4f"%(dev_cost, speed, acc))
dev_f.append(current_score)
if current_score > best_dev:
best_epoch = idx
if data.seg:
print("Exceed previous best f score:", best_dev)
else:
print("Exceed previous best acc score:", best_dev)
model_name = data.model_dir + ".model"
print("Save current best model in file:", model_name)
torch.save(model.state_dict(), model_name)
best_dev = current_score
# ## decode test
speed, acc, p, r, f, _,_ = evaluate(data, model, "test")
test_finish = time.time()
test_cost = test_finish - dev_finish
if data.seg:
test_f.append(f)
print("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(test_cost, speed, acc, p, r, f))
else:
test_f.append(acc)
print("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f"%(test_cost, speed, acc))
gc.collect()
    print("The best f in epoch %s, dev: %.4f, test: %.4f" % (best_epoch, dev_f[best_epoch], test_f[best_epoch]))
def load_model_decode(data, name):
print("Load Model from file: ", data.model_dir)
device = torch.device('cuda' if torch.cuda.is_available() and data.HP_gpu else 'cpu')
if data.sentence_classification:
model = SentClassifier(data).to(device)
else:
model = SeqLabel(data).to(device)
# model = SeqModel(data)
## load model need consider if the model trained in GPU and load in CPU, or vice versa
# if not gpu:
# model.load_state_dict(torch.load(model_dir))
# # model.load_state_dict(torch.load(model_dir), map_location=lambda storage, loc: storage)
# # model = torch.load(model_dir, map_location=lambda storage, loc: storage)
# else:
# model.load_state_dict(torch.load(model_dir))
# # model = torch.load(model_dir)
model.load_state_dict(torch.load(data.load_model_dir))
## compute model parameter num
n_all_param = sum([p.nelement() for p in model.parameters()])
n_emb_param = sum([p.nelement() for p in (model.word_hidden.wordrep.word_embedding.weight, model.word_hidden.wordrep.char_feature.char_embeddings.weight, model._LM_softmax.softmax_w, model._LM_softmax.softmax_b)])
print("all parameters=%s, emb parameters=%s, other parameters=%s" % (n_all_param, n_emb_param, n_all_param-n_emb_param))
print("Decode %s data, nbest: %s ..."%(name, data.nbest))
start_time = time.time()
speed, acc, p, r, f, pred_results, pred_scores = evaluate(data, model, name, data.nbest)
end_time = time.time()
time_cost = end_time - start_time
if data.seg:
print("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(name, time_cost, speed, acc, p, r, f))
else:
print("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f"%(name, time_cost, speed, acc))
return pred_results, pred_scores
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tuning with NCRF++')
# parser.add_argument('--status', choices=['train', 'decode'], help='update algorithm', default='train')
parser.add_argument('--config', help='Configuration File', default='None')
parser.add_argument('--wordemb', help='Embedding for words', default='None')
parser.add_argument('--charemb', help='Embedding for chars', default='None')
parser.add_argument('--status', choices=['train', 'decode'], help='update algorithm', default='train')
parser.add_argument('--savemodel', default="data/model/saved_model.lstmcrf.")
parser.add_argument('--savedset', help='Dir of saved data setting')
parser.add_argument('--train', default="data/conll03/train.bmes")
parser.add_argument('--dev', default="data/conll03/dev.bmes" )
parser.add_argument('--test', default="data/conll03/test.bmes")
parser.add_argument('--seg', default="True")
parser.add_argument('--raw')
parser.add_argument('--loadmodel')
parser.add_argument('--output')
args = parser.parse_args()
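# Hypothetical example invocations (the script name is illustrative; the data paths are the defaults above):
#   python main.py --config demo.train.config
#   python main.py --train data/conll03/train.bmes --dev data/conll03/dev.bmes --test data/conll03/test.bmes --seg True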
data = Data()
if args.config == 'None':
data.train_dir = args.train
data.dev_dir = args.dev
data.test_dir = args.test
data.model_dir = args.savemodel
data.dset_dir = args.savedset
print("Save dset directory:",data.dset_dir)
save_model_dir = args.savemodel
data.word_emb_dir = args.wordemb
data.char_emb_dir = args.charemb
if args.seg.lower() == 'true':
data.seg = True
else:
data.seg = False
print("Seed num:",seed_num)
else:
data.read_config(args.config)
# data.show_data_summary()
status = data.status.lower()
print("Seed num:",seed_num)
if status == 'train':
print("MODEL: train")
data_initialization(data)
data.generate_instance('train')
data.generate_instance('dev')
data.generate_instance('test')
data.build_pretrain_emb()
train(data)
elif status == 'decode':
print("MODEL: decode")
data.load(data.dset_dir)
data.read_config(args.config)
print(data.raw_dir)
# exit(0)
data.show_data_summary()
data.generate_instance('raw')
print("nbest: %s"%(data.nbest))
decode_results, pred_scores = load_model_decode(data, 'raw')
if data.nbest and not data.sentence_classification:
data.write_nbest_decoded_results(decode_results, pred_scores, 'raw')
else:
data.write_decoded_results(decode_results, 'raw')
else:
print("Invalid argument! Please use valid arguments! (train/test/decode)")
|
[
"noreply@github.com"
] |
zuiwufenghua.noreply@github.com
|
aba125b8f91634b95df90b074b7664738b3c06a0
|
7f74e34fedf47766fdb280404eed5336b03c1461
|
/MII/Sede/forms.py
|
25cc0808ceaa6031fca8e2d8956257d290b79a8c
|
[] |
no_license
|
ExiledGod/Universidad
|
bcbdd6a11b6cbb134064792701380419ac290ca0
|
bb6cc13b83630b63b0ac594b3bee436a0a21eabe
|
refs/heads/master
| 2022-12-14T16:15:25.261425
| 2020-08-27T05:28:23
| 2020-08-27T05:28:23
| 290,684,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 722
|
py
|
from Sede.models import Sede, DireccionSede
from django import forms
class Sede_form(forms.ModelForm):
    class Meta:
        model = Sede
        fields = [
            'nombre',
        ]
        labels = {
            'nombre': 'Nombre: ',
        }
class DireccionForm(forms.ModelForm):
class Meta:
model = DireccionSede
fields = [
'region',
'provincia',
'comuna',
'direccion',
]
labels = {
'region': 'Región',
'provincia': 'Provincia',
'comuna': 'Comuna',
'direccion': 'Dirección',
}
widgets = {
'region': forms.Select(attrs={'class':'form-control'}),
'provincia': forms.Select(attrs={'class':'form-control'}),
'comuna': forms.Select(attrs={'class':'form-control'}),
'direccion': forms.TextInput(attrs={'class':'form-control'}),
}
|
[
"66937061+ExiledGod@users.noreply.github.com"
] |
66937061+ExiledGod@users.noreply.github.com
|
2d11d716b8d9b87ddcac994061c1cbae63557549
|
b2599eaa38eb035555b1aebab1cb188fa4e01cbe
|
/whileloop.py
|
37e3b220880f685cb4263dcc38659e66243c34a4
|
[] |
no_license
|
surut555/basicpython
|
c1f14c136480f4cda92eae1eeecdf407f974b6fb
|
7ae86bfb488c91243a802760646d1f3f0441435f
|
refs/heads/main
| 2023-01-04T02:42:20.987473
| 2020-11-01T02:33:46
| 2020-11-01T02:33:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
i = 1
while i <= 10:
if i == 10:
print(i)
else:
print(i, end=',')
i = i+1
a = 1
while True: # infinite loop
print(a)
a = a+1
|
[
"63528254+surut555@users.noreply.github.com"
] |
63528254+surut555@users.noreply.github.com
|
94ef4b15b8b61bb1acb9913ad5c3069aaed77362
|
e1b6cce87a76d967817cb979a9de8713abe66764
|
/forwards/pc_0.py
|
6a538d8b6afa2c7462bbdaed91bf2d20f0207c06
|
[] |
no_license
|
MaxAndrewNZ/AutonomousNavigation
|
816c773461f96446c02df23c2dbaacfa2448a35c
|
b70474c4f1f315c4609482fa3156430c64be7003
|
refs/heads/main
| 2023-02-02T05:26:52.874174
| 2020-12-07T22:46:03
| 2020-12-07T22:46:03
| 317,351,128
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,668
|
py
|
import sys
sys.path.insert(1, '../side_camera_test/')
import time,zmq,pickle
import numpy as np
import pyrealsense2 as rs
import open3d as o3d
import vehicle as veh
import pointcloud as pc
import visualise_pointclouds as visualise
import display_information as display
import cv2
import math
import random
import os
def fit_line_to_points(points):
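# Ordinary least-squares fit of y = a + b*x: the slope b is cov(X, Y) / var(X)
# and the intercept a is ybar - b*xbar; returns (intercept, slope).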
X = points[:,0]
Y = points[:,1]
xbar = sum(X)/len(X)
ybar = sum(Y)/len(Y)
n = len(X) # or len(Y)
numer = sum([xi*yi for xi,yi in zip(X, Y)]) - n * xbar * ybar
denum = sum([xi**2 for xi in X]) - n * xbar**2
b = numer / denum
a = ybar - b * xbar
print('best fit line:\ny = {:.2f} + {:.2f}x'.format(a, b))
return a, b
def get_test_pointcloud(moving, shift_x, shift_y, delay=0.3):
# Use saved pointcloud file
filename = "1.ply"
pcd = o3d.io.read_point_cloud(filename)
npCloud = np.asarray(pcd.points)
# Flips points to align with those from the landrov
offset = np.array([-1.8 + shift_x, 0, -0.5 - shift_y])
pcd = pc.npToPcd(npCloud * np.array([1, -1, -1]) + offset)
# Simulate delays in receiving the pointcloud
time.sleep(delay)
if moving:
shift_x += 0.05 * random.randint(0, 3)
shift_y += 0.05 * random.randint(0, 3)
return pcd, shift_x, shift_y
def flatten_cloud(cloud):
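# Project the cloud onto a 2-D plane by dropping the second (index-1) coordinate,
# then discard duplicate points before fitting a line to the result.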
np_cloud = np.asarray(cloud.pcd.points)
removed_y = np.delete(np_cloud, 1, axis=1)
removed_dups = np.unique(removed_y, axis=0)
return removed_dups
def main():
"""
This method is the heart of the Landrov navigation.
It handles controling the multi-step navigation.
The navigation settings can be configured within this.
"""
############ Configuration ##################
testing = True
plotting_error = True
moving = True
target_distance = 1.0 # Meters
speed = 0 # 0 - 1
# Proportional and derivative constants
linear_p_const = 0
angular_p_const = 1
# Data lists
linear_errors = []
angular_errors = []
linear_velocities = []
angular_velocities = []
# Region of interest
minX = -2.5
maxX = 0
minY = -1.5
maxY = -0.25
minZ = 0.01
maxZ = 2.0
region_min = [minX, minY, minZ]
region_max = [maxX, maxY, maxZ]
vehicle = veh.Vehicle("tcp://192.168.8.106", "5556", "5557")
if testing == False:
vehicle.connect_control()
print('Connected to vehicle server')
found_cloud = False
updated_cloud = False
time_start = time.time()
shift_x = 0.1
shift_y = 0.1
try:
while 1:
time_start = time.time()
if (testing):
pcd, shift_x, shift_y = get_test_pointcloud(moving, shift_x, shift_y)
time_start = time.time()
found_cloud = True
if not found_cloud:
vehicle.connect_pointcloud()
while not found_cloud:
if len(zmq.select([vehicle.sensor_socket],[],[],0)[0]):
topic,buf = vehicle.sensor_socket.recv_multipart()
if topic == b'pointcloud':
np_pcd = np.frombuffer(buf, dtype=np.float32)  # frombuffer replaces the deprecated fromstring for binary data
num_points = np_pcd.size // 3
reshaped_pcd = np.resize(np_pcd, (num_points, 3))
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(reshaped_pcd)
print("Time get cloud", round(time.time() - time_start, 2))
time_start = time.time()
found_cloud = True
vehicle.sensor_socket.close()
else:
downpcd = pcd.voxel_down_sample(voxel_size=0.1)
cloud = pc.PointCloud(downpcd, [0, 0, 0], region_min, region_max)
cloud.pcd, ind = cloud.pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=2.0)
updated_cloud = True
if len(cloud.pcd.points) == 0:
break
flat_cloud = flatten_cloud(cloud)
# Print status
print("*" * 64)
template = "Points in cloud {}"
print(template.format(len(cloud.pcd.points)))
#TODO: Fit line to cloud.
a, b = fit_line_to_points(flat_cloud)
#TODO: Calculate angle error and distance error.
angle = - (math.atan(b) + (math.pi / 2))
a_error = angle
angular_p = angular_p_const * a_error
distance = abs(a / (math.sqrt((b ** 2 + 1))))
d_error = distance - target_distance
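# distance is the perpendicular distance from the vehicle (origin) to the fitted
# wall line y = a + b*x, i.e. |a| / sqrt(b**2 + 1); d_error is how far the vehicle
# sits from the desired following distance.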
current_time = time.time()
if len(linear_errors) == 0:
linear_errors.append((current_time - 1, 0))
linear_p = linear_p_const * d_error
linear_errors.append((current_time, d_error))
# Wall angle error
angular_errors.append((current_time, a_error))
#TODO: Make a navigation adjustment from this information.
angular_velocity = max(min(angular_p + linear_p, 2), -2)
linear_velocity = speed
print("Target Distance:", target_distance, "Distance:", round(distance, 2), "D error:", round(d_error, 2))
print("Angle Degrees:", round(math.degrees(angle), 2), "Angle Rad:", round(angle, 2), "A error:", round(a_error, 2))
print("Linear velocity:", round(linear_velocity, 2), "Angular velocity:", round(angular_velocity, 2))
linear_velocities.append((current_time, linear_velocity))
angular_velocities.append((current_time, angular_velocity))
# Plot this information
if plotting_error:
display.display_line_follow(flat_cloud, a, b, region_min, region_max, linear_errors, angular_errors, linear_velocities, angular_velocities)
if updated_cloud:
print("Time processing", round(time.time() - time_start, 2))
if updated_cloud and not testing:
if moving:
vehicle.velocity_to_motor_command(linear_velocity, angular_velocity)
found_cloud = False
updated_cloud = False
except KeyboardInterrupt:
print("Force Close")
if not testing:
vehicle.stop()
display.display_errors(linear_errors, angular_errors)
main()
|
[
"mla138@uclive.ac.nz"
] |
mla138@uclive.ac.nz
|
9484689e364e1a12491127ddc0afa4929c215d8d
|
9f541271263dceb0b7b0134ab6cff5ed324c6444
|
/json/conf.py
|
cb6e2722d1f1194238a9bb9841615fcfe3adf803
|
[] |
no_license
|
shannondec/bonaventureli.github.io
|
bd0fa5286ab4077b690f2621425ddd78f16e72dc
|
ab442ff704dc539b5c9da1d7cacb826256392c17
|
refs/heads/main
| 2023-06-04T10:34:40.224571
| 2021-06-29T08:38:53
| 2021-06-29T08:38:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,837
|
py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'JSON'
copyright = '2021, bona'
author = 'bona'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
[
"lfwendula@msn.cn"
] |
lfwendula@msn.cn
|
60cda957b628771dede0236cb5bbb72670ae6f12
|
e95d6f6842e7d32a2933676e12d73cbe8cf9d7d4
|
/JSON_BOOK_Programs/1_Theano_example.py
|
505d5f01450e75013c437208ed149f090545f781
|
[] |
no_license
|
MehreenTariq12/mehru
|
87acda07c564f37107aa8a3e2da247216c734d35
|
af5da763197696394dfe751d2c54e3caab7d3f62
|
refs/heads/master
| 2022-06-24T03:59:54.542892
| 2020-05-10T06:03:05
| 2020-05-10T06:03:05
| 256,930,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
import theano
from theano import tensor
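# Minimal symbolic-computation example: declare two double-precision scalar
# variables, build the expression c = a + b, compile it into a callable with
# theano.function, then evaluate it; f(1, 2) returns 3.0, which is printed below.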
a = tensor.dscalar()
b = tensor.dscalar()
c = a + b
f = theano.function([a, b], c)
result = f(1, 2)
print(result)
|
[
"noreply@github.com"
] |
MehreenTariq12.noreply@github.com
|
c137360796290d8e49d6e99af906017dcf7bd04d
|
8908ac4efbb943e05cda86d84b11264f4e31a59e
|
/addons/to_paypal_unsupported_currencies/models/account_payment.py
|
a39199982bd52ca42d5d1361dcf480db69ee07a3
|
[
"Unlicense"
] |
permissive
|
kit9/vocal_v12
|
c38c17921a647c16a839110d3452dddcaf3f0036
|
480990e919c9410903e06e7813ee92800bd6a569
|
refs/heads/master
| 2023-02-01T15:55:13.461862
| 2020-12-15T12:06:06
| 2020-12-15T12:06:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,057
|
py
|
from odoo import models, fields
class AccountPayment(models.Model):
_inherit = 'account.payment'
paypal_original_unsupported_currency_id = fields.Many2one('res.currency', string='Paypal Original Unsupported Currency',
help="A technical field to store the original Paypal unsupported currency"
" in case of payment with Paypal using a currency that is not supported by Paypal.")
paypal_original_unsupported_currency_amount = fields.Monetary(string='Paypal Original Unsupported Currency Amount',
currency_field='paypal_original_unsupported_currency_id',
help="A technical field to store the original Paypal unsupported currency amount"
" in case of payment with Paypal using a currency that is not supported by Paypal.")
|
[
"vijay+admin@wearme.me"
] |
vijay+admin@wearme.me
|
e5349826b1bf5d97988e22e049ad10187fab4a4b
|
677869880e88f669a436c78cd77dc3eee2a66569
|
/pythonkatas/primefactors/main.py
|
6ec728df598d050073646ad580f72a03c1689277
|
[] |
no_license
|
ErinLMoore/CodeKatas
|
36c6b5a860ee374497f1509b49adab4713e34de1
|
a6e77e0ecdc3c54e84901eabeff9270c65a96e1b
|
refs/heads/master
| 2020-04-06T07:02:12.463774
| 2016-09-29T02:23:26
| 2016-09-29T02:23:26
| 53,079,444
| 0
| 1
| null | 2016-07-06T15:56:13
| 2016-03-03T20:05:00
|
HTML
|
UTF-8
|
Python
| false
| false
| 196
|
py
|
import os
import sys
# '~' is not expanded automatically, so resolve it before adding the katas source dir to sys.path.
sys.path.append(os.path.expanduser('~/workspace/CodeKatas/pythonkatas/src'))
from src.primefactors import primeFactors
# The original wrapped the script body in a class statement, which ran at import
# time; a __main__ guard expresses the same intent idiomatically.
if __name__ == '__main__':
    int_to_factor = int(sys.argv[1])
    print(primeFactors(int_to_factor))
|
[
"emoore@pillartechnology.com"
] |
emoore@pillartechnology.com
|
91a53658d1e9736bda6759fad4317665e4ce9a63
|
796654a4e4961a764b2d7e036971da049a3b3cae
|
/project_2/bayes.py
|
f92b6ac248af1dce8b40d01e11b6b5a9c33f129f
|
[] |
no_license
|
nattip/CIS678
|
9cfe6ab6fbec855456140e98234b2ed358c20403
|
aceffe9144577727e10feb40a7e8ae9e2948e4a5
|
refs/heads/master
| 2022-07-09T11:12:45.863383
| 2020-04-02T15:15:25
| 2020-04-02T15:15:25
| 231,259,734
| 0
| 1
| null | 2022-06-22T01:15:18
| 2020-01-01T20:20:40
|
Python
|
UTF-8
|
Python
| false
| false
| 11,604
|
py
|
# Written by Natalie Tipton
# February 2, 2020
# CIS 678 - Machine Learning
# Dr. Greg Wolffe
# Spam/Ham Classification of text messages using
# the Naive Bayes algorithm
from collections import Counter
import string
import math
import heapq
import matplotlib.pyplot as plt
import numpy as np
#################################################
# Function to clean incoming data:
# Turn all letters lower case
# Remove all punctuation
# Remove all numeric-only strings
def clean_words(all_words):
cleaned_words = []
for word in all_words:
word = word.lower()
for character in word:
if character in string.punctuation:
word = word.replace(character, "")
if not word.isnumeric() and word != "":
cleaned_words.append(word)
return cleaned_words
#################################################
if __name__ == "__main__":
# Raw text sorting
ham_raw = []
spam_raw = []
# Unique words for ham or spam
all_words = []
vocab = set()
# Probabilities of words in each class
prob_ham = {}
prob_spam = {}
prob_diff = {}
# initial counts for number of ham/spam messages
ham_count = 0
spam_count = 0
# read in file one line at a time
with open("./train.data") as f:
lines = f.readlines()
# Read all of the lines and append them to their classification
for line in lines:
if line.startswith("ham"):
# Get the whole string
ham_count += 1
raw_text = "".join(line.split("ham"))[1:].rstrip()
# split string by spaces and append each word appropriately
for val in raw_text.split(" "):
all_words.append(val)
ham_raw.append(val)
if line.startswith("spam"):
spam_count += 1
raw_text = "".join(line.split("spam"))[1:].rstrip()
for val in raw_text.split(" "):
all_words.append(val)
spam_raw.append(val)
# clean words in all classifications
cleaned_words = clean_words(all_words)
cleaned_ham_words = clean_words(ham_raw)
cleaned_spam_words = clean_words(spam_raw)
# count up occurrences of each word in all places
cleaned_words_counts = Counter(cleaned_words)
cleaned_ham_word_counts = Counter(cleaned_ham_words)
cleaned_spam_word_counts = Counter(cleaned_spam_words)
# create vocabulary with no repeating words
vocab = set(cleaned_words)
# remove 5 most common words in all messages
vocab.remove("to")
vocab.remove("i")
vocab.remove("you")
vocab.remove("a")
vocab.remove("the")
# find sizes of all different sets of words
vocab_size = len(vocab)
spam_size = len(cleaned_spam_words)
ham_size = len(cleaned_ham_words)
# make dictionaries of spam and ham words with the probability
# that each word will occur in either classification
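# Add-one (Laplace) smoothing is used below: P(word | class) = (count(word, class) + 1) / (N_class + |vocab|),
# so words unseen in a class still get a small nonzero probability.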
for word in vocab:
prob_spam[word] = (cleaned_spam_word_counts[word] + 1) / (
spam_size + vocab_size
)
prob_ham[word] = (cleaned_ham_word_counts[word] + 1) / (
ham_size + vocab_size
)
# find the difference between spam and ham probabilities
# for future analysis of most obvious classifications
prob_diff[word] = prob_ham[word] - prob_spam[word]
# Use this to find words that are common in everything
# and maybe not count those for bayes
# print(cleaned_words_counts.most_common(5))
# calculate overall percentage of spam and ham messages in training
prob_of_ham_message = ham_count / (ham_count + spam_count)
prob_of_spam_message = spam_count / (ham_count + spam_count)
# lists of true and hypothesized classifications
true_class = []
hyp_class = []
# open test data line by line
with open("./test.data") as f:
lines = f.readlines()
# set counters for true and hypothesized classes to 0
test_ham_count = 0
test_spam_count = 0
hyp_ham_count = 0
hyp_spam_count = 0
# go through lines 1 at a time
for line in lines:
# reset the big product for naive bayes calculation back to 0
big_product_ham = 0
big_product_spam = 0
# pull the true classification off the message into a list
true_class.append(line.split()[0])
# count up the true occurrences for percentage calculations
if line.split()[0] == "ham":
test_ham_count += 1
if line.split()[0] == "spam":
test_spam_count += 1
# leave only the message w/o the classification remaining
message = line.split()[1:]
# for each word in the message
for word in message:
# do not count if word is not in vocabulary
if word not in vocab:
continue
# naive bayes formula using log rules
big_product_ham += math.log10(prob_ham[word])
big_product_spam += math.log10(prob_spam[word])
cnb_ham = math.log10(prob_of_ham_message) + big_product_ham
cnb_spam = math.log10(prob_of_spam_message) + big_product_spam
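# Summing log-probabilities is equivalent to multiplying the per-word probabilities
# while avoiding floating-point underflow; the message goes to whichever class has
# the larger log-posterior.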
# classify message
if cnb_ham > cnb_spam:
hyp_class.append("ham")
hyp_ham_count += 1
elif cnb_ham < cnb_spam:
hyp_class.append("spam")
hyp_spam_count += 1
else:
hyp_class.append("ham")
hyp_ham_count += 1
true_pos = 0
true_neg = 0
false_pos = 0
false_neg = 0
# count up true and false pos and negs
for i in range(0, len(true_class)):
if true_class[i] == "spam" and hyp_class[i] == "spam":
true_pos += 1
elif true_class[i] == "ham" and hyp_class[i] == "ham":
true_neg += 1
elif true_class[i] == "spam" and hyp_class[i] == "ham":
false_neg += 1
elif true_class[i] == "ham" and hyp_class[i] == "spam":
false_pos += 1
# calculate metrics for model
correct = (
100 * (true_pos + true_neg) / (true_pos + true_neg + false_pos + false_neg)
)
recall = 100 * (true_pos / (true_pos + false_neg))
tnr = 100 * (true_neg / (true_neg + false_pos))
precision = 100 * (true_pos / (true_pos + false_pos))
#################################################
# Output
# Printed metrics
# Pie Charts
# Bar Charts
print("\ntrue positives =", true_pos)
print("true negatives =", true_neg)
print("false positives =", false_pos)
print("false negatives =", false_neg)
print("\nRecall =", recall)
print("Precision =", precision)
print("True Negative Rate =", tnr)
print("correct classification =", correct, "\n")
# determining what the most telling spam and ham words are
# print(heapq.nlargest(5, prob_diff, key=prob_diff.get))
# print(heapq.nsmallest(5, prob_diff, key=prob_diff.get))
# create pie charts to show percentage of spam and ham
# messages in training and testing and what the model
# determined for the testing set
labels = "Spam", "Ham"
sizes = [prob_of_spam_message, prob_of_ham_message]
explode = (0.1, 0)  # only "explode" the 1st slice (i.e. 'Spam')
fig1, ax1 = plt.subplots()
ax1.pie(
sizes,
explode=explode,
labels=labels,
autopct="%1.1f%%",
shadow=True,
startangle=90,
)
ax1.axis("equal") # Equal aspect ratio ensures that pie is drawn as a circle.
plt.title("True Training Percentages")
plt.show()
labels = "Spam", "Ham"
sizes = [
test_spam_count / (test_spam_count + test_ham_count),
test_ham_count / (test_spam_count + test_ham_count),
]
explode = (0.1, 0)  # only "explode" the 1st slice (i.e. 'Spam')
fig2, ax1 = plt.subplots()
ax1.pie(
sizes,
explode=explode,
labels=labels,
autopct="%1.1f%%",
shadow=True,
startangle=90,
)
ax1.axis("equal") # Equal aspect ratio ensures that pie is drawn as a circle.
plt.title("True Testing Percentages")
plt.show()
labels = "Spam", "Ham"
sizes = [
hyp_spam_count / (hyp_spam_count + hyp_ham_count),
hyp_ham_count / (hyp_spam_count + hyp_ham_count),
]
explode = (0.1, 0)  # only "explode" the 1st slice (i.e. 'Spam')
fig3, ax1 = plt.subplots()
ax1.pie(
sizes,
explode=explode,
labels=labels,
autopct="%1.1f%%",
shadow=True,
startangle=90,
)
ax1.axis("equal") # Equal aspect ratio ensures that pie is drawn as a circle.
plt.title("Hypothesized Testing Percentages")
plt.show()
# create stacked bar charts to show the probability of spam or ham
# for the most telling spam words and then the most telling ham words
plt.figure(4)
N = 5
# showing probability for the words determined to be most telling for spam
top_spam_probs = (
prob_spam["call"],
prob_spam["free"],
prob_spam["txt"],
prob_spam["claim"],
prob_spam["your"],
)
low_ham_probs = (
prob_ham["call"],
prob_ham["free"],
prob_ham["txt"],
prob_ham["claim"],
prob_ham["your"],
)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
p1 = plt.bar(ind, low_ham_probs, width)
p2 = plt.bar(ind, top_spam_probs, width, bottom=low_ham_probs)
plt.ylabel("Probability of Word")
plt.title("Probability of Spam or Ham\nFor Most Telling Spam Words")
plt.xticks(ind, ("call", "free", "txt", "claim", "your"))
plt.legend((p1[0], p2[0]), ("Ham", "Spam"))
plt.show()
plt.figure(5)
N = 5
# showing probability for the words determined to be most telling for ham
low_spam_probs = (
prob_spam["my"],
prob_spam["me"],
prob_spam["in"],
prob_spam["it"],
prob_spam["u"],
)
top_ham_probs = (
prob_ham["my"],
prob_ham["me"],
prob_ham["in"],
prob_ham["it"],
prob_ham["u"],
)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
p1 = plt.bar(ind, low_spam_probs, width)
p2 = plt.bar(ind, top_ham_probs, width, bottom=low_spam_probs)
plt.ylabel("Probability of Word")
plt.title("Probability of Spam or Ham\nFor Most Telling Ham Words")
plt.xticks(ind, ("my", "me", "in", "it", "u"))
plt.legend((p1[0], p2[0]), ("Spam", "Ham"))
plt.show()
|
[
"tiptonna@mail.gvsu.edu"
] |
tiptonna@mail.gvsu.edu
|
26c93247ee4200baadf6355f694c14262a0ea35e
|
0d09e32620e2e82f243ba86e2cc7bec19e521b1b
|
/Exercises/01-Sequential-Structure/ex06.py
|
ac27e05e4d8dd83905a5665183e94f38cefa3c27
|
[] |
no_license
|
hikarocarvalho/Python_Wiki
|
401f7466377f2053cda8bfa850afd0bd64cce047
|
01f755ecc18de13a9ded794ece9e7a8bd4ad7d9e
|
refs/heads/main
| 2023-06-14T08:37:10.728067
| 2021-07-12T20:24:41
| 2021-07-12T20:24:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
# The user supplies the radius value
ray = float(input("Enter the radius value: "))
# Calculate the area of the circle
area = ray**2 * 3.14
# Show the result
print("The area of this circle is:", area)
|
[
"hikarofcarvalho@gmail.com"
] |
hikarofcarvalho@gmail.com
|
a54d47e36ae5a6c355b6eeb86ee81846d8e817a5
|
206f9385bcb87bf59f5a577043c0abfed8ab0066
|
/Asynchronous_IO/event_loop_dif_thread.py
|
13ad2d93012b41d1f008674bf0294785baf6e367
|
[] |
no_license
|
XiangSugar/Python
|
7bca1ea5a90d880f1ed4f0b3dd4fc3310caf7fdc
|
2356dc3342dbc9c474cf0d9f25b1dc7930bc8814
|
refs/heads/master
| 2021-04-12T01:51:26.751494
| 2018-09-04T01:25:48
| 2018-09-04T01:25:48
| 125,846,325
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
# coding: utf-8
from threading import Thread, currentThread
import asyncio
import time
now = lambda: time.time()
def start_loop(loop):
asyncio.set_event_loop(loop)
loop.run_forever()
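# start_loop binds the given event loop to the newly spawned thread and runs it
# forever, so the main thread can schedule work onto it with call_soon_threadsafe.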
def more_work(x):
print('%s' % currentThread())
print('More work {}'.format(x))
time.sleep(x)
print('Finished more work {}'.format(x))
start = now()
new_loop = asyncio.new_event_loop()
t = Thread(target=start_loop, args=(new_loop,))
t.start()
print('TIME: {}'.format(time.time() - start))
print('%s' % currentThread())
new_loop.call_soon_threadsafe(more_work, 6)
new_loop.call_soon_threadsafe(more_work, 3)
# '''
# After the code above starts, the current thread is not blocked; the new thread
# runs the more_work calls registered via call_soon_threadsafe in order. Because
# time.sleep is a synchronous, blocking operation, finishing both more_work calls
# takes roughly 6 + 3 seconds.
# '''
|
[
"suxiang@hust.edu.cn"
] |
suxiang@hust.edu.cn
|
9c39ba0a267b3fb8b32d1167382688f2d07176a5
|
5950e578d3c6239987bc189951334cc87ca1a966
|
/198. House Robber.py
|
fc45a3e04b49703b17b6b5bbbecf92e6bf7d5771
|
[] |
no_license
|
a5135324/leetcode
|
0aa5ddc42569f9a619394197afcd1b29e856687e
|
c3eb9000154859669ed18054eca04c44023ef496
|
refs/heads/master
| 2021-02-14T14:40:52.336373
| 2020-12-27T09:03:59
| 2020-12-27T09:03:59
| 244,812,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
'''
Detail:
Runtime: 20 ms, faster than 98.54% of Python3 online submissions for House Robber.
Memory Usage: 12.8 MB, less than 100.00% of Python3 online submissions for House Robber.
Submit time: 2020/03/06 13:37(UTC+8)
'''
class Solution:
def rob(self, nums: List[int]) -> int:
n = len(nums)
dp = [0,0,0]
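# dp[i] is the best loot when the house at index i-3 is the last one robbed: the
# previously robbed house is either two or three positions back, so
# dp[i] = nums[i-3] + max(dp[i-3], dp[i-2]); the three zeros pad the base cases.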
ans = 0
for i in range(3,n+3):
dp.append(max(dp[i-3]+nums[i-3], dp[i-2]+nums[i-3]))
if dp[i] > ans:
ans = dp[i]
return ans
|
[
"a5135324@gmail.com"
] |
a5135324@gmail.com
|
dcebdc63fede722d67b16b2fb6757f67c3e20076
|
59f03c7528c9c806e3e25b9864db89f25dfa73c2
|
/tests/conftest.py
|
340ea85a5b213e3a629d9ba903138e93c3473583
|
[
"MIT"
] |
permissive
|
OneGov/onegov-cloud
|
8d8cd6d0378991ebc2333b62337246719102e723
|
c706b38d5b67692b4146cdf14ef24d971a32c6b8
|
refs/heads/master
| 2023-08-24T15:37:52.536958
| 2023-08-24T14:15:54
| 2023-08-24T14:15:54
| 189,431,418
| 17
| 4
|
MIT
| 2023-09-14T20:39:37
| 2019-05-30T14:47:14
|
Python
|
UTF-8
|
Python
| false
| false
| 34
|
py
|
pytest_plugins = ['tests.shared']
|
[
"denis.krienbuehl@seantis.ch"
] |
denis.krienbuehl@seantis.ch
|
9918758e1b829eee564875e4744dd8f4094f8f34
|
bbefc4e1252b984625bc5b94b244d2e9838e4100
|
/neuralnetwork/neuralNetwork.py
|
e8bd07d938d8993b0597b093db7adf1cad62f94b
|
[] |
no_license
|
ckethan0429/Machine-Learning
|
5a8add0a8c56695b1d874c9f4bc44236b39dcd2d
|
d6d1b5fb2ceda866e8334a0a6d2a8bf5df864a3c
|
refs/heads/master
| 2020-05-03T13:36:22.147819
| 2019-04-02T11:54:33
| 2019-04-02T11:54:33
| 178,656,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,641
|
py
|
import numpy
from scipy.special import expit
import pandas as pd
class neuralNetwork:
# Initialize the neural network
def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate, path = None):
# Set the number of nodes in the input, hidden and output layers
self.inodes = inputnodes
self.hnodes = hiddennodes
self.onodes = outputnodes
# Learning rate
self.lr = learningrate
# Weight matrices
if(path != None) :
self.load_weight(path)
else:
self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
# Use the sigmoid function as the activation function
self.activation_function = lambda x: expit(x)
pass
def load_weight(self, path):
self.wih = pd.read_csv(path + "_wih.csv", header=None)
self.who = pd.read_csv(path + "_who.csv", header=None)
def save_weight(self, path):
pd.DataFrame(self.wih).to_csv(path+ "_wih.csv", index = False, header= None)
pd.DataFrame(self.who).to_csv(path+ "_who.csv", index = False, header= None)
# Train the neural network
def train(self, inputs_list, targets_list):
# Convert the input list to a 2D matrix
inputs = numpy.array(inputs_list, ndmin=2).T
targets = numpy.array(targets_list, ndmin=2).T
# Compute the signals entering the hidden layer
hidden_inputs = numpy.dot(self.wih, inputs)
# Compute the signals leaving the hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
# Compute the signals entering the final output layer
final_inputs = numpy.dot(self.who, hidden_outputs)
# Compute the signals leaving the final output layer
final_outputs = self.activation_function(final_inputs)
# Second stage: update the weights
# Output-layer error (target value - computed value)
output_errors = targets - final_outputs
# Hidden-layer error is the output error redistributed in proportion to the weights
hidden_errors = numpy.dot(self.who.T, output_errors)
# Update the weights between the hidden and output layers
self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))
self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))
# Query the neural network
def query(self, inputs_list):
# Convert the input list to a 2D matrix
inputs = numpy.array(inputs_list, ndmin=2).T
# Compute the signals entering the hidden layer
hidden_inputs = numpy.dot(self.wih, inputs)
# Compute the signals leaving the hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
# Compute the signals entering the final output layer
final_inputs = numpy.dot(self.who, hidden_outputs)
# Compute the signals leaving the final output layer
final_outputs = self.activation_function(final_inputs)
return final_outputs
if __name__ == "__main__":
# Number of input, hidden and output nodes
input_nodes = 3
hidden_nodes = 3
output_nodes = 3
# Define the learning rate as 0.3
learning_rate = 0.3
# Create an instance of the neural network
n = neuralNetwork(input_nodes, hidden_nodes,output_nodes, learning_rate)
print("n.query = ", n.query([1.0, 0.5, -1.5]))
|
[
"oops0429@gmail.com"
] |
oops0429@gmail.com
|
65e75880a88ed030115589f4646788aa413fd91e
|
7f7213fe407f252b2323025c9b9e381a73474b7d
|
/drag_sheet/sds/check_files.py
|
077feba9a15d97280ae0c711e7c9b67f9866a925
|
[] |
no_license
|
ahy3nz/graphene_build
|
0ce62c2123b8c39248048d2cafbd0aafdd06ff9a
|
44590b8db799136929fc06e490151f450ad30029
|
refs/heads/master
| 2021-06-20T07:28:20.958807
| 2019-07-03T19:48:24
| 2019-07-03T19:48:24
| 135,504,561
| 0
| 1
| null | 2018-08-06T21:47:17
| 2018-05-30T22:40:17
|
Python
|
UTF-8
|
Python
| false
| false
| 572
|
py
|
import os
import itertools as it
curr_dir = os.getcwd()
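# Walk every (sds, k, angle, trial) combination and report simulation directories
# that are missing a 'pull.gro' output, as well as combinations with no directory at all.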
sds_folders = ['10sds', '20sds', '30sds', '40sds', '50sds', '60sds',
'70sds', '80sds', '90sds', '100sds']
k_folders = ['k50']
angle_folders = ['0']
trials = ['a', 'b', 'c']
for combo in it.product(sds_folders, k_folders, angle_folders,trials):
sim_dir = os.path.join(curr_dir, '{0}/{1}_{2}_{3}'.format(*combo))
if os.path.isdir(sim_dir):
os.chdir(sim_dir)
if not os.path.isfile('pull.gro'):
print(sim_dir)
else:
print("{} doesn't exist".format(sim_dir))
|
[
"alexander.h.yang@vanderbilt.edu"
] |
alexander.h.yang@vanderbilt.edu
|