blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
58886a0fe49b2b547bce5fc1506904ba9f8bbd44 | 28381c3cc17761bc06f0c95e0e0ae28276cca2d7 | /manage.py | 82de5fd67b825771d6c8a3fbf97233f586e1cf95 | [] | no_license | ankita21prasad/craigslist-clone | ddce0db9a3aa53ed6f8508826485be4dade157d3 | 8c28b8fc1ade15b03d008fb814aa9ab107b2c1fc | refs/heads/master | 2022-07-15T02:28:53.468733 | 2020-05-19T13:44:39 | 2020-05-19T13:44:39 | 265,256,363 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's administrative command-line utility."""
    # The settings module must be known before Django itself is imported.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'craigslist.settings')
    try:
        from django.core import management
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    management.execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"ankita2108prasad@gmail.com"
] | ankita2108prasad@gmail.com |
c5dca2065aacf2b0ee59d318d60cb64a9fc6d155 | 6be7f986b59dc1ac02c54e0bd9a3d46127bb6d33 | /examples/lesson8/walk_folder.py | f1e35076eac9370d959db88a554033ebd75a069c | [] | no_license | sol87/TDTutorial2 | 4d143e6c381301e00d636476a8b018807f35bf52 | 49dd68ea4a0d3ccf44c1efc7dc63b78ce66d5078 | refs/heads/master | 2020-03-28T17:09:25.073663 | 2018-09-29T13:12:33 | 2018-09-29T13:12:33 | 148,762,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | import os
def walk_folders(folder_path):
    """Recursively describe *folder_path* as a list of dicts.

    Files become {"type": "file", "name": ...}; anything that is not a
    regular file is treated as a folder and recursed into, yielding
    {"type": "folder", "name": ..., "sub_files": [...]}.
    """
    entries = []
    for entry_name in os.listdir(folder_path):
        entry_path = os.path.join(folder_path, entry_name)
        if os.path.isfile(entry_path):
            entries.append({"type": "file", "name": entry_name})
        else:
            entries.append({
                "type": "folder",
                "name": entry_name,
                "sub_files": walk_folders(entry_path),
            })
    return entries
if __name__ == "__main__":
    # Function-call form of print: works on Python 2 (parenthesized
    # expression) and Python 3 alike; the bare `print expr` statement is a
    # SyntaxError under Python 3.
    print(walk_folders("e:/temp"))
| [
"guoliangxu1987@163.com"
] | guoliangxu1987@163.com |
97c3d4708e7a5c659b516edef9ebd33c1c164dbc | f66201c96e38cbdc1f345b24be31106b43496e26 | /manage.py | e591674f09e70a40cddea8db036c09b45c392be5 | [] | no_license | vivek4112/DjangoProj | 8e241aad1cee5f60d847077d801c9f6f06191653 | 6a5ec99717b21c294b5ef8820e5ab2c7fb499b18 | refs/heads/develop | 2022-12-11T11:36:20.289771 | 2018-08-20T15:43:43 | 2018-08-20T15:43:43 | 144,486,214 | 0 | 0 | null | 2021-06-10T20:45:10 | 2018-08-12T17:16:46 | HTML | UTF-8 | Python | false | false | 538 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings before it is imported.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'weread.settings')
    try:
        from django.core import management
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    management.execute_from_command_line(sys.argv)
| [
"18182313+vivek4112@users.noreply.github.com"
] | 18182313+vivek4112@users.noreply.github.com |
f86fab53d5d760b38c0e7a595bbe3472b3653bce | 4dbe83d625dfbc5da6eea2c73a41feda6cbea708 | /scalyr_agent/third_party_tls/oscrypto/_osx/_core_foundation_ctypes.py | 138544f0c96b85ef866644988987c69598bc691c | [
"Apache-2.0"
] | permissive | Kami/scalyr-agent-2 | e95732763e57ab768f488690574a58fd0ea6acd4 | b26ebb6a74c2670ae28052079f2fac95d88e832a | refs/heads/master | 2021-03-16T17:58:44.098136 | 2020-06-03T13:40:11 | 2020-06-03T13:40:11 | 246,929,083 | 0 | 0 | Apache-2.0 | 2020-06-03T13:48:26 | 2020-03-12T20:53:56 | Python | UTF-8 | Python | false | false | 13,520 | py | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
from ctypes.util import find_library
from ctypes import c_void_p, c_long, c_uint32, c_char_p, c_byte, c_ulong, c_bool
from ctypes import CDLL, string_at, cast, POINTER, byref
import ctypes
from .._ffi import FFIEngineError, buffer_from_bytes, byte_string_from_buffer
from ..errors import LibraryNotFoundError
__all__ = [
    'CFHelpers',
    'CoreFoundation',
]
# Locate the CoreFoundation framework; this module is only usable on
# OS X / macOS, so fail fast with a descriptive error when it is absent.
core_foundation_path = find_library('CoreFoundation')
if not core_foundation_path:
    raise LibraryNotFoundError('The library CoreFoundation could not be found')
CoreFoundation = CDLL(core_foundation_path, use_errno=True)
# ctypes aliases mirroring the CoreFoundation C typedefs. The CF container
# types are opaque, so they are all modeled as void pointers.
CFIndex = c_long
CFStringEncoding = c_uint32
CFArray = c_void_p
CFData = c_void_p
CFString = c_void_p
CFNumber = c_void_p
CFDictionary = c_void_p
CFError = c_void_p
CFType = c_void_p
CFTypeID = c_ulong
CFBoolean = c_void_p
CFNumberType = c_uint32
# The "*Ref" names are pointers to the opaque structs above, matching the
# CF*Ref typedefs in the C headers.
CFTypeRef = POINTER(CFType)
CFArrayRef = POINTER(CFArray)
CFDataRef = POINTER(CFData)
CFStringRef = POINTER(CFString)
CFNumberRef = POINTER(CFNumber)
CFBooleanRef = POINTER(CFBoolean)
CFDictionaryRef = POINTER(CFDictionary)
CFErrorRef = POINTER(CFError)
CFAllocatorRef = c_void_p
CFDictionaryKeyCallBacks = c_void_p
CFDictionaryValueCallBacks = c_void_p
CFArrayCallBacks = c_void_p
pointer_p = POINTER(c_void_p)
# Declare argtypes/restype for every CoreFoundation function this module
# calls so ctypes marshals arguments correctly. An AttributeError inside
# this block means the loaded library lacks an expected symbol, which is
# surfaced as an FFIEngineError below.
try:
    CoreFoundation.CFDataGetLength.argtypes = [
        CFDataRef
    ]
    CoreFoundation.CFDataGetLength.restype = CFIndex
    CoreFoundation.CFDataGetBytePtr.argtypes = [
        CFDataRef
    ]
    CoreFoundation.CFDataGetBytePtr.restype = c_void_p
    CoreFoundation.CFDataCreate.argtypes = [
        CFAllocatorRef,
        c_char_p,
        CFIndex
    ]
    CoreFoundation.CFDataCreate.restype = CFDataRef
    CoreFoundation.CFDictionaryCreate.argtypes = [
        CFAllocatorRef,
        CFStringRef,
        CFTypeRef,
        CFIndex,
        CFDictionaryKeyCallBacks,
        CFDictionaryValueCallBacks
    ]
    CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
    CoreFoundation.CFDictionaryGetCount.argtypes = [
        CFDictionaryRef
    ]
    CoreFoundation.CFDictionaryGetCount.restype = CFIndex
    CoreFoundation.CFStringGetCStringPtr.argtypes = [
        CFStringRef,
        CFStringEncoding
    ]
    CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
    CoreFoundation.CFStringGetCString.argtypes = [
        CFStringRef,
        c_char_p,
        CFIndex,
        CFStringEncoding
    ]
    CoreFoundation.CFStringGetCString.restype = c_bool
    CoreFoundation.CFStringCreateWithCString.argtypes = [
        CFAllocatorRef,
        c_char_p,
        CFStringEncoding
    ]
    CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
    CoreFoundation.CFNumberCreate.argtypes = [
        CFAllocatorRef,
        CFNumberType,
        c_void_p
    ]
    CoreFoundation.CFNumberCreate.restype = CFNumberRef
    CoreFoundation.CFCopyTypeIDDescription.argtypes = [
        CFTypeID
    ]
    CoreFoundation.CFCopyTypeIDDescription.restype = CFStringRef
    CoreFoundation.CFRelease.argtypes = [
        CFTypeRef
    ]
    CoreFoundation.CFRelease.restype = None
    CoreFoundation.CFErrorCopyDescription.argtypes = [
        CFErrorRef
    ]
    CoreFoundation.CFErrorCopyDescription.restype = CFStringRef
    CoreFoundation.CFErrorGetDomain.argtypes = [
        CFErrorRef
    ]
    CoreFoundation.CFErrorGetDomain.restype = CFStringRef
    CoreFoundation.CFErrorGetCode.argtypes = [
        CFErrorRef
    ]
    CoreFoundation.CFErrorGetCode.restype = CFIndex
    CoreFoundation.CFBooleanGetValue.argtypes = [
        CFBooleanRef
    ]
    CoreFoundation.CFBooleanGetValue.restype = c_byte
    # Type-ID getters take no arguments; used by CFHelpers.native dispatch.
    CoreFoundation.CFDictionaryGetTypeID.argtypes = []
    CoreFoundation.CFDictionaryGetTypeID.restype = CFTypeID
    CoreFoundation.CFNumberGetTypeID.argtypes = []
    CoreFoundation.CFNumberGetTypeID.restype = CFTypeID
    CoreFoundation.CFStringGetTypeID.argtypes = []
    CoreFoundation.CFStringGetTypeID.restype = CFTypeID
    CoreFoundation.CFDataGetTypeID.argtypes = []
    CoreFoundation.CFDataGetTypeID.restype = CFTypeID
    CoreFoundation.CFArrayCreate.argtypes = [
        CFAllocatorRef,
        POINTER(c_void_p),
        CFIndex,
        CFArrayCallBacks
    ]
    CoreFoundation.CFArrayCreate.restype = CFArrayRef
    CoreFoundation.CFArrayGetCount.argtypes = [
        CFArrayRef
    ]
    CoreFoundation.CFArrayGetCount.restype = CFIndex
    CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
        CFArrayRef,
        CFIndex
    ]
    CoreFoundation.CFArrayGetValueAtIndex.restype = CFTypeRef
    CoreFoundation.CFNumberGetType.argtypes = [
        CFNumberRef
    ]
    CoreFoundation.CFNumberGetType.restype = CFNumberType
    CoreFoundation.CFNumberGetValue.argtypes = [
        CFNumberRef,
        CFNumberType,
        c_void_p
    ]
    CoreFoundation.CFNumberGetValue.restype = c_bool
    CoreFoundation.CFDictionaryGetKeysAndValues.argtypes = [
        CFDictionaryRef,
        pointer_p,
        pointer_p
    ]
    CoreFoundation.CFDictionaryGetKeysAndValues.restype = CFIndex
    CoreFoundation.CFGetTypeID.argtypes = [
        CFTypeRef
    ]
    CoreFoundation.CFGetTypeID.restype = CFTypeID
    # Well-known framework constants resolved from the loaded dylib.
    setattr(CoreFoundation, 'kCFAllocatorDefault', CFAllocatorRef.in_dll(CoreFoundation, 'kCFAllocatorDefault'))
    setattr(CoreFoundation, 'kCFBooleanTrue', CFTypeRef.in_dll(CoreFoundation, 'kCFBooleanTrue'))
    kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeDictionaryKeyCallBacks')
    kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeDictionaryValueCallBacks')
    kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
except (AttributeError):
    raise FFIEngineError('Error initializing ctypes')
# Expose the pointer types on the CoreFoundation object for callers.
setattr(CoreFoundation, 'CFDataRef', CFDataRef)
setattr(CoreFoundation, 'CFErrorRef', CFErrorRef)
setattr(CoreFoundation, 'CFArrayRef', CFArrayRef)
# CF constants: kCFNumberCFIndexType == 14, kCFStringEncodingUTF8 == 0x08000100.
kCFNumberCFIndexType = CFNumberType(14)
kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
def _cast_pointer_p(value):
    """
    Reinterprets a ctypes object as a pointer to a pointer
    :param value:
        A ctypes object
    :return:
        A POINTER(c_void_p) object
    """
    reinterpreted = cast(value, pointer_p)
    return reinterpreted
class CFHelpers():
    """
    Namespace for core foundation helpers
    """
    # Maps int(CFTypeID) -> callable that converts that CF type to its
    # Python equivalent; populated via register_native_mapping().
    _native_map = {}
    @classmethod
    def register_native_mapping(cls, type_id, callback):
        """
        Register a function to convert a core foundation data type into its
        equivalent in python
        :param type_id:
            The CFTypeId for the type
        :param callback:
            A callback to pass the CFType object to
        """
        cls._native_map[int(type_id)] = callback
    @staticmethod
    def cf_number_to_number(value):
        """
        Converts a CFNumber object to a python float or integer
        :param value:
            The CFNumber object
        :return:
            A python number (float or integer)
        """
        type_ = CoreFoundation.CFNumberGetType(_cast_pointer_p(value))
        # Select the ctypes output type matching the CFNumberType constant.
        c_type = {
            1: c_byte,               # kCFNumberSInt8Type
            2: ctypes.c_short,       # kCFNumberSInt16Type
            3: ctypes.c_int32,       # kCFNumberSInt32Type
            4: ctypes.c_int64,       # kCFNumberSInt64Type
            5: ctypes.c_float,       # kCFNumberFloat32Type
            6: ctypes.c_double,      # kCFNumberFloat64Type
            7: c_byte,               # kCFNumberCharType
            8: ctypes.c_short,       # kCFNumberShortType
            9: ctypes.c_int,         # kCFNumberIntType
            10: c_long,              # kCFNumberLongType
            11: ctypes.c_longlong,   # kCFNumberLongLongType
            12: ctypes.c_float,      # kCFNumberFloatType
            13: ctypes.c_double,     # kCFNumberDoubleType
            14: c_long,              # kCFNumberCFIndexType
            15: ctypes.c_int,        # kCFNumberNSIntegerType
            16: ctypes.c_double,     # kCFNumberCGFloatType
        }[type_]
        output = c_type(0)
        # CFNumberGetValue writes the number into `output` by reference.
        CoreFoundation.CFNumberGetValue(_cast_pointer_p(value), type_, byref(output))
        return output.value
    @staticmethod
    def cf_dictionary_to_dict(dictionary):
        """
        Converts a CFDictionary object into a python dictionary
        :param dictionary:
            The CFDictionary to convert
        :return:
            A python dict
        """
        dict_length = CoreFoundation.CFDictionaryGetCount(dictionary)
        # Allocate parallel C arrays to receive the keys and values.
        keys = (CFTypeRef * dict_length)()
        values = (CFTypeRef * dict_length)()
        CoreFoundation.CFDictionaryGetKeysAndValues(
            dictionary,
            _cast_pointer_p(keys),
            _cast_pointer_p(values)
        )
        output = {}
        for index in range(0, dict_length):
            # Recursively convert each key and value to native Python.
            output[CFHelpers.native(keys[index])] = CFHelpers.native(values[index])
        return output
    @classmethod
    def native(cls, value):
        """
        Converts a CF* object into its python equivalent
        :param value:
            The CF* object to convert
        :return:
            The native python object
        """
        type_id = CoreFoundation.CFGetTypeID(value)
        if type_id in cls._native_map:
            return cls._native_map[type_id](value)
        else:
            # Unknown type: hand back the raw CF object unchanged.
            return value
    @staticmethod
    def cf_string_to_unicode(value):
        """
        Creates a python unicode string from a CFString object
        :param value:
            The CFString to convert
        :return:
            A python unicode string
        """
        # Fast path: a direct pointer to the UTF-8 bytes, when available.
        string = CoreFoundation.CFStringGetCStringPtr(
            _cast_pointer_p(value),
            kCFStringEncodingUTF8
        )
        if string is None:
            # Fallback: copy into a fixed 1024-byte buffer; assumes the
            # encoded string fits - TODO confirm behavior for longer strings.
            buffer = buffer_from_bytes(1024)
            result = CoreFoundation.CFStringGetCString(
                _cast_pointer_p(value),
                buffer,
                1024,
                kCFStringEncodingUTF8
            )
            if not result:
                raise OSError('Error copying C string from CFStringRef')
            string = byte_string_from_buffer(buffer)
        if string is not None:
            string = string.decode('utf-8')
        return string
    @staticmethod
    def cf_string_from_unicode(string):
        """
        Creates a CFStringRef object from a unicode string
        :param string:
            The unicode string to create the CFString object from
        :return:
            A CFStringRef
        """
        return CoreFoundation.CFStringCreateWithCString(
            CoreFoundation.kCFAllocatorDefault,
            string.encode('utf-8'),
            kCFStringEncodingUTF8
        )
    @staticmethod
    def cf_data_to_bytes(value):
        """
        Extracts a bytestring from a CFData object
        :param value:
            A CFData object
        :return:
            A byte string
        """
        start = CoreFoundation.CFDataGetBytePtr(value)
        num_bytes = CoreFoundation.CFDataGetLength(value)
        # string_at copies num_bytes from the raw pointer into Python bytes.
        return string_at(start, num_bytes)
    @staticmethod
    def cf_data_from_bytes(bytes_):
        """
        Creates a CFDataRef object from a byte string
        :param bytes_:
            The data to create the CFData object from
        :return:
            A CFDataRef
        """
        return CoreFoundation.CFDataCreate(
            CoreFoundation.kCFAllocatorDefault,
            bytes_,
            len(bytes_)
        )
    @staticmethod
    def cf_dictionary_from_pairs(pairs):
        """
        Creates a CFDictionaryRef object from a list of 2-element tuples
        representing the key and value. Each key should be a CFStringRef and each
        value some sort of CF* type.
        :param pairs:
            A list of 2-element tuples
        :return:
            A CFDictionaryRef
        """
        length = len(pairs)
        keys = []
        values = []
        for pair in pairs:
            key, value = pair
            keys.append(key)
            values.append(value)
        # Repack the Python lists into C arrays for the CF call.
        keys = (CFStringRef * length)(*keys)
        values = (CFTypeRef * length)(*values)
        return CoreFoundation.CFDictionaryCreate(
            CoreFoundation.kCFAllocatorDefault,
            _cast_pointer_p(byref(keys)),
            _cast_pointer_p(byref(values)),
            length,
            kCFTypeDictionaryKeyCallBacks,
            kCFTypeDictionaryValueCallBacks
        )
    @staticmethod
    def cf_array_from_list(values):
        """
        Creates a CFArrayRef object from a list of CF* type objects.
        :param values:
            A list of CF* type object
        :return:
            A CFArrayRef
        """
        length = len(values)
        values = (CFTypeRef * length)(*values)
        return CoreFoundation.CFArrayCreate(
            CoreFoundation.kCFAllocatorDefault,
            _cast_pointer_p(byref(values)),
            length,
            kCFTypeArrayCallBacks
        )
    @staticmethod
    def cf_number_from_integer(integer):
        """
        Creates a CFNumber object from an integer
        :param integer:
            The integer to create the CFNumber for
        :return:
            A CFNumber
        """
        integer_as_long = c_long(integer)
        return CoreFoundation.CFNumberCreate(
            CoreFoundation.kCFAllocatorDefault,
            kCFNumberCFIndexType,
            byref(integer_as_long)
        )
| [
"echee@scalyr.com"
] | echee@scalyr.com |
a9f0ba50e1273c6a25a49d2e0bba74d1942c67b8 | b1c7a768f38e2e987a112da6170f49503b9db05f | /stockkeeping/migrations/0021_remove_purchase_stockitem.py | 56aa900ba0363ea26816b298cca975bca2319252 | [] | no_license | Niladrykar/bracketerp | 8b7491aa319f60ec3dcb5077258d75b0394db374 | ca4ee60c2254c6c132a38ce52410059cc6b19cae | refs/heads/master | 2022-12-11T04:23:07.504966 | 2019-03-18T06:58:13 | 2019-03-18T06:58:13 | 176,218,029 | 1 | 0 | null | 2022-12-08T03:01:46 | 2019-03-18T06:27:37 | JavaScript | UTF-8 | Python | false | false | 338 | py | # Generated by Django 2.0.6 on 2018-11-02 11:40
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: removes the `stockitem` field from the
    # `purchase` model. Builds on 0020_auto_20181102_1628 of stockkeeping.
    dependencies = [
        ('stockkeeping', '0020_auto_20181102_1628'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='purchase',
            name='stockitem',
        ),
    ]
| [
"niladry.kar85@gmail.com"
] | niladry.kar85@gmail.com |
ddbb3ae7a19ecd0e0b4eca63b7bc8cf0185801ef | 25d9d79c705d695cf96d9b77aef92825c86fb6cf | /maths/class_simple.py | 383e25be7fb3dcec19e021dca6bc9df3e9e25726 | [] | no_license | ioannispol/Py_projects | dc1d87f3ffbf62c763e78034c0de772ba66460fe | abc506b5cc6a1527bead8f3d1bdd590529166b82 | refs/heads/master | 2023-02-07T22:29:31.420366 | 2020-12-16T20:04:15 | 2020-12-16T20:04:15 | 275,893,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | class Calculations():
    def __init__(self, x, y):
        # Store the two operands that every arithmetic method operates on.
        self.x = x
        self.y = y
def add(self):
return self.x + self.y
def sub(self):
return self.x - self.y
def mult(self):
return self.x * self.y
def div(self):
return self.x / self.y
| [
"giannis.polimenis@gmail.com"
] | giannis.polimenis@gmail.com |
d6e599830cf5eafbff4dbe710bfe7cf85f3b641b | a53a7114bd18bc91534f46e764f10a1907d4b5c2 | /双指针问题/三数之和.py | 05e6045d899f97c6befea289e278da799ab96c0c | [] | no_license | q798010412/untitled2 | ab8f219e820558b84fe70d273106abcff905579b | 24028a60d4cb69c153e6d23d710942c4cc231968 | refs/heads/master | 2022-11-10T01:35:09.468633 | 2020-06-26T04:18:48 | 2020-06-26T04:18:48 | 275,071,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | from typing import List
def threeSum(nums: List[int]) -> List:
    """Return all unique triplets in *nums* that sum to zero (LeetCode 15).

    Sorts *nums* in place, then for each anchor index scans the remaining
    suffix with two pointers. Fixes two bugs in the previous version: the
    triple sum was computed once before the while loop and never refreshed
    (so pointer moves used a stale value and triplets were missed), and the
    duplicate-skip conditions compared against nums[right] instead of each
    pointer's own neighbor.

    :param nums: list of integers (modified in place by the sort)
    :return: list of [a, b, c] triplets with a + b + c == 0, no duplicates
    """
    nums.sort()
    res = []
    n = len(nums)
    for i in range(n - 2):
        # Skip duplicate anchors to avoid repeated triplets.
        if i > 0 and nums[i] == nums[i - 1]:
            continue
        left, right = i + 1, n - 1
        while left < right:
            # Recompute the sum every iteration (the original did not).
            total = nums[i] + nums[left] + nums[right]
            if total > 0:
                right -= 1
            elif total < 0:
                left += 1
            else:
                res.append([nums[i], nums[left], nums[right]])
                # Skip duplicates adjacent to each pointer.
                while left < right and nums[left] == nums[left + 1]:
                    left += 1
                while left < right and nums[right] == nums[right - 1]:
                    right -= 1
                left += 1
                right -= 1
    return res
# Example run (LeetCode 15 sample input).
a = threeSum([-1, 0, 1, 2, -1, -4])
print(a)
| [
"798010412@qq.com"
] | 798010412@qq.com |
b906134b607c89cf803ce1ab88827ca311706925 | 9aa8dafe2fd0d05ba5041a888df2bcde1f139375 | /house-prices-advanced-regression-techniques.py | 5ad18042cfa0528e9283b7c442de9b11701bff51 | [] | no_license | maxwellsarpong/house_prices_predictions | 543805a046d044df5099f3630ff5588ec25c60e7 | a7272b3c542028a86bdb4f2eeac0b3e8a6e91815 | refs/heads/master | 2022-05-25T02:16:01.340007 | 2020-05-01T18:55:38 | 2020-05-01T18:55:38 | 260,523,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,271 | py | #!/usr/bin/env python
# coding: utf-8
# In[92]:
# importing the modules
import pandas as pd
import numpy as np
import matplotlib as plt
from sklearn.model_selection import train_test_split
# importing the data
train = pd.read_csv('C:/Users/Big-Max/Desktop/BIG-MAX/BOOKS\Machine Learning/DATA SCIENCE/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('C:/Users/Big-Max/Desktop/BIG-MAX/BOOKS\Machine Learning/DATA SCIENCE/house-prices-advanced-regression-techniques/test.csv')
sub = pd.read_csv('C:/Users/Big-Max/Desktop/BIG-MAX/BOOKS\Machine Learning/DATA SCIENCE/house-prices-advanced-regression-techniques/sample_submission.csv')
# In[53]:
# filling the missing values in both the train and test with 0
x = train.fillna(0)
y = test.fillna(0)
# we display a summary of the data
train.describe()
# In[17]:
# we show a correlation between data
train.corr()
# In[297]:
df = x.loc[:,['OverallQual','OverallCond','YearBuilt','SalePrice']]
df.plot.area(x = 'OverallQual', y = 'SalePrice')
# In[44]:
# a scatter plot of relationship between OverallQual and SalePrice
df.plot.scatter(x = 'OverallQual', y = 'SalePrice')
# In[305]:
# relationship between overall condition and price
df.plot.scatter(x = 'OverallCond', y = 'SalePrice')
# In[74]:
# Select the numeric feature columns (plus Id and the SalePrice target).
# Two column names are corrected to match the Kaggle house-prices dataset:
# '2ndFlrSF ' (trailing space) -> '2ndFlrSF' and 'KitchenAbvG' ->
# 'KitchenAbvGr'; DataFrame.loc raises KeyError for labels not present.
x_2 = train.loc[:, ['Id', 'MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF', 'GrLivArea', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'Fireplaces', 'GarageYrBlt', 'GarageCars', 'GarageArea', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'PoolArea', 'MiscVal', 'MoSold', 'YrSold', 'SalePrice']]
# Replace missing values with 0 in the modelling frame.
x_3 = x_2.fillna(0)
# In[120]:
xx = x_3.drop(['SalePrice'], axis = 1)
yy = np.ravel(x_3['SalePrice'])
# In[190]:
# splitting the data
x_train,x_test,y_train,y_test = train_test_split(xx, yy, test_size= 0.4, random_state =42)
# In[191]:
############################### training the model with SUPPORT VECTOR MACHINE ####################################
from sklearn.svm import SVR
model = SVR(kernel = 'linear')
model.fit(x_train,y_train)
# In[192]:
# predicting the sales
pred = model.predict(x_test)
pred = pd.DataFrame(pred, columns=['Predicted_sales'])
# In[193]:
# measuring the model MSE
from sklearn.metrics import r2_score,mean_squared_error
mse = mean_squared_error(y_test, pred)
mse
# In[194]:
# RMSE
rmse = np.sqrt(mse)
rmse
# In[216]:
# SELECTING THE COLUMN ID
Id = x_3['Id']
Id = Id.dropna()
Id = pd.DataFrame(Id)
# In[220]:
# JOINING THE COLUMNS
result = pd.concat([Id, pred], axis = 1)
# In[221]:
# PREDICTING THE RESULTS
result = result.dropna()
result
# In[233]:
# TESTING WITH NEW DATA
new = xx.groupby('Id')
a = new.get_group(1460)
# In[237]:
# PREDICTING
z = model.predict(a)
z = pd.DataFrame(z)
z
# In[232]:
a
# In[279]:
##################################### new model GAUSSIAN ##############################################
from sklearn.naive_bayes import GaussianNB
model_2 = GaussianNB()
# In[289]:
# model training
model_2.fit(x_train, y_train)
# In[290]:
# model predicting
G_pred = model_2.predict(x_test)
G_pred = pd.DataFrame(G_pred, columns=['Predicted_sales'])
# In[291]:
from sklearn.metrics import r2_score,mean_squared_error
# Score the GaussianNB predictions. The previous code passed `pred` (the
# SVR model's predictions) instead of `G_pred`, so the reported MSE never
# reflected this model.
G_test = mean_squared_error(y_test, G_pred)
# In[292]:
G_test
# In[293]:
G_test_sq = np.sqrt(G_test)
G_test_sq
# In[294]:
resul_2 = pd.concat([Id, G_pred], axis = 1)
result_2 = resul_2.dropna()
result_2
# In[295]:
########################## LINEAR REGRESSION#####################################################
from sklearn.linear_model import LinearRegression
model_3 = LinearRegression(normalize=True)
# In[254]:
model_3.fit(x_train,y_train)
# In[272]:
LR_pred = model_3.predict(x_test)
LR_pred = pd.DataFrame(LR_pred, columns=['Predicted_sales'])
# In[273]:
from sklearn.metrics import r2_score,mean_squared_error
LR_mse = mean_squared_error(y_test,LR_pred)
LR_mse
# In[274]:
LR_rmse = np.sqrt(LR_mse)
LR_rmse
# In[275]:
resul_3 = pd.concat([Id,LR_pred], axis = 1)
result_3 =resul_3.dropna()
result_3
# In[ ]:
| [
"maxwellsarpong07@gmail.com"
] | maxwellsarpong07@gmail.com |
3edd332be7516c04587484c513e5dbf4f7470e91 | 123ed4d9abce7adee63a9d22839751f21df1ac13 | /bread_crumbs.py | ba7530f0b8923205748ae6119af9e0cc2a2c32f2 | [] | no_license | alexey-kott/data_structures_and_algorythms | 06e2c939fa27fa449b5de9c828d47683696b9cbe | 6fc906025f1e40b5eccecf4a5e3d228dff095a98 | refs/heads/master | 2022-01-05T15:49:55.834642 | 2019-05-13T16:32:48 | 2019-05-13T16:32:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | from typing import Dict, List
a = {
'b': 4,
'c': {
'd': 3,
'e': 5,
'f': {
'g': 8,
}
}
}
b = [('b', 4), ('c.d', 3), ('c.e', 5), ('c.f.g', 8)]
def extract(node: Dict, path: str = '') -> List:
    """Flatten a nested dict into (dotted.path, value) "breadcrumb" pairs.

    Nested dicts are recursed into; any non-dict value is treated as a
    leaf. (Generalized: the previous version only kept int leaves and
    silently dropped str/float values.)

    :param node: mapping to flatten
    :param path: dotted prefix accumulated by recursive calls
    :return: list of (path, value) tuples in insertion order
    """
    paths = []
    for key, value in node.items():
        nested_path = '{}.{}'.format(path, key) if path else key
        if isinstance(value, dict):
            paths.extend(extract(value, nested_path))
        else:
            # Any non-dict value is a leaf, not just ints.
            paths.append((nested_path, value))
    return paths
if __name__ == "__main__":
    # Smoke test: flattening `a` must reproduce the expected pairs in `b`.
    assert b == extract(a)
| [
"aleksey.rautkin@onetwotrip.com"
] | aleksey.rautkin@onetwotrip.com |
9611ea5034446ad9760170ff5cf6b279139456de | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/pydev/pydev_pysrc.py | b9ed49e8005e3b547bd967bac75b0d83e7dd1861 | [
"Apache-2.0",
"EPL-1.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 100 | py | '''An empty file in pysrc that can be imported (from sitecustomize) to find the location of pysrc''' | [
"dmitry.trofimov@jetbrains.com"
] | dmitry.trofimov@jetbrains.com |
e3bf9755f80a733381018bc4cff5a8a3ed3e0097 | e90cdfe33c204987372fd56177df70c09436c003 | /apps/users/views.py | a8f7e3956227245775925c3a9831f5947f1ab642 | [] | no_license | Aliyaseidalieva/django_cinema | c4f2884197a4d4949317557e26325e97d451f71c | feedfe9410834c9c0fe0a939a56a4ae57153f7ea | refs/heads/master | 2022-12-18T08:52:01.813359 | 2020-09-19T09:23:37 | 2020-09-19T09:23:37 | 270,371,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | from rest_framework.generics import (ListCreateAPIView,RetrieveUpdateDestroyAPIView,)
from rest_framework.permissions import IsAuthenticated
from .models import userProfile
from .permissions import IsOwnerProfileOrReadOnly
from .serializers import userProfileSerializer
class UserProfileListCreateView(ListCreateAPIView):
    # GET lists all profiles; POST creates one. Requires authentication.
    queryset=userProfile.objects.all()
    serializer_class=userProfileSerializer
    permission_classes=[IsAuthenticated]
    def perform_create(self, serializer):
        # Attach the authenticated requester as the owner of the new profile.
        user=self.request.user
        serializer.save(user=user)
class userProfileDetailView(RetrieveUpdateDestroyAPIView):
    # Retrieve/update/delete a single profile. IsOwnerProfileOrReadOnly
    # presumably restricts writes to the profile owner - confirm in
    # permissions.py.
    queryset=userProfile.objects.all()
    serializer_class=userProfileSerializer
    permission_classes=[IsOwnerProfileOrReadOnly,IsAuthenticated]
| [
"aseidalieva@askartec.com"
] | aseidalieva@askartec.com |
4b5c2feae0827ca54d89ebebb2759cf9ee89b6ef | 65f43c876a05a3e741716ee51a3a0d66833b474f | /NovoService/sumario.py | 99d872c478ee02cbfdb01c985c344312c2c510bf | [] | no_license | lgpbentes/OpenLab | cb2638583d1a2e194172a8bec857711feb009be4 | 148d8bc5844e17437afe62f89aa5710f4a476122 | refs/heads/master | 2020-12-07T00:47:01.536609 | 2017-04-21T14:47:07 | 2017-04-21T14:47:07 | 66,236,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,344 | py |
def LoadSummarizerByUser(user, timestamp, event, idView, sumarioL):
    """Update the per-user navigation summary list in place and return it.

    Each entry of *sumarioL* is a string "user;timestamp;delta;view".
    Behavior:
    - unknown user: a fresh record with delta 0 is appended;
    - known user + "click" event: the record restarts at *timestamp* with
      delta 0 and the new view;
    - known user + any other event: delta becomes the time elapsed between
      *timestamp* and the record's stored start time (units presumably
      milliseconds - confirm with the caller).

    Cleanup over the previous version: dead commented-out debug prints
    removed, the duplicated empty-list branch (identical to the
    user-not-found branch) folded away, and the redundant alias variable
    dropped.
    """
    # Locate this user's record, if present.
    index = None
    fields = None
    for i, line in enumerate(sumarioL):
        parts = line.split(";")
        if parts[0] == user:
            index = i
            fields = parts
            break
    if fields is None:
        # First sighting of this user: start a record with delta 0.
        sumarioL.append("%s;%s;0;%s" % (user, timestamp, idView))
    elif event == "click":
        # A click restarts the timer at the new timestamp and view.
        sumarioL[index] = "%s;%s;0;%s" % (user, timestamp, idView)
    else:
        # Keep the stored start time; delta is the elapsed time so far.
        delta = int(timestamp) - int(fields[1])
        sumarioL[index] = "%s;%s;%d;%s" % (user, fields[1], delta, idView)
    return sumarioL
def LoadQuestionTime(user, timeQuestions, idQuestion, timestamp):
    """Accumulate, in place, the time each user spends on questions 1-3.

    Each entry of *timeQuestions* has the shape
    [[user, current_question], slot1, slot2, slot3] where
    slot_i == [last_timestamp, accumulated, accumulated / 1000.0]
    (timestamps presumably in milliseconds - confirm with the caller).
    *idQuestion* must be 1, 2 or 3 since it indexes the slots directly.

    Cleanup over the previous version: the flag-variable membership scan is
    replaced with any(), dead commented-out debug lines are removed, and
    the repeated triple indexing is bound to a local slot alias.
    """
    # Register the user on first sight, tracking the current question.
    if not any(entry[0][0] == user for entry in timeQuestions):
        timeQuestions.append([[user, idQuestion], [0, 0, 0], [0, 0, 0], [0, 0, 0]])
    for entry in timeQuestions:
        if entry[0][0] != user:
            continue
        slot = entry[idQuestion]
        if entry[0][1] == idQuestion:
            if slot[0] == 0:
                # First tick on this question: just record the start time.
                slot[0] = timestamp
            else:
                # Subsequent tick: add the elapsed interval, roll the
                # reference timestamp forward, refresh the seconds total.
                slot[1] += timestamp - slot[0]
                slot[0] = timestamp
                slot[2] = slot[1] / 1000.0
        else:
            # The user switched questions: start timing the new one.
            slot[0] = timestamp
            entry[0][1] = idQuestion
    return timeQuestions
#print LoadSummarizerByUser("0001", "2000", "null", "conteudo", [])
#print LoadSummarizerByUser("0002", "9000", "null", "conteudo", ['0001;2000;0;conteudo'])
#print LoadSummarizerByUser("0001", "10000", "null", "conteudo", ['0001;2000;0;conteudo', '0002;9000;0;conteudo']) | [
"aac@icomp.ufam.edu.br"
] | aac@icomp.ufam.edu.br |
91de9cf160a22a8ed4e613cd2c4972ab7439f865 | 359cb4d3dc4249c9d9c20a632d6a06b82c404cad | /mysql_config.py | 9dd62005b234323f876b7025833d4e03a270f786 | [] | no_license | CUrW-SL/from-old-curw-schema-to-new-schema | 0b275e007d887e8aef3afad1b3ce2415eda7af89 | 5bdb90ac81288b371341f3ad0ee74189c88468db | refs/heads/master | 2021-04-03T04:05:50.693288 | 2018-04-19T06:53:21 | 2018-04-19T06:53:21 | 124,373,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | OLD_DB_CONFIG = {
'host': '',
'port': 3306,
'user': 'curw_user',
'password': '',
'db': 'curw'
}
# Destination connection for the new curw schema. host/password are blank
# placeholders - fill them locally and never commit real credentials.
NEW_DB_CONFIG = {
    'host': '',
    'port': 3306,
    'user': 'curw_new_user',
    'password': '',
    'db': 'curw_new'
}
# Presumably the earliest timestamp to carry over during the old-to-new
# schema migration - confirm against the scripts importing this module.
START_DATE_TIME = '2018-01-01 00:00:00'
| [
"madumalt@gmail.com"
] | madumalt@gmail.com |
92fa0a39166a1280f0826dd7f76fbed92352e96f | a44e975490591fc0d468e6f3a4505d8731e11111 | /packaged_dags/deanslist_dags/deanslist_daily_endpoints.py | 819e326151e3cda75cd2b2e6eae71263b815a768 | [
"Apache-2.0"
] | permissive | Altons/silo-airflow | 1b5673d02f67d853c2258d9393f4b8b29d6d92cc | 21166392404152d3e8056f53065b7e1bb9a6185b | refs/heads/master | 2022-12-15T04:07:09.924966 | 2020-09-15T15:48:23 | 2020-09-15T15:48:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,953 | py | # The first step os to follow Charlie Bini's lead [here](https://github.com/TEAMSchools/sync_deanslist).
#
# However, we will combine this into one file rather than a module and a scipt file since we will be using Airflow, it will be easier to maintain in a single file.
#
# The first chunk comes from the config file. You'll need the following for the config:
# * `save_path`: where the data pulled from the DL API will be stored locally
# * `api_keys`: dict of DL API keys for each campus
# * `base_url`: string of DeansList base ULR for your schools
# * `end_points`: dict of DL endpoints to hit
import base64
import ast
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import pytz
import os
from os.path import expanduser
import json
import requests
import logging
import zipfile
from deanslist_api.dl import get_endpoint, row_count_branch
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import BranchPythonOperator, PythonOperator
from airflow.contrib.operators.file_to_gcs import FileToGoogleCloudStorageOperator
from airflow.models import Variable
"""
Config/DeansList variables
"""
home = expanduser("~")
SAVE_PATH = '{0}/gcs/data/deanslist/'.format(home)
BASE_URL = 'https://kippchicago.deanslistsoftware.com'
API_KEYS = Variable.get("dl_api_keys")
API_KEYS = ast.literal_eval(API_KEYS)
endpoints = [
## FLAT JSON - PARAMETERS ##
#{'endpoint':'/api/beta/export/get-behavior-data.php',
# 'name':'behavior',
# 'params':{'sdt':'2018-08-20', 'edt':yesterday_str, 'UpdatedSince':yesterday_str, 'IncludeDeleted':'Y'}},
## FLAT JSON - NO PARAMETERS ##
{'endpoint':'/api/v1/referrals', 'name':'referrals'},
#{'endpoint':'/api/beta/export/get-comm-data.php', 'name':'communication'},
#{'endpoint':'/api/v1/followups', 'name':'followups'},
#{'endpoint':'/api/beta/export/get-roster-assignments.php', 'name':'roster_assignments'},
#{'endpoint':'/api/v1/lists', 'name':'lists'},
#{'endpoint':'/api/v1/rosters', 'name':'rosters_all'},
{'endpoint':'/api/beta/export/get-users.php', 'name':'users'},
## CONTAIN NESTED JSON ##
{'endpoint':'/api/v1/incidents', 'name':'incidents'},
{'endpoint':'/api/v1/suspensions', 'name':'suspensions'},
# ## UNUSED ##
# {'endpoint':'/api/beta/export/get-homework-data.php', 'name':'homework', 'array_cols':[]},
# {'endpoint':'/api/v1/students', 'name':'students', 'nested':0},
# {'endpoint':'/api/v1/daily-attendance',
# 'name':'daily_attendance',
# 'params':{'sdt':'2018-08-20', 'edt':yesterday_str, 'include_iac':'Y'}},
# {'endpoint':'/api/v1/class-attendance', 'name':'class_attendance', 'nested':0},
# {'endpoint':'/api/v1/terms', 'name':'terms', 'nested':1},
# {'endpoint':'/api/beta/bank/get-bank-book.php', 'name':'points_bank', 'nested':1},
# {'endpoint':'/api/v1/lists/{ListID}', 'name':'list_sessions_all', 'nested':1},
# {'endpoint':'/api/v1/lists/{ListID}/{SessionID}', 'name':'list_sessions_id', 'nested':1},
# {'endpoint':'/api/v1/lists/{ListID}/{SessionDate}', 'name':'list_sessions_date', 'nested':1},
# {'endpoint':'/api/v1/rosters/(RosterID)', 'name':'rosters_single', 'nested':1},
]
"""
Airflow specific DAG set up #
"""
# Airflow task defaults shared by every task in this DAG.
default_args = {
    "owner": "airflow",
    "depends_on_past": False,
    "start_date": datetime(2019, 8, 19),
    "email": ["chaid@kippchicago.org"],
    "email_on_failure": True,
    "email_on_retry": False,
    "retries": 2,
    "retry_delay": timedelta(minutes=2),
    # provide_context passes the Airflow context kwargs to PythonOperator callables.
    "provide_context": True
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2019, 6, 2014),
}
# Daily run at 01:15; DAG id carries the start date by convention.
dag = DAG(
    "silo_dl_daily_endpoints_2019-08-19",
    default_args=default_args,
    schedule_interval='15 1 * * *')
# For each DeansList endpoint, build a three-stage pipeline:
#   pull endpoint -> branch on row count -> upload to GCS (or no-op when empty).
with dag:
    t1 = DummyOperator(task_id = "start_dl", dag=dag)
    #Loop through endpoints
    for i, e in enumerate(endpoints):
        # get endpoint name
        endpoint_name = e['name']
        # task ids derived from the endpoint name so each endpoint gets its own chain
        get_enpdpoints_task_id = "get_{0}_dl_endpoint".format(endpoint_name)
        branch_task_id = "branch_row_count_{0}_dl".format(endpoint_name)
        file_to_gcs_task_id = "{0}_to_gcs".format(endpoint_name)
        zero_branch_task_id = "{0}_zero_row".format(endpoint_name)
        # NOTE(review): this branch is dead code -- `sdt` is assigned but never
        # used and the bare `e['params']` expression has no effect.  It looks
        # like an unfinished attempt to template the start date; confirm intent.
        if 'params' in e.keys():
            if 'sdt' in e['params']:
                sdt = '{{ execution_date }}'
                e['params']
        # Stage 1: pull the endpoint and save it locally (xcoms carry the file path/name).
        t2 = PythonOperator(
            task_id = get_enpdpoints_task_id,
            python_callable = get_endpoint,
            op_args = [e, SAVE_PATH, BASE_URL, API_KEYS],
        )
        # Stage 2: route to upload or no-op depending on the pulled row count.
        t_branch = BranchPythonOperator(
            task_id = branch_task_id,
            python_callable = row_count_branch,
            op_args = [get_enpdpoints_task_id, file_to_gcs_task_id, zero_branch_task_id],
            trigger_rule = "all_done"
        )
        # Stage 3a: upload the saved file into the deanslist bucket under <endpoint>/.
        t_gcs = FileToGoogleCloudStorageOperator(
            task_id = file_to_gcs_task_id,
            google_cloud_storage_conn_id = 'gcs_silo',
            bucket = "deanslist",
            src = "{{ task_instance.xcom_pull(task_ids='" + get_enpdpoints_task_id + "', key='dl_file_path' )}}",
            #dst = "TEST/" + endpoint_name + "/{{ task_instance.xcom_pull(task_ids='" + get_enpdpoints_task_id + "', key='dl_file_name') }}",
            dst = endpoint_name + "/{{ task_instance.xcom_pull(task_ids='" + get_enpdpoints_task_id + "', key='dl_file_name') }}",
            dag = dag
        )
        # Stage 3b: no-op landing task taken when the endpoint returned zero rows.
        t_zero_row = DummyOperator(
            task_id =zero_branch_task_id
        )
        t2.set_upstream(t1)
        t2.set_downstream(t_branch)
        t_branch.set_downstream(t_gcs)
        t_branch.set_downstream(t_zero_row)
| [
"chaid@kippchicago.org"
] | chaid@kippchicago.org |
c2284400df2ddfb3b9094bda2309b792de2e3b21 | 514e779523e1f9bcc8efd33b7c7b437833a92b6f | /main/util/xmlUtils.py | 6b544aa5c475cb0126305f81e62a3333f116665b | [] | no_license | fushichenmu/test | db34ca3dd992641b706bd5a01e6e19d6774343e0 | d317f08bd44fe29859c313c96f275512acfd9b65 | refs/heads/master | 2020-09-08T10:58:59.085459 | 2019-11-19T03:44:10 | 2019-11-19T03:44:10 | 221,114,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | import xmltodict
import json
def xml2dict(xml_file, rootTable='xml'):
    """Parse an XML file into a plain dict.

    The xmltodict result (an OrderedDict tree) is round-tripped through
    JSON so the caller gets ordinary dicts/lists.  When *rootTable* is
    truthy, only the subtree under that root element is returned.
    """
    with open(xml_file, encoding='utf-8') as handle:
        raw_xml = handle.read()
    # ensure_ascii=False keeps non-ASCII (e.g. Chinese) text intact.
    parsed = json.loads(json.dumps(xmltodict.parse(raw_xml), ensure_ascii=False))
    return parsed[rootTable] if rootTable else parsed
def dict2xml(self,file_Str_Dict):
    """Convert a dict (or JSON string) stored on *self* to pretty XML text.

    NOTE(review): this is a module-level function that takes ``self`` and
    calls ``self.init_data`` / reads ``self.data``, neither of which is
    defined in this module -- it looks like a method pasted out of a
    class; confirm the intended owner object.

    Returns the XML string, or None when the string input is not valid JSON.
    """
    self.init_data(file_Str_Dict)
    if isinstance(self.data,str):
        try:
            self.data=json.loads(self.data)
        except:
            # Message (zh): "argument is neither json nor dict, cannot convert"
            print('传入参数非json或dict类型,无法转换。')
            # traceback.print_exc()
            return None
    return xmltodict.unparse(self.data,pretty=True,encoding='utf-8')
if __name__ == '__main__':
    # Quick manual smoke test: parse a sample file and print the dict.
    convertedDict = xml2dict('second_params.xml')
    print(convertedDict)
| [
"xinhu519@aliyun.com"
] | xinhu519@aliyun.com |
0c75fb6bf1bbf0e8a76928ce29bf5b4f0a014996 | 6a4ebebbe0d7f81efc4f1749054a2ed7242c0e58 | /setup.py | 345a9c9073ffb87c82e6fbcc413a8d8703519644 | [
"LicenseRef-scancode-public-domain"
] | permissive | skylarker/granary | 6e192ecd2475febb3585728d5ba7afe34742107d | 2fd8ef017588b955e78606242ce582849cfd57ac | refs/heads/master | 2020-12-26T21:35:04.155528 | 2016-04-18T18:15:30 | 2016-04-18T18:15:30 | 56,891,160 | 1 | 0 | null | 2016-04-22T23:43:09 | 2016-04-22T23:43:09 | null | UTF-8 | Python | false | false | 1,868 | py | """setuptools setup module for granary.
Docs:
https://packaging.python.org/en/latest/distributing.html
http://pythonhosted.org/setuptools/setuptools.html
Based on https://github.com/pypa/sampleproject/blob/master/setup.py
"""
import unittest
from setuptools import setup, find_packages
from setuptools.command.test import ScanningLoader
class TestLoader(ScanningLoader):
    """ScanningLoader that first makes App Engine SDK libraries importable.

    NOTE(review): ``super(ScanningLoader, self)`` skips ScanningLoader's own
    __init__ and dispatches to its parent in the MRO -- the usual idiom is
    ``super(TestLoader, self)``; confirm the skip is intentional.
    """
    def __init__(self, *args, **kwargs):
        super(ScanningLoader, self).__init__(*args, **kwargs)
        # webutil/test/__init__.py makes App Engine SDK's bundled libraries importable.
        import oauth_dropins.webutil.test
setup(name='granary',
version='1.3.1',
description='Free yourself from silo API chaff and expose the sweet social data foodstuff inside in standard formats and protocols!',
long_description=open('README.rst').read(),
url='https://github.com/snarfed/granary',
packages=find_packages(),
include_package_data=True,
author='Ryan Barrett',
author_email='granary@ryanb.org',
license='Public domain',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'License :: OSI Approved :: MIT License',
'License :: Public Domain',
'Programming Language :: Python :: 2',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='facebook twitter google+ twitter activitystreams html microformats2 mf2 atom',
install_requires=[
# Keep in sync with requirements.txt!
'beautifulsoup4',
'html2text',
'jinja2',
'mf2py>=0.2.7',
'mf2util>=0.3.3',
'oauth-dropins>=1.3',
'requests<2.6.0',
],
test_loader='setup:TestLoader',
test_suite='granary.test',
)
| [
"git@ryanb.org"
] | git@ryanb.org |
92d2163dee74e757b3146c366d4d704400564aae | 6b65a6e495b60bd5610411ed382c7b1cb96d041c | /djangoApi/User/migrations/0009_delete_block.py | 9d9f8794d460dce13231b76cba25bf4a15fd61bf | [] | no_license | garggaurav526/moodflik-api | 04936ac94a26b1b816fc4aed1c825b8382186313 | 77b7953f55533847bb3785fbb3ca9004a6f032db | refs/heads/master | 2023-06-29T23:29:09.903636 | 2021-06-15T15:09:53 | 2021-06-15T15:09:53 | 363,434,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | # Generated by Django 3.2.4 on 2021-06-15 12:42
from django.db import migrations
class Migration(migrations.Migration):
    """Delete the ``Block`` model from the ``User`` app (drops its table)."""
    dependencies = [
        ('User', '0008_block_blocked_user'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Block',
        ),
    ]
| [
"sm.gauravsingh@gmail.com"
] | sm.gauravsingh@gmail.com |
f2a797d1c550dbc9843f6fe14e7ad572536407a7 | a24b8446639f2157e2ecbdb7c11eda8e4e4344cc | /Configurations/UserConfigs/2018_AntiIso/ST_t_topConfig.py | 82fbbb685a0a6b4499f198945b017c0e1a347268 | [] | no_license | aloeliger/ReweightScheme | dcebc5651094d8d3da65885c59dae4070983624a | 05c9783fcf8e024fd26a6dbb9b1fbab4aee3c7f4 | refs/heads/master | 2021-12-11T16:10:12.881863 | 2021-08-27T21:02:21 | 2021-08-27T21:02:21 | 215,565,834 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,797 | py | import ROOT
from Configurations.Weights.CrossSectionWeightingModule.CrossSectionWeight import crossSectionWeight
from Configurations.Weights.MuIDIsoReweightingModule.MuIDIsoWeight import muIDIsoWeight_2018 as muIDIsoWeight
from Configurations.Weights.MuTrackingWeightModule.MuTrackingWeight import muTrackingWeight_2018 as muTrackingWeight
from Configurations.Weights.PileupWeightingModule.PileupWeight import pileupWeight_2018 as pileupWeight
from Configurations.Weights.TauFakeRateWeightModule.eTauFakeRateWeight import eTauFakeRateWeight_2018 as eTauFakeRateWeight
from Configurations.Weights.TauIDModule.TauIDWeight import tauIDWeight_2018 as tauIDWeight
from Configurations.Weights.TriggerSFModule.TriggerWeight import triggerWeight_2018 as triggerWeight
from Configurations.Weights.bTaggingWeightModule.bTaggingWeight import bTaggingWeight_2018
from Configurations.Weights.PrefiringWeightModule.PrefiringWeight import PrefiringWeighting
from Configurations.ConfigDefinition import ReweightConfiguration
# Reweighting configuration for the 2018 anti-isolation single-top (t-channel,
# top) sample.  Builds one ReweightConfiguration and attaches the per-event
# weights to apply.
EWKConfiguration = ReweightConfiguration()
EWKConfiguration.name = "ST_t_top"
EWKConfiguration.inputFile = "/data/aloeliger/SMHTT_Selected_2018_AntiIso_Deep/ST_t_top.root"
crossSectionWeight.sample = 'ST_t_top'
crossSectionWeight.year = '2018'
# Total generated events are read from bin 2 of the sample's eventCount
# histogram; used to normalize the cross-section weight.
totalEventsFile = ROOT.TFile.Open("/data/aloeliger/SMHTT_Selected_2018_AntiIso_Deep/ST_t_top.root")
crossSectionWeight.totalEvents = totalEventsFile.eventCount.GetBinContent(2)
totalEventsFile.Close()
pileupWeight.year = '2018'
pileupWeight.sample = 'ST_t_top'
pileupWeight.InitPileupWeightings(pileupWeight)
# Weights applied multiplicatively per event; commented entries are
# deliberately disabled for this sample.
EWKConfiguration.listOfWeights = [
    crossSectionWeight,
    muIDIsoWeight,
    muTrackingWeight,
    pileupWeight,
    eTauFakeRateWeight,
    #tauIDWeight,
    triggerWeight,
    bTaggingWeight_2018,
    #PrefiringWeighting,
]
| [
"aloelige@cern.ch"
] | aloelige@cern.ch |
57434be9b73618bccc3f47e200f121e553a50545 | 51066a6a08b2d15b3e320293ff7a5e8a84e8267f | /Saving_ChairDataset.py | 3a2c232a7a4bfc0a2d5cfe04db46c2fbc8b4920f | [
"BSD-3-Clause"
] | permissive | cocolab-projects/reference-game-exploration | 40dd54a01b053e5390b16e9e55bbface6bb2bfe1 | eb20b8c6a7d81cfa8063bc89d9ad41a728f61ff9 | refs/heads/master | 2020-05-29T17:32:31.440573 | 2019-08-25T08:59:53 | 2019-08-25T08:59:53 | 189,275,222 | 0 | 1 | null | 2019-07-15T23:13:35 | 2019-05-29T18:06:40 | Python | UTF-8 | Python | false | false | 26,673 | py | from __future__ import print_function
import os
import json
from copy import deepcopy
from tqdm import tqdm
import numpy as np
import pandas as pd
from PIL import Image
from utils import OrderedCounter
from nltk import sent_tokenize, word_tokenize
import torch
import torch.utils.data as data
from torchvision import transforms
from collections import defaultdict
FILE_DIR = os.path.realpath(os.path.dirname(__file__))
RAW_DIR = os.path.join(FILE_DIR, 'data')
NUMPY_DIR = '/mnt/fs5/rona03/chairs_img_npy/numpy/numpy/'
SOS_TOKEN = '<sos>'
EOS_TOKEN = '<eos>'
PAD_TOKEN = '<pad>'
UNK_TOKEN = '<unk>'
TRAINING_PERCENTAGE = 64 / 100
TESTING_PERCENTAGE = 20 / 100
MIN_USED = 2
MAX_LEN = 10
def load_char_id_to_utterance_map(context_condition='all'):
    """Load aligned (chair_id, utterance) arrays from the chairs2k CSV.

    Keeps only correct rounds spoken by the speaker; when
    *context_condition* is not 'all', rows are further restricted to that
    context condition.  Returns two aligned numpy arrays.
    """
    csv_path = os.path.join(RAW_DIR, 'chairs2k_group_data.csv')
    with open(csv_path) as fp:
        frame = pd.read_csv(fp)
    keep = (frame['correct'] == True) & (frame['communication_role'] == 'speaker')
    frame = frame[keep]
    if context_condition != 'all':
        frame = frame[frame['context_condition'] == context_condition]
    return np.asarray(frame['selected_chair']), np.asarray(frame['text'])
class ChairDataset(data.Dataset):
    """Text-only dataset over the chairs2k reference-game transcripts.

    Loads chair names/images from NUMPY_DIR and the round-level CSV from
    RAW_DIR (cached as .npy), splits rounds into Train/Validation/Test
    (64/16/20 via TRAINING_PERCENTAGE/TESTING_PERCENTAGE), builds a word
    vocabulary, and yields (target_tokens, input_tokens, length) per round.

    split_mode 'easy'/'far': each target chair's rounds are split across
    the three sets.  'hard'/'close': the set of target chairs itself is
    split, so held-out splits contain unseen chairs.
    (`image_size`, `image_transform` and `dataVal` are accepted for
    interface compatibility; this text-only class does not use images.)
    """

    def __init__(self, vocab=None, split='Train', context_condition='all',
                 split_mode='easy', image_size=32, image_transform=None, dataVal=None):
        super(ChairDataset, self).__init__()
        # 'easy'/'hard' are aliases for the canonical 'far'/'close' names.
        if (split_mode == 'easy'):
            split_mode = 'far'
        if (split_mode == 'hard'):
            split_mode = 'close'
        assert split_mode in ['far', 'close']
        self.names = np.load(os.path.join(NUMPY_DIR, 'names.npy'))
        self.images = np.load(os.path.join(NUMPY_DIR, 'images.npy'))
        # names.npy holds bytes; decode so CSV chair names compare equal.
        chair_list = []
        for i in self.names:
            i = str(i.decode('utf-8'))
            chair_list.append(i)
        self.names = chair_list
        self.context_condition = context_condition
        self.split_mode = split_mode
        self.split = split
        print('loading CSV')
        npy_path = os.path.join(RAW_DIR, 'chairs2k_group_data.npy')
        if not os.path.exists(npy_path):
            # First run: filter the raw CSV down to correct speaker rounds
            # and cache the result as .npy for future runs.
            csv_path = os.path.join(RAW_DIR, 'chairs2k_group_data.csv')
            df = pd.read_csv(csv_path)
            df = df[df['correct'] == True]
            df = df[df['communication_role'] == 'speaker']
            df = df[['chair_a', 'chair_b', 'chair_c', 'target_chair', 'text']]
            df = df.dropna()
            data = np.asarray(df)
            # drop rows that reference chairs without a rendered image
            data = self.clean_data(data, self.names)
            np.save(npy_path, data)
        else:
            data = np.load(npy_path, allow_pickle=True)
        # data_ex: True when a cached split file already exists for `split`.
        self.data_ex = False
        if (self.split == 'Train'):
            npy_path = os.path.join(RAW_DIR, 'chairs2k_group_data_Train.npy')
            if os.path.exists(npy_path):
                self.data_ex = True
        if (self.split == 'Validation'):
            npy_path = os.path.join(RAW_DIR, 'chairs2k_group_data_Val.npy')
            if os.path.exists(npy_path):
                self.data_ex = True
        if (self.split == 'Test'):
            npy_path = os.path.join(RAW_DIR, 'chairs2k_group_data_Test.npy')
            if os.path.exists(npy_path):
                self.data_ex = True
        if (self.data_ex == False):
            if self.split_mode == 'far':
                # Split each target chair's rounds 64/16/20 into
                # train/validation/test.
                target_names = data[:, 3]
                target_uniqs = np.unique(target_names)
                new_data = []
                print('splitting data into train and test')
                pbar = tqdm(total=len(target_uniqs))
                for target in target_uniqs:
                    data_i = data[target_names == target]
                    train_len = int(TRAINING_PERCENTAGE * len(data_i))
                    test_len = int(TESTING_PERCENTAGE * len(data_i))
                    if (self.split == 'Train'):
                        new_data.append(data_i[:train_len])
                    elif (self.split == 'Validation'):
                        new_data.append(data_i[train_len:-test_len])
                    else:
                        new_data.append(data_i[-test_len:])
                    pbar.update()
                pbar.close()
                data = np.concatenate(new_data, axis=0)
            else:  # split_mode == 'close'
                # Split the set of unique target chairs themselves, so
                # held-out splits contain chairs unseen at train time.
                target_names = data[:, 3]
                target_uniqs = np.unique(target_names)
                # BUGFIX: these were len(<float>), which raises TypeError.
                train_len = int(TRAINING_PERCENTAGE * len(target_uniqs))
                test_len = int(TESTING_PERCENTAGE * len(target_uniqs))
                print('splitting data into train and test')
                if (self.split == 'Train'):
                    splitter = np.in1d(target_names, target_uniqs[:train_len])
                elif (self.split == 'Validation'):  # BUGFIX: was misspelled 'Validatation'
                    splitter = np.in1d(target_names, target_uniqs[train_len:-test_len])
                else:
                    splitter = np.in1d(target_names, target_uniqs[-test_len:])
                data = data[splitter]
            # Cache this split for future runs.
            if (self.split == 'Train'):
                np.save(os.path.join(RAW_DIR, 'chairs2k_group_data_Train.npy'), data)
            if (self.split == 'Validation'):
                np.save(os.path.join(RAW_DIR, 'chairs2k_group_data_Val.npy'), data)
            if (self.split == 'Test'):
                np.save(os.path.join(RAW_DIR, 'chairs2k_group_data_Test.npy'), data)
        else:
            # BUGFIX: the cached split was previously loaded under
            # `if not os.path.exists(...)`, so an existing cache was never
            # actually read and `data` silently kept ALL splits.
            if (self.split == 'Train'):
                data = np.load(os.path.join(RAW_DIR, 'chairs2k_group_data_Train.npy'), allow_pickle=True)
            if (self.split == 'Validation'):
                data = np.load(os.path.join(RAW_DIR, 'chairs2k_group_data_Val.npy'), allow_pickle=True)
            if (self.split == 'Test'):
                data = np.load(os.path.join(RAW_DIR, 'chairs2k_group_data_Test.npy'), allow_pickle=True)
        # Map target_chair to a column label: 0=chair_a, 1=chair_b, 2=chair_c.
        labels = []
        for i in range(len(data)):
            if data[i, 3] == data[i, 0]:
                labels.append(0)
            elif data[i, 3] == data[i, 1]:
                labels.append(1)
            elif data[i, 3] == data[i, 2]:
                labels.append(2)
            else:
                raise Exception('bad label')
        labels = np.array(labels)
        self.data = data
        self.labels = labels
        text = [d[-1] for d in data]
        if vocab is None:
            print('building vocab.')
            self.vocab = self.build_vocab(text)
        else:
            self.vocab = vocab
        self.w2i, self.i2w = self.vocab['w2i'], self.vocab['i2w']
        self.vocab_size = len(self.w2i)
        self.sos_token = SOS_TOKEN
        self.eos_token = EOS_TOKEN
        self.pad_token = PAD_TOKEN
        self.unk_token = UNK_TOKEN
        self.sos_index = self.w2i[self.sos_token]
        self.eos_index = self.w2i[self.eos_token]
        self.pad_index = self.w2i[self.pad_token]
        self.unk_index = self.w2i[self.unk_token]
        self.inputs, self.targets, self.lengths, self.positions, self.max_length \
            = self.process_texts(text)
        self.image_transform = image_transform

    def build_vocab(self, texts):
        """Build {'w2i', 'i2w'} from *texts*.

        Words seen fewer than MIN_USED times are dropped; the special
        SOS/EOS/UNK/PAD tokens are appended after the real words.
        """
        w2c = defaultdict(int)
        i2w, w2i = {}, {}
        for text in texts:
            tokens = preprocess_text(text)
            for token in tokens:
                w2c[token] += 1
        indexCount = 0
        for token in w2c.keys():
            if w2c[token] >= MIN_USED:
                w2i[token] = indexCount
                i2w[indexCount] = token
                indexCount += 1
        w2i[SOS_TOKEN] = indexCount
        w2i[EOS_TOKEN] = indexCount+1
        w2i[UNK_TOKEN] = indexCount+2
        w2i[PAD_TOKEN] = indexCount+3
        i2w[indexCount] = SOS_TOKEN
        i2w[indexCount+1] = EOS_TOKEN
        i2w[indexCount+2] = UNK_TOKEN
        i2w[indexCount+3] = PAD_TOKEN
        vocab = {'i2w': i2w, 'w2i': w2i}
        print("total number of words used at least twice: %d" % len(w2i))
        print("total number of different words: %d" % len(w2c.keys()))
        print("max number of word usage: %d" % max(w2c.values()))
        return vocab

    def clean_data(self, data, names):
        """Keep only rows whose three context chairs and target all have images."""
        new_data = []
        for i in tqdm(range(len(data))):
            chair_a, chair_b, chair_c, chair_target, _ = data[i]
            if chair_a + '.png' not in names:
                continue
            if chair_b + '.png' not in names:
                continue
            if chair_c + '.png' not in names:
                continue
            if chair_target + '.png' not in names:
                continue
            new_data.append(data[i])
        return np.array(new_data)

    def process_texts(self, texts):
        """Tokenize, pad and index all utterances.

        Returns (inputs, targets, lengths, positions, max_len): inputs are
        SOS-prefixed, targets EOS-suffixed, both padded to max_len.

        NOTE(review): this tokenizes with raw word_tokenize while
        build_vocab normalizes through preprocess_text, so normalized
        vocab entries may not match here (extra UNKs) -- confirm intended.
        """
        inputs, targets, lengths, positions = [], [], [], []
        n = len(texts)
        max_len = 0
        # First pass: tokenize and find the longest sequence.
        for i in range(n):
            text = texts[i]
            tokens = word_tokenize(text)
            input_tokens = [SOS_TOKEN] + tokens
            target_tokens = tokens + [EOS_TOKEN]
            assert len(input_tokens) == len(target_tokens)
            length = len(input_tokens)
            max_len = max(max_len, length)
            inputs.append(input_tokens)
            targets.append(target_tokens)
            lengths.append(length)
        # Second pass: pad to max_len and convert tokens to vocab indices.
        for i in range(n):
            input_tokens = inputs[i]
            target_tokens = targets[i]
            length = lengths[i]
            input_tokens.extend([PAD_TOKEN] * (max_len - length))
            target_tokens.extend([PAD_TOKEN] * (max_len - length))
            input_tokens = [self.w2i.get(token, self.w2i[UNK_TOKEN]) for token in input_tokens]
            target_tokens = [self.w2i.get(token, self.w2i[UNK_TOKEN]) for token in target_tokens]
            # 1-based token positions; padding gets position 0.
            pos = [pos_i+1 if w_i != self.pad_index else 0
                   for pos_i, w_i in enumerate(input_tokens)]
            inputs[i] = input_tokens
            targets[i] = target_tokens
            positions.append(pos)
        return (np.array(inputs), np.array(targets), np.array(lengths),
                np.array(positions), max_len)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        """Return (target_indices, input_indices, length) for one round."""
        inputs = torch.from_numpy(self.inputs[index]).long()
        targets = torch.from_numpy(self.targets[index]).long()
        return targets, inputs, self.lengths[index]
class Chairs_ReferenceGame(data.Dataset):
    """Reference-game dataset: three chair images plus the speaker utterance.

    Same loading/splitting/vocab pipeline as ChairDataset, but __getitem__
    additionally returns ToTensor()-converted RGB renders of the three
    context chairs, with the target chair always moved into the first slot.
    """

    def __init__(self, vocab=None, split='Train', train=True, context_condition='all',
                 split_mode='easy', image_size=32, image_transform=None, dataVal=None):
        super(Chairs_ReferenceGame, self).__init__()
        # 'easy'/'hard' are aliases for the canonical 'far'/'close' names.
        if (split_mode == 'easy'):
            split_mode = 'far'
        if (split_mode == 'hard'):
            split_mode = 'close'
        assert split_mode in ['far', 'close']
        self.names = np.load(os.path.join(NUMPY_DIR, 'names.npy'))
        self.images = np.load(os.path.join(NUMPY_DIR, 'images.npy'))
        # (removed a stray debug loop that printed every chair name here)
        # names.npy holds bytes; decode so CSV chair names compare equal.
        chair_list = []
        for i in self.names:
            i = str(i.decode('utf-8'))
            chair_list.append(i)
        self.names = chair_list
        self.context_condition = context_condition
        self.split_mode = split_mode
        self.train = train
        self.split = split
        print('loading CSV')
        npy_path = os.path.join(RAW_DIR, 'chairs2k_group_data.npy')
        if not os.path.exists(npy_path):
            # First run: filter the raw CSV down to correct speaker rounds
            # and cache the result as .npy for future runs.
            csv_path = os.path.join(RAW_DIR, 'chairs2k_group_data.csv')
            df = pd.read_csv(csv_path)
            df = df[df['correct'] == True]
            df = df[df['communication_role'] == 'speaker']
            df = df[['chair_a', 'chair_b', 'chair_c', 'target_chair', 'text']]
            df = df.dropna()
            data = np.asarray(df)
            # drop rows that reference chairs without a rendered image
            data = self.clean_data(data, self.names)
            np.save(npy_path, data)
        else:
            data = np.load(npy_path, allow_pickle=True)
        # data_ex: True when a cached split file already exists for `split`.
        self.data_ex = False
        if (self.split == 'Train'):
            npy_path = os.path.join(RAW_DIR, 'chairs2k_group_data_Train.npy')
            if os.path.exists(npy_path):
                self.data_ex = True
        if (self.split == 'Validation'):
            npy_path = os.path.join(RAW_DIR, 'chairs2k_group_data_Val.npy')
            if os.path.exists(npy_path):
                self.data_ex = True
        if (self.split == 'Test'):
            npy_path = os.path.join(RAW_DIR, 'chairs2k_group_data_Test.npy')
            if os.path.exists(npy_path):
                self.data_ex = True
        if (self.data_ex == False):
            if self.split_mode == 'far':
                # Split each target chair's rounds 64/16/20 into
                # train/validation/test.
                target_names = data[:, 3]
                target_uniqs = np.unique(target_names)
                new_data = []
                print('splitting data into train and test')
                pbar = tqdm(total=len(target_uniqs))
                for target in target_uniqs:
                    data_i = data[target_names == target]
                    train_len = int(TRAINING_PERCENTAGE * len(data_i))
                    test_len = int(TESTING_PERCENTAGE * len(data_i))
                    if (self.split == 'Train'):
                        new_data.append(data_i[:train_len])
                    elif (self.split == 'Validation'):
                        new_data.append(data_i[train_len:-test_len])
                    else:
                        new_data.append(data_i[-test_len:])
                    pbar.update()
                pbar.close()
                data = np.concatenate(new_data, axis=0)
            else:  # split_mode == 'close'
                # Split the set of unique target chairs themselves, so
                # held-out splits contain chairs unseen at train time.
                target_names = data[:, 3]
                target_uniqs = np.unique(target_names)
                # BUGFIX: these were len(<float>), which raises TypeError.
                train_len = int(TRAINING_PERCENTAGE * len(target_uniqs))
                test_len = int(TESTING_PERCENTAGE * len(target_uniqs))
                print('splitting data into train and test')
                if (self.split == 'Train'):
                    splitter = np.in1d(target_names, target_uniqs[:train_len])
                elif (self.split == 'Validation'):  # BUGFIX: was misspelled 'Validatation'
                    splitter = np.in1d(target_names, target_uniqs[train_len:-test_len])
                else:
                    splitter = np.in1d(target_names, target_uniqs[-test_len:])
                data = data[splitter]
            # Cache this split for future runs.
            if (self.split == 'Train'):
                np.save(os.path.join(RAW_DIR, 'chairs2k_group_data_Train.npy'), data)
            if (self.split == 'Validation'):
                np.save(os.path.join(RAW_DIR, 'chairs2k_group_data_Val.npy'), data)
            if (self.split == 'Test'):
                np.save(os.path.join(RAW_DIR, 'chairs2k_group_data_Test.npy'), data)
        else:
            # BUGFIX: the cached split was previously loaded under
            # `if not os.path.exists(...)`, so an existing cache was never
            # actually read and `data` silently kept ALL splits.
            if (self.split == 'Train'):
                data = np.load(os.path.join(RAW_DIR, 'chairs2k_group_data_Train.npy'), allow_pickle=True)
            if (self.split == 'Validation'):
                data = np.load(os.path.join(RAW_DIR, 'chairs2k_group_data_Val.npy'), allow_pickle=True)
            if (self.split == 'Test'):
                data = np.load(os.path.join(RAW_DIR, 'chairs2k_group_data_Test.npy'), allow_pickle=True)
        # Map target_chair to a column label: 0=chair_a, 1=chair_b, 2=chair_c.
        labels = []
        for i in range(len(data)):
            if data[i, 3] == data[i, 0]:
                labels.append(0)
            elif data[i, 3] == data[i, 1]:
                labels.append(1)
            elif data[i, 3] == data[i, 2]:
                labels.append(2)
            else:
                raise Exception('bad label')
        labels = np.array(labels)
        self.data = data
        self.labels = labels
        text = [d[-1] for d in data]
        if vocab is None:
            print('building vocab.')
            self.vocab = self.build_vocab(text)
        else:
            self.vocab = vocab
        self.w2i, self.i2w = self.vocab['w2i'], self.vocab['i2w']
        self.vocab_size = len(self.w2i)
        self.sos_token = SOS_TOKEN
        self.eos_token = EOS_TOKEN
        self.pad_token = PAD_TOKEN
        self.unk_token = UNK_TOKEN
        self.sos_index = self.w2i[self.sos_token]
        self.eos_index = self.w2i[self.eos_token]
        self.pad_index = self.w2i[self.pad_token]
        self.unk_index = self.w2i[self.unk_token]
        self.inputs, self.targets, self.lengths, self.positions, self.max_length \
            = self.process_texts(text)
        self.image_transform = image_transform

    def build_vocab(self, texts):
        """Build {'w2i', 'i2w'} from *texts*.

        Words seen fewer than MIN_USED times are dropped; the special
        SOS/EOS/UNK/PAD tokens are appended after the real words.
        """
        w2c = defaultdict(int)
        i2w, w2i = {}, {}
        for text in texts:
            tokens = preprocess_text(text)
            for token in tokens:
                w2c[token] += 1
        indexCount = 0
        for token in w2c.keys():
            if w2c[token] >= MIN_USED:
                w2i[token] = indexCount
                i2w[indexCount] = token
                indexCount += 1
        w2i[SOS_TOKEN] = indexCount
        w2i[EOS_TOKEN] = indexCount+1
        w2i[UNK_TOKEN] = indexCount+2
        w2i[PAD_TOKEN] = indexCount+3
        i2w[indexCount] = SOS_TOKEN
        i2w[indexCount+1] = EOS_TOKEN
        i2w[indexCount+2] = UNK_TOKEN
        i2w[indexCount+3] = PAD_TOKEN
        vocab = {'i2w': i2w, 'w2i': w2i}
        print("total number of words used at least twice: %d" % len(w2i))
        print("total number of different words: %d" % len(w2c.keys()))
        print("max number of word usage: %d" % max(w2c.values()))
        return vocab

    def clean_data(self, data, names):
        """Keep only rows whose three context chairs and target all have images."""
        new_data = []
        for i in tqdm(range(len(data))):
            chair_a, chair_b, chair_c, chair_target, _ = data[i]
            if chair_a + '.png' not in names:
                continue
            if chair_b + '.png' not in names:
                continue
            if chair_c + '.png' not in names:
                continue
            if chair_target + '.png' not in names:
                continue
            new_data.append(data[i])
        return np.array(new_data)

    def process_texts(self, texts):
        """Tokenize, pad and index all utterances.

        Returns (inputs, targets, lengths, positions, max_len): inputs are
        SOS-prefixed, targets EOS-suffixed, both padded to max_len.

        NOTE(review): this tokenizes with raw word_tokenize while
        build_vocab normalizes through preprocess_text, so normalized
        vocab entries may not match here (extra UNKs) -- confirm intended.
        """
        inputs, targets, lengths, positions = [], [], [], []
        n = len(texts)
        max_len = 0
        # First pass: tokenize and find the longest sequence.
        for i in range(n):
            text = texts[i]
            tokens = word_tokenize(text)
            input_tokens = [SOS_TOKEN] + tokens
            target_tokens = tokens + [EOS_TOKEN]
            assert len(input_tokens) == len(target_tokens)
            length = len(input_tokens)
            max_len = max(max_len, length)
            inputs.append(input_tokens)
            targets.append(target_tokens)
            lengths.append(length)
        # Second pass: pad to max_len and convert tokens to vocab indices.
        for i in range(n):
            input_tokens = inputs[i]
            target_tokens = targets[i]
            length = lengths[i]
            input_tokens.extend([PAD_TOKEN] * (max_len - length))
            target_tokens.extend([PAD_TOKEN] * (max_len - length))
            input_tokens = [self.w2i.get(token, self.w2i[UNK_TOKEN]) for token in input_tokens]
            target_tokens = [self.w2i.get(token, self.w2i[UNK_TOKEN]) for token in target_tokens]
            # 1-based token positions; padding gets position 0.
            pos = [pos_i+1 if w_i != self.pad_index else 0
                   for pos_i, w_i in enumerate(input_tokens)]
            inputs[i] = input_tokens
            targets[i] = target_tokens
            positions.append(pos)
        return (np.array(inputs), np.array(targets), np.array(lengths),
                np.array(positions), max_len)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        """Return (target_img, distractor_img, distractor_img, inputs, length)."""
        chair_a, chair_b, chair_c, chair_target, _ = self.data[index]
        label = self.labels[index]
        chair_a = chair_a + '.png'
        chair_b = chair_b + '.png'
        chair_c = chair_c + '.png'
        chair_names = list(self.names)
        index_a = chair_names.index(chair_a)
        index_b = chair_names.index(chair_b)
        index_c = chair_names.index(chair_c)
        chair_a_np = self.images[index_a][0]
        chair_b_np = self.images[index_b][0]
        chair_c_np = self.images[index_c][0]
        # Convert the raw single-channel arrays to RGB PIL images.
        chair_a_pt = torch.from_numpy(chair_a_np).unsqueeze(0)
        chair_a = transforms.ToPILImage()(chair_a_pt).convert('RGB')
        chair_b_pt = torch.from_numpy(chair_b_np).unsqueeze(0)
        chair_b = transforms.ToPILImage()(chair_b_pt).convert('RGB')
        chair_c_pt = torch.from_numpy(chair_c_np).unsqueeze(0)
        chair_c = transforms.ToPILImage()(chair_c_pt).convert('RGB')
        if self.image_transform is not None:
            chair_a = self.image_transform(chair_a)
            chair_b = self.image_transform(chair_b)
            chair_c = self.image_transform(chair_c)
        inputs = torch.from_numpy(self.inputs[index]).long()
        length = self.lengths[index]
        trans = transforms.ToTensor()
        # Move the target into slot a so the returned triple is always
        # (target, distractor, distractor).
        # BUGFIX: labels take values 0..2 (0 already means "target is a"),
        # but the old code tested label == 2 (swapping a/b, which left the
        # target stranded in slot c) and the unreachable label == 3.
        if (label == 1):
            chair_a, chair_b = chair_b, chair_a
        elif (label == 2):
            chair_a, chair_c = chair_c, chair_a
        return trans(chair_a), trans(chair_b), trans(chair_c), inputs, length
def preprocess_text(text):
    """Normalize an utterance into a token list for vocabulary building.

    Lowercases, tokenizes, strips periods and apostrophes, splits on
    '/' and '-', detaches comparative/superlative suffixes ('er'/'est'/
    'ish') into separate tokens, fixes common misspellings, and removes
    empty tokens.
    """
    text = text.lower()
    tokens = word_tokenize(text)
    i = 0
    while i < len(tokens):
        # Strip embedded periods and apostrophes (keep lone '.'/"'" tokens).
        while (tokens[i] != '.' and '.' in tokens[i]):
            tokens[i] = tokens[i].replace('.','')
        while (tokens[i] != '\'' and '\'' in tokens[i]):
            tokens[i] = tokens[i].replace('\'','')
        # Split slash/dash compounds (e.g. "grey/green") into two tokens;
        # also re-checks the previous token, which the splits above may
        # have shifted under index i-1.
        while('-' in tokens[i] or '/' in tokens[i]):
            if tokens[i] == '/' or tokens[i] == '-':
                tokens.pop(i)
                i -= 1
            if '/' in tokens[i]:
                split = tokens[i].split('/')
                tokens[i] = split[0]
                i += 1
                tokens.insert(i, split[1])
            if '-' in tokens[i]:
                split = tokens[i].split('-')
                tokens[i] = split[0]
                i += 1
                tokens.insert(i, split[1])
            if tokens[i-1] == '/' or tokens[i-1] == '-':
                tokens.pop(i-1)
                i -= 1
            if '/' in tokens[i-1]:
                split = tokens[i-1].split('/')
                tokens[i-1] = split[0]
                i += 1
                tokens.insert(i-1, split[1])
            if '-' in tokens[i-1]:
                split = tokens[i-1].split('-')
                tokens[i-1] = split[0]
                i += 1
                tokens.insert(i-1, split[1])
        # Detach degree suffixes as their own tokens ("darker" -> "dark","er").
        if tokens[i].endswith('er'):
            tokens[i] = tokens[i][:-2]
            i += 1
            tokens.insert(i, 'er')
        if tokens[i].endswith('est'):
            tokens[i] = tokens[i][:-3]
            i += 1
            tokens.insert(i, 'est')
        # NOTE(review): an 'ish' suffix is re-inserted as 'est' here (and in
        # the i-1 case below) -- looks like a copy-paste slip; confirm whether
        # 'ish' should be inserted instead.
        if tokens[i].endswith('ish'):
            tokens[i] = tokens[i][:-3]
            i += 1
            tokens.insert(i, 'est')
        if tokens[i-1].endswith('er'):
            tokens[i-1] = tokens[i-1][:-2]
            i += 1
            tokens.insert(i-1, 'er')
        if tokens[i-1].endswith('est'):
            tokens[i-1] = tokens[i-1][:-3]
            i += 1
            tokens.insert(i-1, 'est')
        if tokens[i-1].endswith('ish'):
            tokens[i-1] = tokens[i-1][:-3]
            i += 1
            tokens.insert(i-1, 'est')
        i += 1
    # Collapse common misspellings/abbreviations onto canonical words.
    replace = {'redd':'red', 'gren': 'green', 'whit':'white', 'biege':'beige', 'purp':'purple', 'olve':'olive', 'ca':'can', 'blu':'blue', 'orang':'orange', 'gray':'grey'}
    for i in range(len(tokens)):
        if tokens[i] in replace.keys():
            tokens[i] = replace[tokens[i]]
    # Suffix stripping can leave empty strings behind; drop them all.
    while '' in tokens:
        tokens.remove('')
    return tokens
| [
"100021710@mvla.net"
] | 100021710@mvla.net |
a1f9e3e7b5b89f5f0a117846b4bfe0908cfe2503 | d7000038b837a00bf7575f4616fa7f555e5afc8c | /Python/ping.py | 09dc3cf03acbd81e2eaf2be5530825d5a2b40028 | [] | no_license | payshangjj/code-repositories | aa39c55ed28573586050d6fca91b61ca3d4b4289 | bbee90553f68e40c25c0bc939836cff0a36d74df | refs/heads/master | 2020-12-25T14:57:58.397251 | 2016-08-24T16:24:11 | 2016-08-24T16:24:11 | 66,099,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | #!/usr/bin/env python
#coding:utf-8
import os
import threading
numlock =threading.RLock() #counter lock: must be held while updating the online count
prlock=threading.RLock() #print lock: must be held while a worker writes to stdout (prevents interleaved output)
zxs=0 #number of hosts that answered a ping (shared across threads)
class pings(threading.Thread):
    """Worker thread: pings every `interval`-th host of 192.168.1.0/24
    starting at host `num`, counting and reporting the ones that answer.
    (Python 2 code: uses print statements.)"""
    def __init__(self,num,interval):
        threading.Thread.__init__(self)
        self.nums=num       # first host octet this worker probes
        self.inter=interval # stride between probed host octets
        self.thread_stop=False
        self.ns=0           # per-thread count of responsive hosts
    def run(self):
        global zxs
        start=self.nums
        while start<255:
            # os.system returns 0 on a successful ping (1 packet, 1s timeout)
            ret=os.system('ping -c 1 -W 1 192.168.1.%d >/dev/null' % start)
            if not ret:
                prlock.acquire()#acquire the print lock
                print 'ping 192.168.1.%d ok' % start
                prlock.release()#release the print lock
                self.ns +=1
            start+=self.inter
        numlock.acquire() #acquire the counter lock
        zxs+=self.ns
        numlock.release() #release the counter lock
def pingt():
s=255
r=s-1
threads=[]
for i in range(1,s):
t=pings(i,r)
threads.append(t)
for i in threads:
i.start()
for i in threads:
i.join()
global zxs
print zxs,'个ip在线' #输出在线数
if __name__=='__main__':
pingt()
| [
"yulei024@163.com"
] | yulei024@163.com |
0a889ae77ecf14256767e53fff24bf1655e68788 | ee50502df12ffbffd5312161065fb8fa02085445 | /030.substring-with-concatenation-of-all-words/mine.py | 922e678b787da4f12d950511929c6c4460778919 | [] | no_license | IsabellaHuan/lc-all-solutions | eb08cdf723489b56521fe131bc5c1b4a399897ec | c6a136b0498a85fb38e934b5a4af42f8eef9ce1a | refs/heads/master | 2020-11-24T19:36:15.414404 | 2020-03-23T03:24:22 | 2020-03-23T03:24:22 | 228,314,954 | 0 | 0 | null | 2019-12-16T06:09:08 | 2019-12-16T06:09:07 | null | UTF-8 | Python | false | false | 154 | py | #coding=utf-8
# author: Huan Shuwen
# time : 2019/12/18 下午8:14
# file : mine
"""
NOTICE:
找到连续的所有给定单词组合的其实坐标
"""
| [
"huanshuwen@didiglobal.com"
] | huanshuwen@didiglobal.com |
97a54b16689893b027d5f64ac83646f74cec9194 | a92efd5efcbf56db03cbb2c25fa5d89014ca19a6 | /aplicaciones/blog/migrations/0001_initial.py | 6b8852d52e702c870480faef8662818eff19dd54 | [] | no_license | nicopereiran7/django_netflix | 5d0b83db2ce7f9e641408e7c9330bfadd3ca2d48 | b0405059f56f4e66826dd0e53ade3e4cf893f9bc | refs/heads/master | 2023-02-12T12:55:01.022783 | 2021-01-14T16:41:30 | 2021-01-14T16:41:30 | 329,672,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | # Generated by Django 3.1.4 on 2021-01-12 21:31
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Categoria',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100, verbose_name='Nombre de la Categoria')),
('estado', models.BooleanField(default=True, verbose_name='Categoria Activada/Categoria No Activada')),
('fecha_creacion', models.DateField(auto_now_add=True, verbose_name='Fecha Creacion')),
],
options={
'verbose_name': 'Categoria',
'verbose_name_plural': 'Categorias',
},
),
]
| [
"nicopereiran7n7n7@gmail.com"
] | nicopereiran7n7n7@gmail.com |
bd298e7985f0b09e4222e354e3f0afc394e96595 | b47f2e3f3298388b1bcab3213bef42682985135e | /experiments/heat-3d/tmp_files/1539.py | 27d7e85b4987f69ec0aa2f1e52f9247dec53052f | [
"BSD-2-Clause"
] | permissive | LoopTilingBenchmark/benchmark | 29cc9f845d323431e3d40e878cbfc6d1aad1f260 | 52a3d2e70216552a498fd91de02a2fa9cb62122c | refs/heads/master | 2020-09-25T09:45:31.299046 | 2019-12-04T23:25:06 | 2019-12-04T23:25:06 | 225,975,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/heat-3d/tmp_files/1539.c')
procedure('kernel_heat_3d')
loop(0)
tile(0,2,8,2)
tile(0,4,8,4)
tile(0,6,8,6)
tile(1,2,8,2)
tile(1,4,8,4)
tile(1,6,8,6)
| [
"nashenruoyang@163.com"
] | nashenruoyang@163.com |
d1cc019f002492e4ca2f30241964186934bb36af | 930309163b930559929323647b8d82238724f392 | /abc108_b.py | c72c059e12f6e5358657caa002cf6e7a6a309c3c | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 162 | py |
x1, y1, x2, y2 = map(int, input().split())
dx = x2 - x1
dy = y2 - y1
x3 = x2 - dy
y3 = y2 + dx
x4 = x3 - dx
y4 = y3 - dy
print("%d %d %d %d" % (x3, y3, x4, y4))
| [
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
2b19110914a02de6d4d0e861df33969b3d7063a9 | a82bc1f734089e337f9f96cb42e5b3f0360fede9 | /tests/devices/bmp280_test.py | 7f2d8568bfd2ca23b690a362d01917f8ba0ede16 | [] | no_license | zhaoshenhao/chickadee | ce5e14ca348ff165c52b38d437bdc0aa6f80e8d8 | d78e26ea0250825519a0ca5280bf5805f09deba3 | refs/heads/main | 2023-03-22T08:56:24.566975 | 2021-03-13T04:02:58 | 2021-03-13T04:02:58 | 326,502,387 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | # BMP280 气压,温度,海拔(误差较大)
from bmp280 import Bmp280
import uasyncio as asyncio
import time
async def demo_async():
ds = Bmp280(16, 17)
for _ in range(0,2):
print(await ds.async_sensor_values())
print(ds.get_temperature())
print(ds.get_pressure())
print(ds.get_altitude())
await asyncio.sleep(1)
def demo():
ds = Bmp280(16, 17)
for _ in range(0,2):
print(ds.sensor_values)
print(ds.get_temperature())
print(ds.get_pressure())
print(ds.get_altitude())
time.sleep(1)
if __name__ == "__main__":
demo()
import uasyncio as asyncio
asyncio.run(demo_async())
| [
"syspole1@gmail.com"
] | syspole1@gmail.com |
e075b4e7eb1d68b584d157154f05e397e8d0397a | 22f66e6fdf32d22bab056cbd1194afa40b367358 | /201902yusuan/probudget.py | 3f59adbed4ab1c85b83222941d258cd448ac6697 | [] | no_license | KingJoan/projectExample | 4795b01c91abb919a6ffdf0e019d16f3f8dec71a | 2f6915ccf7404252d6914197f97ec1e00bda1dc2 | refs/heads/master | 2020-04-29T14:17:23.605976 | 2019-03-18T02:17:37 | 2019-03-18T02:17:37 | 176,187,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,614 | py | # -*- coding: utf-8 -*-
# @Time : 2019/2/26 14:43
# @Author : Joan
# @Email : sj11249187@126.com
# @File : probudget.py
# @Software: PyCharm
import numpy as np
import json
import pandas as pd
import time
def readJson(path, filename):
with open(path + '\\' + filename, encoding='utf-8') as f:
data = json.load(f)
data = pd.DataFrame(data['RECORDS'])
return data
def qd_de_mix(df):
'''
将项(索引)与所套定额(索引)绑定在一起,项为键,定额为值
:param df: 项目案例数据dataframe格式,且只留项与定额
:return:
'''
# 求zmkind 差值,zmkind中10-定额;50-项,项
zmKind_diff = np.diff(df.zmKind)
# 取连续值的最后一个的索引
pos, = np.where(zmKind_diff)
# 如果差值最后一位等于0,则索引pos最后增加zmKind最后一位的索引
if (len(zmKind_diff) > 1) and (zmKind_diff[-1] == 0):
pos = np.append(pos, len(df.zmKind)-1)
# 将索引pos转换为nx2的数组,[i][0]表示项,[i][1]表示定额
if len(pos) % 2 != 0:
pos = pos[:-1]
pos_array = pos.reshape(len(pos)//2, 2)
# 将数组转换为字典:键:项(连续项的最后一项)索引,值:定额(连续定额的最后一项)索引
qd_de_last = {}
for i in range(len(pos_array)):
qd_de_last[pos_array[i][0]] = pos_array[i][1]
# 定额索引补齐: 键:项索引,值:项所套全部定额索引
qd_de_defill = {}
for qd, de in qd_de_last.items():
if de - qd > 1:
qd_de_defill[qd] = list(range(qd+1, de+1))
else:
qd_de_defill[qd] = [de]
return qd_de_defill
def qd_no_fill(df, qd_no_defill):
'''
项编号补齐
:return:
'''
# 清单索引减一函数
sub = lambda x: x-1
for qdsy in qd_no_defill.keys():
# qdsyup临时存放清单索引值以便于向上索引,初始值置为当前清单索引
qdsyup = qdsy
# 当前清单编号
qdno = df.loc[qdsy, 'no']
# 上一级清单编号,初始值置为当前清单编号
no = [str(qdno)]
n = df.loc[qdsy, 'level']
while n > 1:
qdsyup = sub(qdsyup)
qdupl = df.loc[qdsyup, 'level']
if qdupl < n:
n -= 1
if (qdupl == n) and (df.loc[qdsyup, 'zmKind'] != 10 ):
no.append(str(df.loc[qdsyup, 'no']))
nofill = no[::-1]
if len(nofill) == 0:
df.loc[qdsy, 'nofill'] = ''
else:
df.loc[qdsy, 'nofill'] = '-'.join(nofill)
return df
def same_qd(df, qd_de_defill):
'''
相同项所套定额
:param df:
:param qd_de_defill: 项索引--套用的定额索引字典
:return: dataframe
'''
returnData = pd.DataFrame([], columns=['no', 'name', 'mark', 'unit', 'unitPrice',
'costfileName', 'editTime', 'roadLevel', 'workSpale'])
# 键:项索引 值:项name
qd_name = {}
for qdsy in qd_de_defill.keys():
qd_name[qdsy] = df.loc[qdsy, 'name']
# 键:项name 值:项索引 相同项名-项索引
name_qd = {}
for qdsy, name in qd_name.items():
name_qd.setdefault(name, []).append(qdsy)
# 键:项索引,值:定额编号
qdsy_deno = {}
for k, v in qd_de_defill.items():
qdsy_deno[k] = []
for vi in v:
qdsy_deno[k].append(df.loc[vi, 'no'])
# 键:定额编号 值:相同定额的项索引列表
deno_qdsylist = {}
for k, v in qdsy_deno.items():
deno_qdsylist.setdefault(tuple(v), []).append(k)
# 去除相同定额 键:项name 值:项索引
qdname_qdsy = {}
for kname, vqdsy in name_qd.items():
qdname_qdsy[kname] = []
for v in deno_qdsylist.values():
if set(vqdsy) & set(v):
if len(v) > 1:
vs = []
for vi in v:
if df.loc[vi, 'name'] == kname:
vs.append(vi)
qdname_qdsy[kname].append(vs[0])
else:
qdname_qdsy[kname].extend(v)
# 对每一个sameqdname 查找对应的qdsy
for qdname in qdname_qdsy.keys():
# 对每一个qdname查找对应qdsy列表
qdsylist = qdname_qdsy[qdname]
for i in range(len(qdsylist)):
qdsy = qdsylist[i]
desys = qd_de_defill[qdsy]
qd = [df.loc[qdsy, 'nofill'], df.loc[qdsy, 'name'], df.loc[qdsy, 'mark'], df.loc[qdsy, 'unit'], df.loc[qdsy, 'unitPrice'],
df.loc[qdsy, 'costfileName'], df.loc[qdsy, 'editTime'], df.loc[qdsy, 'roadLevel'],
df.loc[qdsy, 'workSpale']]
returnData.loc[str(qdname) + '@项%d' % (i + 1), :] = qd
for j in range(len(desys)):
desy = desys[j]
de = [df.loc[desy, 'no'], df.loc[desy, 'name'], df.loc[desy, 'mark'], df.loc[desy, 'unit'], df.loc[desy, 'unitPrice'],
df.loc[desy, 'costfileName'], df.loc[desy, 'editTime'], df.loc[desy, 'roadLevel'],
df.loc[desy, 'workSpale']]
returnData.loc[str(qdname) + '@项%d套定额%d' % (i + 1, j + 1), :] = de
return returnData
if __name__ == '__main__':
path = r'D:\toone\项目案例\201902概预算\第二次\概预算'
to_path = r'D:\toone\项目案例\201902概预算\第二次\概预算\结果v2'
data = readJson(path, '概预算.json')
data.loc[data['mark'] == "计算项", 'zmKind'] = 10
print('程序执行开始时间:%s' % time.asctime())
# data.head(2000).to_excel(to_path + '\data.xlsx')
data = data[data['costfileName'].str.contains('预')]
data = data[data['zmKind'].isin([10, 50])]
data['name'].fillna('', inplace=True)
data['roadLevel'].fillna('空', inplace=True)
data['workSpale'].fillna('空', inplace=True)
for m in list(set(data['roadLevel'])):
writer = pd.ExcelWriter(to_path + '\%s.xlsx' % m, engine='openpyxl')
for n in list(set(data['workSpale'])):
datadf = data[(data['roadLevel'] == m) & (data['workSpale'] == n)]
if datadf.empty:
continue
else:
df = datadf.reset_index()
qddefill = qd_de_mix(df)
qdnofill = qd_no_fill(df, qddefill)
result = same_qd(qdnofill, qddefill)
result.to_excel(writer, sheet_name='%s' % n)
writer.close()
print('程序执行完成时间:%s' % time.asctime())
| [
"jinj@toone.com.cn"
] | jinj@toone.com.cn |
85133e30be06069297794779f1dc75b826ce7fd2 | 20f5601bcbbc1bf39c42256188ce76cc06b47238 | /manage.py | 23fd73d777219bb52e9e2753a1a94928d0f5b7dc | [] | no_license | Earthman12/Zemax_ZRD_File_Wrapper | a42e5f673cc4b37c206c6fec7e0dfed4a74774c4 | 72f0580eb9a2835444270d02f01ae87a8501178b | refs/heads/main | 2023-06-18T05:57:04.343687 | 2021-07-21T17:59:38 | 2021-07-21T17:59:38 | 388,203,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Zemax_ZRD_File_Wrapper.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"dteran@lpl.arizona.edu"
] | dteran@lpl.arizona.edu |
918f237882bc12ca5169f08d0b2a86dd2b388b12 | ec00584ab288267a7cf46c5cd4f76bbec1c70a6b | /Python/__function/functions1/functions1/23 keyword non-keyword argument.py | 9adc44c2843f16255ab0ee092696537a2eac3237 | [] | no_license | rahuldbhadange/Python | b4cc806ff23953389c9507f43d817b3815260e19 | 7e162117f1acc12537c7eeb36d6983d804122ff3 | refs/heads/master | 2021-06-23T05:04:20.053777 | 2020-01-28T10:34:28 | 2020-01-28T10:34:28 | 217,307,612 | 0 | 0 | null | 2021-06-10T22:44:11 | 2019-10-24T13:35:42 | Python | UTF-8 | Python | false | false | 553 | py | #3.keyword arguments: During fn call,using parameter name,passing value
#4.non-keyword arguments:During fn call,without parameter name,passing value
def display(branch,code):
print(branch,code)
display("CSE","05") #non-keyword argument
display(branch="ECE",code="04") #keyword argument (using parameter name)
display(code="02",branch="EEE") #keyword argument
#display(code="12","IT")
#default and non-default related to fn definition
#key-word and non-keyword relatd to fn call
#Note: After keyword argument,we cannot have nonkeyword argument
| [
"46024570+rahuldbhadange@users.noreply.github.com"
] | 46024570+rahuldbhadange@users.noreply.github.com |
56563a4e5c488b9601edba375fd206aab7e9b771 | b3be33fa7bea97e8bbe787a3665b5c65150abc00 | /week_1/Functions/Local_Variable_Scope.py | 54893ec763630221802c98cef38895f95bc32314 | [] | no_license | sssssh/cmu15-112 | dcef53e27a5628c0d4ea51188074defb4baf8dcc | f2ae6c4536bf0c109171fbfd57bc5ea9eb3abfee | refs/heads/master | 2021-07-12T20:17:26.371761 | 2017-10-18T03:15:55 | 2017-10-18T03:15:55 | 107,097,261 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | def f(x):
print("In f, x =", x)
x += 5
return x
def g(x):
return f(x * 2) + f(x * 3)
print(g(2))
def f(x):
print("In f, x =", x)
x += 7
return round(x / 3)
def g(x):
x *= 10
return 2 * f(x)
def h(x):
x += 3
return f(x + 4) + g(x)
print(h(f(1)))
| [
""
] | |
9c0fb83c7a4b11f4d980fcbca72defa76f99e0b6 | f5dde423f9c6339076d2927f5e259b45e0171baf | /mdgen/constants.py | c6f3df841b8b1f5464d407808c5417becb6906e5 | [
"Apache-2.0",
"MIT"
] | permissive | parthpandyappp/python-random-markdown-generator | 5ab399d0f06952c082386e7c77355e08858fa180 | cd02ff0991cd35d7bac8455298a7eb676efc46eb | refs/heads/master | 2022-12-20T03:42:13.685124 | 2020-10-05T08:08:31 | 2020-10-05T08:08:31 | 300,171,307 | 0 | 0 | Apache-2.0 | 2020-10-01T06:28:45 | 2020-10-01T06:28:44 | null | UTF-8 | Python | false | false | 559 | py | from os import linesep
MARKDOWN_HEADER = '#'
MARKDOWN_HEADER_ALT = '-'
LINESEPARATOR = linesep
INDENTATION = '\t'
MARKDOWN_BOLD = '**'
MARKDOWN_ITALIC = '*'
MARKDOWN_ITALIC_ALT = '_'
MARKDOWN_HORIZONTAL_RULE_HYPHENS = '---'
MARKDOWN_HORIZONTAL_RULE_ASTERISKS = '***'
MARKDOWN_HORIZONTAL_RULE_UNDERSCORES = '___'
MARKDOWN_UNORDERED_LISTS_ASTERISKS = '*'
MARKDOWN_UNORDERED_LISTS_MINUS = '-'
MARKDOWN_UNORDERED_LISTS_PLUS = '+'
MARKDOWN_TABLE_COL_SEPARATOR = '|'
MARKDOWN_TABLE_ROW_SEPARATOR = '-'
MARKDOWN_COMMENT_OPEN = '<!--'
MARKDOWN_COMMENT_CLOSE = '-->'
| [
"ignisda2002@gmail.com"
] | ignisda2002@gmail.com |
ac924d968bdb28866bff06959c39b1bd0944dab0 | 866581782ea07678da08291f034922bd5ae6db77 | /section04_3/section04_3/settings.py | b0dac60e963466c0c9ad98b0a906ebeac25d3446 | [] | no_license | khs50851/git-crawling | ee8967d6d8ba1e70b1be5f872008247f9955d76c | 9d29234f9cb025a05b782995abc54a87048445f9 | refs/heads/master | 2023-02-03T21:54:42.214804 | 2020-12-23T06:22:59 | 2020-12-23T06:22:59 | 320,491,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,162 | py | # Scrapy settings for section04_2 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'section04_3'
SPIDER_MODULES = ['section04_3.spiders']
NEWSPIDER_MODULE = 'section04_3.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'section04_2 (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'section04_2.middlewares.Section042SpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'section04_2.middlewares.Section042DownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'section04_3.pipelines.TestSpiderPipeline': 300, # 이 숫자는 낮을수록 먼저 실행됨
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"khs50851@naver.com"
] | khs50851@naver.com |
08a51725bc857422bfd0211437ee4d92a0c0d3ef | 8655909600b85519b34b2d56bc498303b5f49e6d | /tests/unit/copying_manager_tests/copying_manager_new_test.py | ff5fcdb0014e171c277a16e957d3e6944f4b413f | [
"Apache-2.0"
] | permissive | scalyr/scalyr-agent-2 | 7ba794f7d1a7d80c07a3150c6d20a82b311ac9a2 | 5099a498edc47ab841965b483c2c32af49eb7dae | refs/heads/master | 2023-08-21T12:45:40.711445 | 2023-08-16T12:06:19 | 2023-08-16T12:06:19 | 23,852,161 | 75 | 76 | Apache-2.0 | 2023-09-11T20:44:43 | 2014-09-09T22:18:19 | Python | UTF-8 | Python | false | false | 26,179 | py | # Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import json
import time
import os
import platform
import sys
if False:
from typing import Dict
from typing import Tuple
try:
import pathlib
except ImportError:
import pathlib2 as pathlib # type: ignore
import pytest
from scalyr_agent import scalyr_logging
from scalyr_agent.test_base import ScalyrTestCase
from tests.unit.copying_manager_tests.common import (
CopyingManagerCommonTest,
TestableCopyingManager,
TestableCopyingManagerFlowController,
TestableLogFile,
TestEnvironBuilder,
TestingConfiguration,
)
from scalyr_agent import util as scalyr_util
import six
from six.moves import range
import mock
log = scalyr_logging.getLogger(__name__)
log.setLevel(scalyr_logging.DEBUG_LEVEL_0)
# mock library does not have PropertyMock in python 2.6, so we just keep it None.
if sys.version_info >= (2, 7):
PropertyMock = mock.PropertyMock
else:
PropertyMock = None
def pytest_generate_tests(metafunc):
"""
Run all tests for each configuration.
"""
if "worker_type" in metafunc.fixturenames:
test_params = [["thread", 1, 1], ["thread", 2, 2]]
# if the OS is not Windows / OS X and python version > 2.7 then also do the multiprocess workers testing.
if platform.system() not in ["Windows", "Darwin"] and sys.version_info >= (
2,
7,
):
test_params.extend([["process", 1, 1], ["process", 2, 2]])
metafunc.parametrize(
"worker_type, workers_count, worker_sessions_count", test_params
)
class CopyingManagerTest(CopyingManagerCommonTest):
@pytest.fixture(autouse=True)
def setup(self, worker_type, workers_count, worker_sessions_count):
super(CopyingManagerTest, self).setup()
self.use_multiprocessing_workers = worker_type == "process"
self.workers_count = workers_count
self.worker_sessions_count = worker_sessions_count
def teardown(self):
if self._instance is not None:
self._instance.stop_manager()
self._instance.cleanup()
super(CopyingManagerTest, self).teardown()
def _init_test_environment(
self,
use_pipelining=False,
config_data=None,
disable_flow_control=False,
):
pipeline_threshold = 1.1
if use_pipelining:
pipeline_threshold = 0.0
if config_data is None:
config_data = {}
if "workers" not in config_data:
workers = []
for i in range(self.workers_count - 1):
worker_config = {
"id": "key_id_%s" % i,
"api_key": "key_%s" % i,
}
workers.append(worker_config)
config_data["workers"] = workers
config_data["default_sessions_per_worker"] = self.worker_sessions_count
config_data["use_multiprocess_workers"] = self.use_multiprocessing_workers
config_data["disable_max_send_rate_enforcement_overrides"] = True
config_data["pipeline_threshold"] = pipeline_threshold
config_data["implicit_agent_log_collection"] = False
self._env_builder = TestEnvironBuilder()
self._env_builder.init_agent_dirs()
self._env_builder.init_config(config_data)
scalyr_logging.set_log_destination(
use_disk=True,
logs_directory=six.text_type(self._env_builder.config.agent_log_path),
agent_log_file_path="agent.log",
agent_debug_log_file_suffix="_debug",
)
scalyr_logging.__log_manager__.set_log_level(scalyr_logging.DEBUG_LEVEL_5)
self._env_builder.config.disable_flow_control = disable_flow_control
self._env_builder.config.skip_agent_log_change = False
def _create_manager_instance(self, auto_start=True):
self._instance = TestableCopyingManager(self._env_builder.config, [])
if auto_start:
self._instance.start_manager()
self._instance.run_and_stop_at(
TestableCopyingManagerFlowController.SLEEPING
)
return self._instance
def _init_manager(
self,
log_files_number=1,
auto_start=True,
use_pipelining=False,
config_data=None,
disable_flow_control=False,
): # type: (int, bool, bool, Dict, bool) -> Tuple[Tuple[TestableLogFile, ...], TestableCopyingManager]
if self._env_builder is None:
self._init_test_environment(
use_pipelining=use_pipelining,
config_data=config_data,
disable_flow_control=disable_flow_control,
)
if log_files_number is not None:
files = self._env_builder.recreate_files( # type: ignore
log_files_number, self._env_builder.non_glob_logs_dir # type: ignore
)
else:
files = tuple()
manager = self._create_manager_instance(auto_start=auto_start)
return files, manager # type: ignore
class TestBasic(CopyingManagerTest):
def test_multiple_workers(self):
_, manager = self._init_manager(2)
assert (
len(manager.worker_sessions)
== self.worker_sessions_count * self.workers_count
)
worker_pids = set(worker.get_pid() for worker in manager.worker_sessions)
if self.use_multiprocessing_workers:
assert len(worker_pids) == self.worker_sessions_count * self.workers_count
assert os.getpid() not in worker_pids
else:
# in case of non multiprocess workers, all workers has the same process id as the main process.
assert worker_pids == set([os.getpid()])
def test_generate_status(self):
(test_file, test_file2), manager = self._init_manager(2)
test_file.append_lines("line1")
test_file2.append_lines("line2")
assert set(self._wait_for_rpc_and_respond()) == set(["line1", "line2"])
status = manager.generate_status()
assert status.health_check_result == "Good"
return
def test_health_check_status(self):
(test_file, test_file2), manager = self._init_manager(2)
manager._CopyingManager__last_scan_attempt_time = time.time()
status = manager.generate_status()
assert status.health_check_result == "Good"
def test_health_check_status_failed(self):
(test_file, test_file2), manager = self._init_manager(2)
manager._CopyingManager__last_scan_attempt_time = time.time() - (1000 * 65)
status = manager.generate_status()
assert (
status.health_check_result
== "Failed, max time since last scan attempt (60.0 seconds) exceeded"
)
def test_health_check_status_worker_failed(self):
(test_file, test_file2), manager = self._init_manager(2)
# get all workers and simulate their last attempt timeout.
for worker in manager.worker_sessions:
worker.change_last_attempt_time(time.time() - (1000 * 65))
status = manager.generate_status()
if self.worker_sessions_count > 1 or self.workers_count > 1:
assert status.worker_sessions_health_check == "Some workers have failed."
assert status.health_check_result == "Good"
else:
assert (
status.worker_sessions_health_check
== "Worker session 'default-0' failed, max time since last copy attempt (60.0 seconds) exceeded"
)
assert status.health_check_result == "Good"
def test_failed_health_check_status_and_failed_worker(self):
(test_file, test_file2), manager = self._init_manager(2)
manager._CopyingManager__last_scan_attempt_time = time.time() - (1000 * 65)
# get all workers and simulate their last attempt timeout.
for worker in manager.worker_sessions:
worker.change_last_attempt_time(time.time() - (1000 * 65))
status = manager.generate_status()
if self.worker_sessions_count > 1 or self.workers_count > 1:
assert status.worker_sessions_health_check == "Some workers have failed."
assert (
status.health_check_result
== "Failed, max time since last scan attempt (60.0 seconds) exceeded"
)
else:
assert (
status.worker_sessions_health_check
== "Worker session 'default-0' failed, max time since last copy attempt (60.0 seconds) exceeded"
)
assert (
status.health_check_result
== "Failed, max time since last scan attempt (60.0 seconds) exceeded"
)
def test_checkpoints(self):
(test_file, test_file2), manager = self._init_manager(2)
# also add non-copying manager related checkpoints files, to be sure that the copying manager does not
# touch them. This emulates the case where some agent monitors also store their own state in checkpoint files
# and we must not consolidate them with the worker checkpoints.
monitor_checkpoint_file_names = [
"windows-event-checkpoints.json",
"docker-checkpoints.json",
"journald-checkpoints.json",
]
monitors_checkpoint_paths = {}
for name in monitor_checkpoint_file_names:
monitor_checkpoint_path = pathlib.Path(
self._env_builder.config.agent_data_path, name
)
check_text = "{0}. Do not delete me, please.".format(name)
# write some text to the monitor checkpoint files, just to verify that it is not changed later.
monitors_checkpoint_paths[monitor_checkpoint_path] = check_text
monitor_checkpoint_path.write_text(check_text)
test_file.append_lines("line1")
test_file2.append_lines("line2")
assert set(self._wait_for_rpc_and_respond()) == set(["line1", "line2"])
# stop the manager and write some lines.
# When manager is stared, it should pick recent checkpoints and read those lines.
manager.stop_manager()
test_file.append_lines("Line3")
test_file.append_lines("Line4")
self._instance = manager = TestableCopyingManager(self._env_builder.config, [])
manager.start_manager()
# make sure that the first lines are lines which were written before manager start
assert set(self._wait_for_rpc_and_respond()) == set(["Line3", "Line4"])
test_file.append_lines("Line5")
test_file.append_lines("Line6")
assert set(self._wait_for_rpc_and_respond()) == set(["Line5", "Line6"])
manager.stop_manager()
test_file.append_lines("Line7")
test_file.append_lines("Line8")
# make sure that all worker session checkpoint files are consolidated and removed.
for worker_session in manager.worker_sessions:
assert not worker_session.get_checkpoints_path().exists()
assert not worker_session.get_active_checkpoints_path().exists()
assert manager.consolidated_checkpoints_path.exists()
manager.consolidated_checkpoints_path.unlink()
self._instance = manager = TestableCopyingManager(self._env_builder.config, [])
manager.start_manager()
assert self._wait_for_rpc_and_respond() == []
test_file.append_lines("Line9")
test_file.append_lines("Line10")
assert set(self._wait_for_rpc_and_respond()) == set(["Line9", "Line10"])
# verify if monitor checkpoint file is remaining untouched.
for monitor_checkpoint_path, check_text in monitors_checkpoint_paths.items():
assert monitor_checkpoint_path.exists()
assert monitor_checkpoint_path.read_text() == check_text
def test_checkpoints_consolidated_checkpoints(self):
if self.worker_sessions_count == 1 and self.workers_count == 1:
pytest.skip("This test is only for multi-worker copying manager.")
(test_file, test_file2), manager = self._init_manager(2)
# write something and stop in order to create checkpoint files.
test_file.append_lines("line1")
test_file2.append_lines("line2")
assert set(self._wait_for_rpc_and_respond()) == set(["line1", "line2"])
manager.stop_manager()
# recreate the manager, in order to simulate a new start.
self._instance = manager = TestableCopyingManager(self._env_builder.config, [])
# start manager, it has to create consolidated checkpoint file when starts.
manager.start_manager()
manager.stop()
# add some new lines
test_file.append_lines("line3")
test_file2.append_lines("line4")
checkpoint_files = scalyr_util.match_glob(
six.text_type(manager.consolidated_checkpoints_path)
)
# verify that only one file remains and it is a consolidated file.
assert checkpoint_files == [str(manager.consolidated_checkpoints_path)]
# recreate the manager, in order to simulate a new start.
self._instance = manager = TestableCopyingManager(self._env_builder.config, [])
# start manager, it has to create consolidated checkpoint file when starts.
manager.start_manager()
assert set(self._wait_for_rpc_and_respond()) == set(["line3", "line4"])
@pytest.mark.skipif(
sys.version_info < (2, 7),
reason="This test case can not be run on python < 2.7",
)
@mock.patch.object(
TestingConfiguration, "log_deletion_delay", new_callable=PropertyMock
)
@mock.patch.object(
TestingConfiguration,
"max_new_log_detection_time",
new_callable=PropertyMock,
)
def test_log_processors_lifecycle(
self, log_deletion_delay, max_new_log_detection_time
):
# mock config values so we do not need to wait for the next file scan.
log_deletion_delay.return_value = -1
# do the same to not wait when copying manager decides that file is deleted.
max_new_log_detection_time.return_value = -1
test_files, manager = self._init_manager(10)
for i, test_file in enumerate(test_files):
self._append_lines(["file_{}_line1".format(i)], log_file=test_file)
assert manager.worker_sessions_log_processors_count == len(test_files)
assert manager.matchers_log_processor_count == len(test_files)
for log_file in test_files:
log_file.remove()
# 1) log processors perform file processing and close deleted files.
manager.wait_for_full_iteration()
# 2) Copying manager removes closed processors from its collection.
manager.wait_for_full_iteration()
# 3) Log matchers remove their log processors.
manager.wait_for_full_iteration()
# check if there are no log processors remaining inside workers and log matchers.
assert manager.worker_sessions_log_processors_count == 0
assert manager.matchers_log_processor_count == 0
# crete log file back and see if log processors are created back too.
for log_file in test_files:
log_file.create()
manager.wait_for_full_iteration()
assert manager.worker_sessions_log_processors_count == len(test_files)
assert manager.matchers_log_processor_count == len(test_files)
@pytest.mark.skipif(
    sys.version_info < (2, 7),
    reason="This test case can not be run on python < 2.7",
)
@mock.patch.object(
    TestingConfiguration, "log_deletion_delay", new_callable=PropertyMock
)
@mock.patch.object(
    TestingConfiguration,
    "max_new_log_detection_time",
    new_callable=PropertyMock,
)
def test_log_processors_lifecycle_with_glob(
    self, log_deletion_delay, max_new_log_detection_time
):
    """Log processors for glob-matched files must be removed when the
    files are deleted and re-created when matching files reappear.

    NOTE(review): mock.patch.object decorators inject mocks bottom-up;
    the parameter names here look swapped relative to the patches
    (harmless because both mocks are set to -1).
    """
    # mock config values so we do not need to wait for the next file scan.
    log_deletion_delay.return_value = -1
    # do the same to not wait when copying manager decides that file is deleted.
    max_new_log_detection_time.return_value = -1
    _, manager = self._init_manager(0)
    # create some matching files.
    files = self._env_builder.recreate_files(
        10, self._env_builder.non_glob_logs_dir
    )
    assert manager.worker_sessions_log_processors_count == 0
    assert manager.matchers_log_processor_count == 0
    # wait for copying manager adds log processors.
    manager.wait_for_full_iteration()
    # both workers and log matchers should contain new log processors.
    assert manager.worker_sessions_log_processors_count == len(files)
    assert manager.matchers_log_processor_count == len(files)
    self._env_builder.remove_files(self._env_builder.non_glob_logs_dir)
    # Deletion propagates over three full manager iterations:
    # 1) log processors perform file processing and close deleted files.
    manager.wait_for_full_iteration()
    # 2) Copying manager removes closed processors from its collection.
    manager.wait_for_full_iteration()
    # 3) Log matchers remove their log processors.
    manager.wait_for_full_iteration()
    # check if there are no log processors remaining inside workers and log matchers.
    assert manager.worker_sessions_log_processors_count == 0
    assert manager.matchers_log_processor_count == 0
    # create the log files back and see if log processors are created back too.
    files = self._env_builder.recreate_files(
        10, self._env_builder.non_glob_logs_dir
    )
    manager.wait_for_full_iteration()
    assert manager.worker_sessions_log_processors_count == len(files)
    assert manager.matchers_log_processor_count == len(files)
@pytest.mark.skipif(
    sys.version_info < (2, 7),
    reason="This test case can not be run on python < 2.7",
)
@mock.patch.object(
    TestingConfiguration, "log_deletion_delay", new_callable=PropertyMock
)
@mock.patch.object(
    TestingConfiguration,
    "max_new_log_detection_time",
    new_callable=PropertyMock,
)
def test_log_processors_lifecycle_with_dynamic_matchers(
    self, log_deletion_delay, max_new_log_detection_time
):
    """Log processors created through dynamically added log configs must
    follow the same delete/re-create lifecycle as statically configured ones.

    NOTE(review): mock.patch.object decorators inject mocks bottom-up;
    the parameter names here look swapped relative to the patches
    (harmless because both mocks are set to -1).
    """
    # mock config values so we do not need to wait for the next file scan.
    log_deletion_delay.return_value = -1
    # do the same to not wait when copying manager decides that file is deleted.
    max_new_log_detection_time.return_value = -1
    _, manager = self._init_manager(0)
    # create directory which is unknown for the managers configuration
    logs_dir = self._env_builder.test_logs_dir / "dynamicaly-added-logs"
    logs_dir.mkdir()
    files = self._env_builder.recreate_files(10, logs_dir)
    # Register every file with the manager at runtime, bypassing the
    # static configuration.
    for file in files:
        log_config = self._env_builder.config.parse_log_config(
            {"path": file.str_path}
        )
        manager.add_log_config("scheduled-deletion", log_config)
    assert manager.worker_sessions_log_processors_count == 0
    assert manager.matchers_log_processor_count == 0
    # wait for copying manager adds log processors.
    manager.wait_for_full_iteration()
    assert manager.worker_sessions_log_processors_count == len(files)
    assert manager.matchers_log_processor_count == len(files)
    self._env_builder.remove_files(logs_dir)
    # Deletion propagates over three full manager iterations:
    # 1) log processors perform file processing and close deleted files.
    manager.wait_for_full_iteration()
    # 2) Copying manager removes closed processors from its collection.
    manager.wait_for_full_iteration()
    # 3) Log matchers remove their log processors.
    manager.wait_for_full_iteration()
    # check if there are no log processors remaining inside workers and log matchers.
    assert manager.worker_sessions_log_processors_count == 0
    assert manager.matchers_log_processor_count == 0
    # create the log files back and see if log processors are created back too.
    files = self._env_builder.recreate_files(10, logs_dir)
    manager.wait_for_full_iteration()
    assert manager.worker_sessions_log_processors_count == len(files)
    assert manager.matchers_log_processor_count == len(files)
def test_multiple_checkpoint_files_with_the_same_log_file(self):
    """When several checkpoint files reference the same log file, the
    manager must pick the entry with the newest *per-file* timestamp, not
    the file with the newest overall write timestamp, so a stale injected
    checkpoint can never rewind the copying position.
    """
    (test_file,), manager = self._init_manager(1)
    # write something and stop in order to create checkpoint files.
    test_file.append_lines("line1")
    assert set(self._wait_for_rpc_and_respond()) == set(["line1"])
    # Stop the copying manager and preserve its resulting checkpoints for later.
    manager.stop_manager()
    first_run_checkpoints = manager.consolidated_checkpoints
    # recreate the manager, in order to simulate a new start and write new line.
    self._instance = manager = TestableCopyingManager(self._env_builder.config, [])
    manager.start_manager()
    test_file.append_lines("line2")
    assert set(self._wait_for_rpc_and_respond()) == set(["line2"])
    manager.stop_manager()
    # get checkpoints from the last run
    checkpoints = manager.consolidated_checkpoints
    # Now get the preserved checkpoints from the first run and make their timestamp bigger
    # than in last checkpoints. Even if the write timestamp is bigger in the first checkpoints,
    # the copying manager has to pick checkpoints from the second run, because the second run checkpoints
    # have to have bigger timestamps for particular log files rather than overall write timestamp.
    first_run_checkpoints["time"] = checkpoints["time"] + 1
    # Inject invalid checkpoint as an active checkpoint from some very "distant" worker, which is not presented
    # anymore, but its checkpoints still have to be processed. By using an active checkpoint, we also test
    # the edge case where the active checkpoints are present but the "main" checkpoints are not.
    injected_checkpoints_path = (
        manager.consolidated_checkpoints_path.parent
        / "active-checkpoints-worker-10-0.json"
    )
    injected_checkpoints_path.write_text(
        six.ensure_text(json.dumps(first_run_checkpoints))
    )
    # recreate the manager, in order to simulate a new start and write new line.
    self._instance = manager = TestableCopyingManager(self._env_builder.config, [])
    manager.start_manager()
    # write new line and only this line has to be read by the copying manager.
    test_file.append_lines("line3")
    assert set(self._wait_for_rpc_and_respond()) == set(["line3"])
    # NOTE(review): other tests in this class call stop_manager(); confirm
    # stop() is equivalent here.
    manager.stop()
# NOTE(review): "Manger" in the class name is a long-standing typo; renaming
# it would change the test id, so it is only flagged here.
class CopyingMangerUnitTest(ScalyrTestCase):
    """Unit tests for CopyingManager's startup checkpoint filtering."""
    def test_process_checkpoints_on_startup(self):
        """Checkpoints whose paths match one of the configured
        ``ignore_checkpoints_on_startup_path_globs`` globs must be dropped;
        every other checkpoint passes through unchanged.
        """
        config_data = {
            "ignore_checkpoints_on_startup_path_globs": [
                "/var/log/scalyr-agent-2/*.log"
            ],
        }
        env_builder = TestEnvironBuilder()
        env_builder.init_agent_dirs()
        env_builder.init_config(config_data=config_data)
        manager = TestableCopyingManager(env_builder.config, [])
        # 1. Empty checkpoints dict
        # pylint: disable=no-member
        result = manager._CopyingManager__process_checkpoints_on_startup({})
        self.assertEqual(result, {})
        # 2. No matching checkpoints
        checkpoints = {
            "/var/log/containers/container1.log": {},
            "/var/log/containers/container2.log": {},
            "/var/log/containers/container3.log": {},
        }
        # pylint: disable=no-member
        result = manager._CopyingManager__process_checkpoints_on_startup(checkpoints)
        self.assertEqual(result, checkpoints)
        # 3. Single matching checkpoint
        checkpoints = {
            "/var/log/containers/container1.log": {},
            "/var/log/scalyr-agent-2/agent.log": {},
            "/var/log/containers/container2.log": {},
            "/var/log/containers/container3.log": {},
        }
        expected_checkpoints = {
            "/var/log/containers/container1.log": {},
            "/var/log/containers/container2.log": {},
            "/var/log/containers/container3.log": {},
        }
        # pylint: disable=no-member
        result = manager._CopyingManager__process_checkpoints_on_startup(checkpoints)
        self.assertEqual(result, expected_checkpoints)
        # 4. Multiple matching checkpoints
        # (cpu.profile survives because it does not match the "*.log" glob.)
        checkpoints = {
            "/var/log/containers/container1.log": {},
            "/var/log/scalyr-agent-2/agent.log": {},
            "/var/log/scalyr-agent-2/agent_debug.log": {},
            "/var/log/scalyr-agent-2/cpu.profile": {},
            "/var/log/containers/container2.log": {},
            "/var/log/containers/container3.log": {},
            "/var/log/scalyr-agent-2/linux_system_metrics.log": {},
        }
        expected_checkpoints = {
            "/var/log/containers/container1.log": {},
            "/var/log/scalyr-agent-2/cpu.profile": {},
            "/var/log/containers/container2.log": {},
            "/var/log/containers/container3.log": {},
        }
        # pylint: disable=no-member
        result = manager._CopyingManager__process_checkpoints_on_startup(checkpoints)
        self.assertEqual(result, expected_checkpoints)
| [
"noreply@github.com"
] | noreply@github.com |
b3c846fd274994e80d10172f9c89491cad92b480 | 6294155b9171145ca18ed027d6364fce97725dbb | /custom_apis/admin_api_views.py | efd0bb4b0554b03550f98f0d7b60a9ca10135b32 | [] | no_license | jiritichy/TCP | 344e12b47ffcbbd9d6100d1681d90f81aba8f591 | 46f69e24f6cab6f8ea3100941bc5dda3806637fa | refs/heads/master | 2023-04-08T10:32:05.093307 | 2021-04-02T08:46:11 | 2021-04-02T08:46:11 | 353,960,076 | 0 | 0 | null | 2021-04-02T08:44:51 | 2021-04-02T08:44:50 | null | UTF-8 | Python | false | false | 914 | py | from django.shortcuts import render
from authapp.models import (
WorkerDetails, JobDetails, User, Categories
)
from django.db import connection
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view,permission_classes
from rest_framework.permissions import IsAuthenticated
import requests
import json
from django.core import serializers
# BUG FIX: @permission_classes must be applied *below* @api_view, otherwise
# it decorates the already-wrapped view and the override is silently ignored
# (see DRF function-based-view docs).  The decorators are reordered so the
# empty permission list actually takes effect.
@api_view(['GET'])
@permission_classes([])
def get_worker_count(request):
    """Return aggregate dashboard counts.

    Response payload:
        worker_count    -- number of WorkerDetails rows.
        recruiter_count -- all users minus workers.
        job_count       -- all JobDetails rows.
        accepted_count  -- jobs with status == 2 (accepted).
    """
    worker_count = WorkerDetails.objects.all().count()
    user_count = User.objects.all().count()
    recruiter_count = user_count - worker_count
    job_count = JobDetails.objects.all().count()
    accepted_count = JobDetails.objects.filter(status=2).count()
    return Response(data={"worker_count": worker_count, "recruiter_count": recruiter_count, "job_count": job_count, "accepted_count": accepted_count})
| [
"41832893+maliaditya@users.noreply.github.com"
] | 41832893+maliaditya@users.noreply.github.com |
288aa6b6538d62c997475ad87548e3a3c0fc5298 | 8523658161b0899130a9a3c6f272449a8515fddb | /1010.py | 6cea665649ffd6a918dfc5b2f43fe1a2b780ad65 | [] | no_license | juliocmalvares/URIOnlineJudge | 4f3ad32cc171c34a27110c79c81fd71e338bb6d0 | 233b78abcf4dbc22a6cd05ce3cfea841c76e1140 | refs/heads/master | 2020-03-13T09:55:22.494272 | 2018-04-25T20:28:10 | 2018-04-25T20:28:10 | 131,073,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | a = input()
b = input()
prim = [p for p in a.split()]
seg = [p for p in b.split()]
prim[0] = int(prim[0])
prim[1] = int(prim[1])
prim[2] = float(prim[2])
seg[0] = int(seg[0])
seg[1] = int(seg[1])
seg[2] = float(seg[2])
total = (prim[1]*prim[2])+(seg[1]*seg[2])
print("VALOR A PAGAR: R$ %.2f" % total)
| [
"juliocmalvares07@gmail.com"
] | juliocmalvares07@gmail.com |
67b0a46a7d02e459b2ca4a9e9d9c5635591b21bf | b659e99f89cf17ae886857383cb5b708847fe3f1 | /gettingStarted/problem7.py | 8402c5ac64f20f3cd28685736d51b82d10eddaae | [] | no_license | nitheeshmavila/practice-python | bea06cc4b2b9247b926e07fd5a3987552e531242 | f54bf8934a4cf160cdfc9dc43176f1eea3bc7a41 | refs/heads/master | 2021-07-03T17:24:29.450939 | 2021-06-16T08:40:48 | 2021-06-16T08:40:48 | 100,113,256 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | '''
Problem 7: How many multiplications are performed when each of the following lines(line 1 and line 2) of code is executed?
'''
# Running tally of calls to square(); each call performs one multiplication.
noofCalls = 0


def square(n):
    """Print *n*, bump the global multiplication counter and return n squared."""
    global noofCalls
    print(n)
    noofCalls = noofCalls + 1
    result = n * n
    return result


def printCalls():
    """Report how many multiplications square() has performed so far."""
    print('no of multiplications performed:', noofCalls)


print(square(5))      # line 1: one multiplication inside square()
printCalls()
print(square(2 * 5))  # line 2: 2*5 is evaluated before the call, so it is not counted
printCalls()
# After line 1 the counter reports 1; after line 2 it reports 2, even though
# line 2 actually performed two multiplications.
| [
"mail2nitheeshmavila@gmail.com"
] | mail2nitheeshmavila@gmail.com |
b435fe028ef1a1bc1f119e7b49dcf9d535a282ff | 49d0fe764d55de48bc71aa3cba02019ce7ff0abe | /chance/admin.py | 6b678fcc31994555d67f85e40433ec42aaaf39f8 | [] | no_license | benliles/django-chance | 546f800168517c9abeb34547c945537e91fed183 | e43a6b46d55a4159ae5ddf7752cda245eca74016 | refs/heads/master | 2020-05-18T16:32:06.212903 | 2013-07-17T15:52:08 | 2013-07-17T15:52:08 | 4,775,508 | 3 | 2 | null | 2013-07-19T15:20:41 | 2012-06-24T21:26:52 | Python | UTF-8 | Python | false | false | 3,756 | py | try:
from reversion import VersionAdmin as ModelAdmin
except ImportError:
from django.contrib.admin import ModelAdmin
from django.contrib import admin
from django.http import HttpResponse
from chance.models import (Event, EventFee, EventChoice, EventChoiceOption,
Registration, EventChoiceSelection, Talk, Transaction, Track,
ScheduleItem)
class EventFeeInlineAdmin(admin.TabularInline):
    """Tabular inline for editing EventFee rows from the Event admin page."""
    model = EventFee
    extra = 1  # show one extra blank fee form
class EventChoiceInlineAdmin(admin.StackedInline):
    """Stacked inline for editing EventChoice rows from the Event admin page."""
    model = EventChoice
    extra = 1  # show one extra blank choice form
class EventAdmin(ModelAdmin):
    """Admin for Event: date drill-down plus inline fees and choices."""
    date_hierarchy = 'starts'
    inlines = [
        EventFeeInlineAdmin,
        EventChoiceInlineAdmin
    ]
class EventChoiceOptionInlineAdmin(admin.TabularInline):
    """Tabular inline for editing an EventChoice's options."""
    model = EventChoiceOption
    extra = 1  # show one extra blank option form
class EventChoiceAdmin(ModelAdmin):
    """Admin for EventChoice with inline options and a selection-summary action."""
    inlines = [
        EventChoiceOptionInlineAdmin
    ]
    # The parent event is managed elsewhere; hide it from the change form.
    exclude = ('event',)
    actions = ['selection_summary']
    def selection_summary(self, request, queryset):
        """Admin action: plain-text report with, for every selected choice,
        the count of user selections per option."""
        response = HttpResponse(mimetype='text/plain')
        for choice in queryset.all():
            response.write('%s (%s)\n' % (choice.label, choice.event.name,))
            for option in choice.options.all():
                response.write('\t%s: %d\n' % (option.display,
                    EventChoiceSelection.objects.filter(option=option).count(),))
            response.write('\n')
        response.write('\n')
        return response
    selection_summary.short_description = u'Summary of user selections for ' \
        'a choice.'
class RegistrationAdmin(ModelAdmin):
    """Admin for Registration with a CSV-export action."""
    model = Registration
    # The event is set elsewhere; hide it from the change form.
    exclude = ('event',)
    list_filter = ('event', 'paid',)
    actions = ['csv_summary']
    def csv_summary(self, request, queryset):
        """Admin action: CSV rows of attendee name, email, paid flag, followed
        by the display value of every selection (ordered by choice)."""
        import csv  # imported locally, only needed for this action
        response = HttpResponse(mimetype='text/csv')
        writer = csv.writer(response)
        for registration in queryset.all():
            writer.writerow([
                registration.attendee_name,
                registration.attendee_email,
                unicode(registration.paid)
            ] + [
                selection.option.display for selection in
                registration.selections.order_by('option__choice').all()])
        return response
    csv_summary.short_description = u'Get a CSV Summary'
class TalkAdmin(ModelAdmin):
    """Admin for Talk submissions; shows a contact link for the owner."""
    list_filter = ('event', 'accepted',)
    list_display = ('title','presenter','contact')
    def contact(self, obj):
        """Render the talk owner as a mailto link when an email is known.

        Falls back through full name, email, username and finally the
        primary key for the display text; returns the string 'None' when
        the talk has no owner.
        """
        owner = obj.owner
        if not owner:
            return 'None'
        name = (owner.get_full_name()
                or owner.email
                or owner.username
                or str(owner.pk))
        if owner.email:
            return u'<a href="mailto:%s">%s</a>' % (owner.email, name,)
        return name
    contact.short_description = u'Owner'
    contact.allow_tags = True
class TransactionAdmin(ModelAdmin):
    """Admin listing for Transaction records."""
    list_display = ('pk', 'owner', 'amount_paid', 'created', 'closed')
class ScheduleItemInlineAdmin(admin.TabularInline):
    """Tabular inline for editing a track's schedule items."""
    model = ScheduleItem
    extra = 3  # show three extra blank schedule rows
class TrackAdmin(ModelAdmin):
    """Admin for Track; schedule items are edited inline."""
    list_display = ('event','name','location',)
    list_filter = ('event',)
    inlines = [ScheduleItemInlineAdmin]
class ScheduleItemAdmin(ModelAdmin):
    """Stand-alone admin for ScheduleItem (also editable inline on Track)."""
    list_display = ('track','__unicode__',)
    list_filter = ('track',)
# Register every chance model with the Django admin site.
admin.site.register(Event, EventAdmin)
admin.site.register(EventChoice, EventChoiceAdmin)
admin.site.register(Registration, RegistrationAdmin)
admin.site.register(Talk, TalkAdmin)
admin.site.register(Transaction, TransactionAdmin)
admin.site.register(Track, TrackAdmin)
admin.site.register(ScheduleItem, ScheduleItemAdmin)
| [
"benliles@arch.tamu.edu"
] | benliles@arch.tamu.edu |
c7c1943a417de7573e5aebf77ae57a09db5008a5 | 3b89c0a97ac6b58b6923a213bc8471e11ad4fe69 | /python/CodingExercises/LeetCode1.py | 86ca7efb65730bbd49152c8028c24b15a168c256 | [] | no_license | ksayee/programming_assignments | b187adca502ecf7ff7b51dc849d5d79ceb90d4a6 | 13bc1c44e1eef17fc36724f20b060c3339c280ea | refs/heads/master | 2021-06-30T07:19:34.192277 | 2021-06-23T05:11:32 | 2021-06-23T05:11:32 | 50,700,556 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | '''
1. Two Sum
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
'''
def LeetCode1(ary,k):
dict={}
fnl_lst=[]
for i in range(0,len(ary)):
key=ary[i]
diff=k-key
if diff in dict.keys():
val=dict[diff]
tup=(i,val)
fnl_lst.append(tup)
else:
dict[key]=i
return fnl_lst
def main():
ary=[2, 7, 11, 15]
k=9
print(LeetCode1(ary,k))
if __name__=='__main__':
main() | [
"kartiksayee@gmail.com"
] | kartiksayee@gmail.com |
4271c90c8228df812910694cbfa86764743f6aec | 7e71e9002f7a87c0fe559a579cde63e21bd43974 | /datascience_oper_empmst.py | 89a2d09cfe280286d999516e0bdb51141df8a7b0 | [] | no_license | ashish-2412/Employee-Management-and-Salary-Prediction | 0236b0af6a0f1e2fd9fc39c5b9152a8018a3aea3 | 9ad075f39b5ec37e48e70fdf4654908c3713798d | refs/heads/master | 2021-01-02T20:51:10.377009 | 2020-02-11T09:53:35 | 2020-02-11T09:53:35 | 239,795,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,956 | py | # Program name : datascience_oper_empmst.py
# Data science operations on empmst.csv
import pandas as pd
import numpy as np
from datetime import datetime
# Widen console output so large frames are not truncated when printed.
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)

empdf = pd.read_csv('empmst.csv', index_col='Empno', parse_dates=['DOB'])
print(empdf.info())

# Drop null values. axis=0 for row-wise drop, axis=1 for column-wise.
# 'any' drops the row if any column in that row is null;
# 'all' drops the row only if all columns in that row are null.
empdf.dropna(axis=0, how='any', inplace=True)
print(empdf.info())

# Add derived pay columns: conveyance allowance (30% of salary) and total pay.
empdf['Conv'] = empdf.Salary * 0.30
empdf['Total'] = empdf.Salary + empdf.HRA + empdf.Conv
print(empdf.head())

# Calculate Age from DOB and add the column.
dt = datetime.today()
age = dt - empdf['DOB']
empdf['Age'] = age / np.timedelta64(1, 'Y')  # divide by 'Y' for years; Age comes out as float
empdf["Age"] = empdf['Age'].astype(int)  # convert to int
print(empdf.head())

# Write the new DF to a CSV file.
# NOTE(review): absolute Windows path while the input is read relatively --
# consider making this path configurable.
empdf.to_csv('c:\\CSI\CSI Summer Proj\\CSV XLS files\\empnew.csv')

# Multiple column selection.
col_lst = ['Empname', 'Salary']
print(empdf[col_lst])  # same as empdf[['Empname', 'Salary']]

# Row selection with iloc.
print(empdf.iloc[:2])      # top two rows
print(empdf.iloc[-2:])     # bottom two rows
print(empdf.iloc[:, 0:3])  # all rows, first 3 columns
print(empdf[empdf['Salary'] > 100000])  # rows with Salary > 1L

# AND condition: rows with Salary > 1L that live in Kolkata.
print(empdf[(empdf['Salary'] > 100000) & (empdf['City'] == 'Kolkata')])
# OR condition: rows with Salary > 1L or living in West Bengal.
print(empdf[(empdf['Salary'] > 100000) | (empdf['State'] == 'West Bengal')])

# Rename columns. To rename more, e.g. add 'Salary': 'Basic' inside the dict.
empdf = empdf.rename(columns={'ExpYr': 'Expr'})
print(empdf.head())

# Check the column for null values (all False: nulls were dropped above).
print(empdf['DOB'].isnull())

# Add a column 'marital_stat' initialised with 0.
empdf['marital_stat'] = int(0)
print(empdf.head())

# Replace 0 in that field with -1, just for testing.
empdf['marital_stat'].replace(0, -1, inplace=True)
print(empdf.head())

# Mean, min, max, count, median and std of Salary grouped by State.
print('Mean, min, max, count, median, std of Salary group by State')
print('Mean:\n', empdf['Salary'].groupby(empdf['State']).mean())
print('Min:\n', empdf['Salary'].groupby(empdf['State']).min())
print('Max:\n', empdf['Salary'].groupby(empdf['State']).max())
print('Count :\n', empdf['Salary'].groupby(empdf['State']).count())
print('Median:\n', empdf['Salary'].groupby(empdf['State']).median())
print('Standard Deviation:\n', empdf['Salary'].groupby(empdf['State']).std())
print('Correlation :\n', empdf.corr())

# Sorting rows.
print(empdf.sort_values(by='DOB'))  # ascending
print(empdf.sort_values(by='Salary', ascending=False))  # descending sort
print(empdf.sort_values(by=['State', 'City']))  # sort on multiple fields

# Convert the datatype of a field.
# BUG FIX: Series.astype() has no 'inplace' keyword -- the original call
# raised TypeError (and would not have modified the frame anyway).
# Assign the converted series back instead.
empdf['marital_stat'] = empdf['marital_stat'].astype(str)
print(empdf)

# map with DF - map iterates over a Series; applymap applies to each cell
# of the DF; apply applies a function along an axis.
empdf['HRA'] = empdf['HRA'].map(lambda x: x - 1000)  # reduce HRA by 1000 for all employees
print(empdf.head())

# Bin the Age column into categories and create a new column 'AgeCat'
# using pandas' cut().
from sklearn import preprocessing  # NOTE(review): unused import kept as in the original
bins = [20, 30, 40, 50, 60]
group_names = ['20 to 30', '30 to 40', '40 to 50', '50 to 60']
empdf['AgeCat'] = pd.cut(empdf['Age'], bins, labels=group_names)
print(empdf.head())
| [
"ashishbarca@gmail.com"
] | ashishbarca@gmail.com |
3acd2877be2d35889598ed2111ffaffb3f802be0 | 4b434c6af1d205e33941289211159dfde865e38e | /con.Bmaml.eq/train.py | fdc88d237727a3c3e47393deafa25044993743e3 | [] | no_license | a1600012888/BMAML | 3b2a7f264ed13ef598cc3677d18714c4f8354176 | 4802a917d8061011be9a2b09174598216812cc58 | refs/heads/master | 2020-04-14T19:10:40.363219 | 2019-01-16T17:03:18 | 2019-01-16T17:03:18 | 164,047,888 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,915 | py | import torch
from collections import OrderedDict
from tqdm import tqdm
from utils import AvgMeter
from torch.nn.utils import vector_to_parameters, parameters_to_vector
def TrainOneTask(Task, M, SVGD, optimizer, DEVICE, num_of_step = 3, step_size = 1e-3):
    """Meta-train on one task: adapt the particles M with `num_of_step` SVGD
    steps on the task's train split, then backprop the negative average test
    log-likelihood through the adaptation and take one optimizer step.

    Task: tuple (X, Y, Xtest, Ytest, std) of tensors for a single task.
    M: list of particles; each particle is an iterable of parameters.
    SVGD: updater exposing NablaLogP (task log-density) and step().
    Returns an OrderedDict of diagnostic average log-probabilities.
    """
    X, Y, Xtest, Ytest, std = Task
    X = X.to(DEVICE)
    Y = Y.to(DEVICE)
    Xtest = Xtest.to(DEVICE)
    Ytest = Ytest.to(DEVICE)
    std = std.to(DEVICE) * 100 # * 100 to stabilize
    # Point the log-density at this task's training data.
    SVGD.NablaLogP.update(X, Y, std)
    SVGD.InitMomentumUpdaters()
    # Average particle log-probability before adaptation (diagnostic only).
    with torch.no_grad():
        start_logp = 0
        for paramsvec in M:
            start_logp = start_logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
        start_logp = start_logp / len(M)
    # Inner-loop adaptation.  retain_graph=True keeps the graph alive so the
    # outer backward below can differentiate through the SVGD updates.
    for i in range(num_of_step):
        M = SVGD.step(M, retain_graph = True, step_size = step_size)
    # Average train log-probability after adaptation (diagnostic only).
    with torch.no_grad():
        end_logp = 0
        for paramsvec in M:
            end_logp = end_logp + SVGD.NablaLogP(True, paramsvec, ret_grad = False)
        end_logp = end_logp / len(M)
    # Evaluate on the test split.  Deliberately NOT under no_grad: this is
    # the differentiable outer objective.
    SVGD.NablaLogP.update(Xtest, Ytest, std)
    logp = 0
    for paramsvec in M:
        logp = logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
    logp = logp / len(M)
    loss = logp * -1.0  # maximize the test log-likelihood
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    ret_dic = OrderedDict()
    ret_dic['start_logp_train'] = start_logp.item()
    ret_dic['end_logp_train'] = end_logp.item()
    ret_dic['end_logp_joint'] = logp.item()
    return ret_dic
def TrainOneTaskWithChaserLoss(Task, M, SVGD, optimizer, DEVICE, num_of_step = 3, step_size = 1e-3):
    """Meta-train on one task with the BMAML chaser loss: pull the particles
    adapted on the train split ("chaser") toward particles adapted further on
    train+test ("leader", treated as an approximation of the true posterior).

    Task: tuple (X, Y, Xtest, Ytest, std) of tensors for a single task.
    M: list of particles; each particle is an iterable of parameters.
    Returns an OrderedDict of diagnostic values including the chaser loss.
    """
    optimizer.zero_grad()
    X, Y, Xtest, Ytest, std = Task
    X = X.to(DEVICE)
    Y = Y.to(DEVICE)
    Xtest = Xtest.to(DEVICE)
    Ytest = Ytest.to(DEVICE)
    std = std.to(DEVICE) * 100 # * 100 to stabilize
    SVGD.NablaLogP.update(X, Y, std)
    SVGD.InitMomentumUpdaters()
    # Compute the LogP for initial particles (For hyper-param tuning)
    with torch.no_grad():
        start_logp = 0
        for paramsvec in M:
            start_logp = start_logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
        start_logp = start_logp / len(M)
    # Inner fit (chaser): differentiable, hence retain_graph=True.
    for i in range(num_of_step):
        M = SVGD.step(M, retain_graph = True, step_size = step_size)
    # Compute the LogP of the training set after the fitting (For hyper-param tuning)
    with torch.no_grad():
        end_logp = 0
        for paramsvec in M:
            end_logp = end_logp + SVGD.NablaLogP(True, paramsvec, ret_grad = False)
        end_logp = end_logp / len(M)
    # Re-target the log-density at the joint train+test set for the leader.
    Xtrain_and_test = torch.cat((X, Xtest))
    Ytrain_and_test = torch.cat((Y, Ytest))
    SVGD.NablaLogP.update(Xtrain_and_test, Ytrain_and_test, std)
    SVGD.InitMomentumUpdaters()
    # Compute the LogP of the whole set after the fitting (For hyper-param tuning)
    with torch.no_grad():
        logp = 0
        for paramsvec in M:
            logp = logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
        logp = logp / len(M)
    # Approximate the true posterior (leader): start from detached copies of
    # the chaser particles so no gradient flows into the leader's updates.
    M_true = []
    for paramsvec in M:
        m = torch.nn.ParameterList([torch.nn.Parameter(p.detach()) for p in paramsvec])
        #m = [p.detach() for p in paramsvec]
        M_true.append(m)
    #M_true = SVGD.step(M, retain_graph=False, step_size=step_size)
    for i in range(num_of_step):
        M_true= SVGD.step(M_true, retain_graph=False, step_size=step_size)
    # Chaser loss: mean squared distance between each chaser particle and its
    # (detached) leader counterpart, averaged over particles.
    chaser_loss = 0
    for paramsvec, paramsvec_true in zip(M, M_true):
        vec = parameters_to_vector(paramsvec)
        vec_true = parameters_to_vector(paramsvec_true).detach()
        chaser_loss = chaser_loss + torch.dot((vec - vec_true),(vec - vec_true) )
        #for param, param_true in zip(paramsvec, paramsvec_true):
        #    chaser_loss = chaser_loss + torch.mean((param - param_true.detach()) ** 2)
    chaser_loss = chaser_loss / len(M)
    # Compute the true LogP of the whole set (For hyper-param tuning)
    with torch.no_grad():
        true_logp = 0
        for paramsvec in M_true:
            true_logp = true_logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
        true_logp = true_logp / len(M)
    chaser_loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    ret_dic = OrderedDict()
    ret_dic['start_logp_train'] = start_logp.item()
    ret_dic['end_logp_train'] = end_logp.item()
    ret_dic['end_logp_joint'] = logp.item()
    ret_dic['true_logp_joint'] = true_logp.item()
    ret_dic['chaser_loss'] = chaser_loss.item()
    return ret_dic
def test(TaskLoader, M, SVGD, DEVICE, num_of_step = 3, step_size = 1e-3):
    """Continual-learning evaluation loop.

    For 100 sampled task sequences: for every consecutive pair (j, j+1),
    adapt the particles on task j's train split and score the average
    log-likelihood on task j+1's test split.  Returns the running mean
    log-probability over all evaluations.

    NOTE(review): the test tensors and std come from the *next* task (j+1)
    while the fit uses task j -- presumably deliberate forward-transfer
    evaluation; confirm.  The final ``M = raw_M`` rebinds only the local
    name and has no effect outside this function.
    """
    raw_M = M
    LogP = AvgMeter()
    pbar = tqdm(range(100))
    for i in pbar:
        task = next(TaskLoader)
        for j in range(len(task)-1):
            X, Y, Xtest, Ytest, std = task[j]
            X_next, Y_next, Xtest_next, Ytest_next, std_next = task[j+1]
            X = X.to(DEVICE)
            Y = Y.to(DEVICE)
            #Xtest = Xtest.to(DEVICE)
            #Ytest = Ytest.to(DEVICE)
            #std = std.to(DEVICE) * 100 # * 100 to stabilize
            # Evaluation targets are taken from the NEXT task in the sequence.
            Xtest = Xtest_next.to(DEVICE)
            Ytest = Ytest_next.to(DEVICE)
            std = std_next.to(DEVICE) * 100 # * 100 to stabilize
            # Fit on the current task's training data; no outer gradient is
            # needed at evaluation time, hence retain_graph=False.
            SVGD.NablaLogP.update(X, Y, std)
            SVGD.InitMomentumUpdaters()
            #Mt = SVGD.step(M, retain_graph=False, step_size=step_size)
            for tt in range(num_of_step):
                M = SVGD.step(M, retain_graph = False, step_size = step_size )#/ (len(task) -1 ))
            # Score the adapted particles on the next task's test split.
            SVGD.NablaLogP.update(Xtest, Ytest, std)
            with torch.no_grad():
                logp = 0
                for paramsvec in M:
                    logp = logp + SVGD.NablaLogP(True, paramsvec, ret_grad=False)
                logp = logp / len(M)
            LogP.update(logp.item())
            pbar.set_description("Running Validation")
            pbar.set_postfix({'Logp_test':LogP.mean})
    M = raw_M
    return LogP.mean
| [
"1600012888@pku.edu.cn"
] | 1600012888@pku.edu.cn |
f53ed7447917dec09d5d66ad99297a866cab65af | 78f3fe4a148c86ce9b80411a3433a49ccfdc02dd | /2018/11/graphics/elex18-all-suburb-map-20181119/graphic_config.py | 006fc1be9c13a6867f9c6636d339a291b2f137a6 | [] | no_license | nprapps/graphics-archive | 54cfc4d4d670aca4d71839d70f23a8bf645c692f | fe92cd061730496cb95c9df8fa624505c3b291f8 | refs/heads/master | 2023-03-04T11:35:36.413216 | 2023-02-26T23:26:48 | 2023-02-26T23:26:48 | 22,472,848 | 16 | 7 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #!/usr/bin/env python
import base_filters
# Key of the Google Doc that supplies this graphic's copy text.
COPY_GOOGLE_DOC_KEY = '105w9FOQjFxe2xS_gA8rB6fXNWs-Tlyr4Jgu3icfRJgI'

USE_ASSETS = False

# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300

# Jinja filters taken from the shared base_filters module imported above.
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| [
"ahurt@npr.org"
] | ahurt@npr.org |
d4e5bd09306a3676f8bc4c9f4595b3b8c8f20ce2 | c9ffdd094500896ac61726bdba581d8d8f1c0939 | /bookify/summarizer/views.py | ff8d79961151ee904bc9a62fc8302ba9919bccb0 | [] | no_license | djm-1/Hacknitp3.0 | abb6a5da4e7be30b7bef8851c8be48f73d877ed6 | 2fa67f5e023a8de143332d77c46211dd04f2a4f0 | refs/heads/master | 2023-02-15T18:57:41.164864 | 2021-01-17T18:50:39 | 2021-01-17T18:50:39 | 329,995,750 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,534 | py | # importing libraries
from django.shortcuts import render,HttpResponse
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
import pdfplumber
import re
import os
import goslate
from textblob import TextBlob
from gtts import gTTS
from .models import Bookify
def _extract_pdf_text(pdf_file):
    """Return the concatenated text of every page of *pdf_file*.

    Uses a context manager so the PDF handle is closed even when a page
    fails to parse (the original only closed it on the success path).
    """
    with pdfplumber.open(pdf_file) as pdf:
        text = ''
        for page in pdf.pages:
            text += page.extract_text()
        return text


def _summarize(article_text, sentence_count=10):
    """Return an extractive summary of *article_text*.

    Sentences shorter than 30 words are scored by the normalised
    frequencies of their non-stop-words; the *sentence_count* highest
    scoring sentences are joined into the summary.
    """
    import heapq  # local import, as in the original module

    # Letters only with collapsed whitespace -- used for word statistics.
    formatted_article_text = re.sub('[^a-zA-Z]', ' ', article_text)
    formatted_article_text = re.sub(r'\s+', ' ', formatted_article_text)
    sentence_list = nltk.sent_tokenize(article_text)
    # A set gives O(1) membership tests (the original used a list).
    stop_words = set(nltk.corpus.stopwords.words('english'))
    word_frequencies = {}
    for word in nltk.word_tokenize(formatted_article_text):
        if word not in stop_words:
            word_frequencies[word] = word_frequencies.get(word, 0) + 1
    if not word_frequencies:
        # e.g. a scanned PDF with no extractable words; the original crashed
        # here on max() of an empty sequence.
        return ''
    maximum_frequency = max(word_frequencies.values())
    for word in word_frequencies:
        word_frequencies[word] = word_frequencies[word] / maximum_frequency
    sentence_scores = {}
    for sent in sentence_list:
        if len(sent.split(' ')) >= 30:
            continue  # very long sentences are never summary candidates
        for word in nltk.word_tokenize(sent.lower()):
            if word in word_frequencies:
                sentence_scores[sent] = (
                    sentence_scores.get(sent, 0) + word_frequencies[word]
                )
    summary_sentences = heapq.nlargest(
        sentence_count, sentence_scores, key=sentence_scores.get
    )
    return ' '.join(summary_sentences)


def index(request):
    """Handle the summariser page.

    GET renders the upload form.  POST expects a PDF in ``myfile`` and a
    language code in ``lang``: the view stores the upload, builds an
    extractive summary of its text, translates it when ``lang`` is not
    English, renders it to an mp3 with gTTS and records the audio path
    on the Bookify row.
    """
    if request.method != 'POST':
        return render(request, 'index.html')
    uploaded_file = request.FILES['myfile']
    file_extension = uploaded_file.name.split('.')[-1]
    if file_extension != 'pdf':
        return HttpResponse('file format should be pdf only !')
    new_audio_book = Bookify.objects.create(input_file=uploaded_file)
    text = _extract_pdf_text(new_audio_book.input_file)
    # Strip citation markers like "[12]" and collapse whitespace.
    article_text = re.sub(r'\[[0-9]*\]', ' ', text)
    article_text = re.sub(r'\s+', ' ', article_text)
    summary = TextBlob(_summarize(article_text))
    lang = request.POST.get('lang')
    if lang != 'en':
        summary = summary.translate(from_lang='en', to=lang)
    tts = gTTS(str(summary), lang=lang)
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    audio_name = uploaded_file.name.split('.')[0]
    tts.save("%s.mp3" % os.path.join(BASE_DIR + '/media/audio/', audio_name))
    # Record the media-relative path of the generated audio file.
    Bookify.objects.filter(id=new_audio_book.id).update(
        output_file='audio/' + audio_name + '.mp3'
    )
    return render(
        request, 'index.html', {'model': Bookify.objects.get(id=new_audio_book.id)}
    )
| [
"dibyajyoti.bhs@gmail.com"
] | dibyajyoti.bhs@gmail.com |
5ff24a90944791e22d0405a2032a3cff6be92e21 | 9ceb65546ca77d7b79893206b31403ebc523c902 | /SiNE/Doc2Vec.py | 90ee3e29f25eb7b28eb47d16096551a43ad5b9f6 | [] | no_license | Ancrilin/machinelearning | 259a925cecb32aa053448275fb3af16e518166e1 | 96ad754a557fe64c6dcd0e171f3c98f3c4fac8cc | refs/heads/master | 2020-08-01T14:58:30.636276 | 2019-11-20T13:21:08 | 2019-11-20T13:21:08 | 173,422,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,881 | py | from gensim.models.doc2vec import Doc2Vec, TaggedDocument
import gensim
import numpy
import csv
filepath = "data/20ClassesRawData_API_cleanTag.csv"
stop_path = 'data/stop.txt'

name = []        # API name per CSV row (header entry removed further below)
label_dict = {}  # category -> row index of its first occurrence
label = []       # category per CSV row
title_dict = {}  # API name -> row index of its first occurrence
doc = []         # tokenised description per CSV row
stop = set()     # stop-word list; a set makes the filtering below O(1) per word

# Load the stop-word list (one word per line).
with open(stop_path, 'r', encoding='utf-8') as stp:
    for row in stp:
        stop.add(row.strip())

with open(filepath, 'r', encoding="utf-8") as fp:
    reader = csv.reader(fp)
    i = -1
    for row in reader:
        description = row[3]
        t_name = row[2]
        t_name = t_name.replace(" API", '')  # drop the trailing " API" suffix
        name.append(t_name)
        t_label = row[5]
        label.append(t_label)
        # Strip punctuation/newlines, lower-case and tokenise on spaces.
        description = description.replace('\n', '').replace('.', '').replace(',', '').replace('(', '').replace(')', '').lower().strip().split(' ')
        # BUG FIX: the original removed stop words from `description` while
        # iterating over it, which skips the element right after every removed
        # word and therefore leaves some stop words in.  Build a filtered
        # list instead.
        description = [word for word in description if word not in stop]
        doc.append(description)
        if t_name not in title_dict:
            title_dict[t_name] = i
        if t_label not in label_dict:
            label_dict[t_label] = i
        i = i + 1

# The first CSV row is the header; drop its entries everywhere.
del title_dict['name']
del label_dict['category']
name = name[1:]
label = label[1:]
doc = doc[1:]

# Build the gensim training corpus.  Every document is tagged with the index
# of its *label*, so documents of one category share a tag.
data = []
print(label)
print(label_dict)
for i in range(len(doc)):
    tag = TaggedDocument(doc[i], tags=[label_dict[label[i]]])
    data.append(tag)
    print(tag)
def train(x_train, size=200, epoch_num=1): ##size 最终训练出的句子向量的维度
model_dm = Doc2Vec(x_train, min_count=3, window=5, vector_size=size, sample=1e-3, negative=5, workers=4)
model_dm.train(x_train, total_examples=model_dm.corpus_count, epochs=10)
model_dm.save('model/model_dm') ##模型保存的位置
return model_dm
print(len(data))
model = train(data)
with open("result/description_vec_label.csv", 'w', encoding="utf-8", newline='') as fp:
header = ['name', 'vector']
writer = csv.writer(fp)
writer.writerow(header)
for each in range(len(data)):
row = []
t_name = name[each]
vec = model.docvecs[each]
t_label = label[each]
# print(t_name)
# print(vec)
row.append(t_name)
row.append(vec)
row.append(t_label)
writer.writerow(row)
model.wv.save_word2vec_format('result/word_vec.txt',binary = False)
| [
"1162236967@qq.com"
] | 1162236967@qq.com |
734bf560f6432a6a310f7a443c030f24bb698856 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/358/usersdata/288/102525/submittedfiles/estatistica.py | 9fdd68752a0b6be1cd5aa630f88df85ada3bc87a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | # -*- coding: utf-8 -*-
def media(lista):
media=sum(lista)/len(lista)
return media
#Baseado na função acima, escreva a função para calcular o desvio padrão de uma lista
def desvio_padrao(lista):
soma=0
for i in range (0,len(lista),1):
soma+=((media(lista)-lista[i])**2)
desvio=(soma/(n-1))**0.5
return desvio
#Por último escreva o programa principal, que pede a entrada e chama as funções criadas.
m=int(input("Digite a quantidade de colunas: "))
n=int(input("Digite a quantidade de linhas: "))
matriz=[]
for i in range (0,m,1):
linha=[]
for j in range (0,n,1):
linha.append(int(input("Digite o %d numero da matriz: "%(j+1))))
matriz.append(linha)
for i in range (0,n,1):
print (media(matriz[i]))
print ("%.2f"%(desvio_padrao(matriz[i]))) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8ec0d9d19b8d82342da6d1286c1b84ca154226d9 | 99bc8c61bd3c934d702b54566dbe1f6354374ea4 | /gpx2csv.py | 33f4b02cf35727ce1a942ba13e14d0e0e9d62c6d | [
"MIT"
] | permissive | oneandonlyoddo/gpx2csv | e107aa288b3810c35a9a0056348196f758c32901 | 41f7bd14661383350a35df22de1652d1898f379e | refs/heads/master | 2023-01-14T01:01:11.011082 | 2020-11-09T13:20:46 | 2020-11-09T13:20:46 | 311,342,967 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,151 | py | #!/usr/bin/python
# gpx 2 csv
# Jonas Otto - helloworld@jonasotto.de
# Converting GPX files exported from Strava .csv files
# adding Sun Elevation, Azimuth and estimated Clear Sky Radiation
# for data visualisation in Houdini etc.
import csv
from datetime import datetime as dt
from datetime import timezone
import math
import re
import argparse
import os
import sys
def convert(file_path):
file_name = os.path.basename(file_path)
without_extension = ".".join(file_name.split(".")[:-1])
file_path_only = os.path.dirname(file_path)
print_info("Starting conversion of %s." % file_name)
start = dt.now()
with open(file_path, 'r') as gpx_file:
gpx = gpxpy.parse(gpx_file)
csv_data = []
start_time = None
for track in gpx.tracks:
for segment in track.segments:
for i, point in enumerate(segment.points):
date = point.time
date_string = date.strftime("%Y-%m-%d %H:%M:%S")
date_timestamp = date.replace(tzinfo=timezone.utc).timestamp()
if not start_time:
start_time = date_timestamp
time_offset = date_timestamp - start_time
lat = point.latitude
long = point.longitude
alt = get_altitude(lat, long, date)
azi = get_azimuth(lat, long, date)
ele = point.elevation
extensions = {"atemp": None, "hr": None, "cad": None, "power": None}
for ext in point.extensions:
children = list(ext)
if len(children) > 0:
for extchild in list(ext):
tag = re.sub(r'{.*}', '', extchild.tag).strip()
value = extchild.text
if tag in extensions:
extensions[tag] = value
else:
if ext.tag in extensions:
extensions[ext.tag] = ext.text
speed = segment.get_speed(i)
radi = radiation.get_radiation_direct(date, alt)
csv_row = [lat, long, ele, extensions["atemp"], extensions["hr"], extensions["cad"], extensions["power"], speed, alt, azi, radi, date_string, date_timestamp, time_offset]
csv_data.append(csv_row)
csv_name = "%s.csv" % (without_extension)
csv_file_path = os.path.join(file_path_only, csv_name)
with open(csv_file_path, 'w', newline='') as csv_file:
wr = csv.writer(csv_file)
header = ["latitude", "longitude", "elevation", "temperature", "heart_rate", "cadence", "power", "speed_ms", "sun_altitude", "sun_azimuth", "sun_radiation", "date", "timestamp", "time_offset"]
wr.writerow(header)
for row in csv_data:
wr.writerow(row)
end = dt.now()
duration = end - start
mins = duration.total_seconds() / 60.0
print_info("Finised converting %s" % file_name)
print_info("Processing time: %f minutes" % mins)
print_line()
def get_gpxfiles_from_folder(folder_path):
files = os.listdir(folder_path)
gpxfiles = [os.path.join(folder_path,file) for file in files if file.endswith(".gpx")]
return gpxfiles
def main():
start = dt.now()
parser = argparse.ArgumentParser(description='Converting GPX files exported from Strava .csv files for data visualisation in Houdini etc.')
parser.add_argument('--file', help='A single .gpx file to convert')
parser.add_argument('--folder', help='A folder of .gpx files to batch convert')
args = parser.parse_args()
gpxfile = args.file
gpxFolder = args.folder
files_to_convert = []
if gpxFolder is None and gpxfile is not None:
# single file conversion
print_info("Attempting a single file conversion")
files_to_convert.append(gpxfile)
elif gpxfile is None and gpxFolder is not None:
# batch conversion
print_info("Attempting a batch file conversion")
files_to_convert = get_gpxfiles_from_folder(gpxFolder)
elif gpxfile is None and gpxFolder is None:
# you retard did it wrong
print_warning("Please supply a file or a folder.")
print_warning("Run python gpx2csv.py --help for more information.")
exit()
else:
# you retard did it wrong
print_warning("Please only supply a file OR a folder. Not both.")
print_warning("Run python gpx2csv.py --help for more information.")
exit()
print_info("Found %d .gpx files to convert." % (len(files_to_convert)) )
for i,file in enumerate(files_to_convert):
print_info("%i / %i" % (i, len(files_to_convert)))
convert(file)
end = dt.now()
duration = end - start
mins = duration.total_seconds() / 60.0
print_info("Finished converting all .gpx files to .csv files.")
print_info("Processing time: %f minutes" % mins)
print_info("You can find all generated .csv files next to their originals")
def print_line():
print("----------------------------------------------------------------------------------")
def print_info(info_text):
#prints in green
print('\033[92m' + info_text + '\033[0m')
def print_warning(warning_text):
#prints in red
print('\033[91m' + warning_text + '\033[0m')
#os.system('color')
print_line()
try:
from pysolar.solar import *
except ImportError as error:
print_warning("Error: Couldn't import the pysolar library.")
print_warning("Please install via pip install -r requirements.txt or pip install pysolar.")
print_line()
exit()
try:
import gpxpy
except ImportError as error:
print_warning("Error: Couldn't import the gpxpy library.")
print_warning("Please install via pip install -r requirements.txt or pip install gpxpy.")
print_line()
exit()
if __name__ == "__main__":
main() | [
"jonas.otto@field.io"
] | jonas.otto@field.io |
6d839d38862e2800ecd290f48f06e18d971eacd4 | acc307e621c01a98f93be97b8082c82b335f83d8 | /nate/ejercicios/conceptos_en_practica_pt1/tablas_de_multiplicar/invertir_range.py | 4c1a1ae2edc54212d987c11e11ae44db0b5c3247 | [] | no_license | catrielzz/python_practices | ea7496b2aeb12f8f1f0bb3e925c1fd828e3bc066 | ed363093bba79294687e31655c5213b368bd0d3c | refs/heads/master | 2022-04-12T01:22:10.888881 | 2020-03-05T16:21:55 | 2020-03-05T16:21:55 | 161,086,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | numero_tabla = int(input("De que numero quieres la tabla de multiplicar: "))
for multiplo in reversed(range(1, 11)):
print("{} x {} = {}".format(numero_tabla, multiplo, numero_tabla * multiplo))
| [
"estradayoel@gmail.com"
] | estradayoel@gmail.com |
a983c342b4e9049da5aaf95545a9a0af1b89c60e | 831431d77d3078d60b3dbab4bd65996de27c03dc | /OrderManagement/views.py | f9e6969c3379c74fcec619e531cc423c65e64dd3 | [] | no_license | anika07svg/E-COOKBOOK | 1406ced774b9224188767e1a9a6e544a813b5b28 | 7e13608235b396b89a8ff51fd5bee2703b5ce7dd | refs/heads/master | 2023-03-31T02:47:23.241107 | 2021-04-04T17:20:04 | 2021-04-04T17:20:04 | 349,135,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,120 | py | from django.shortcuts import render, get_object_or_404, redirect, HttpResponseRedirect
from .models import Order
from .forms import OrderForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Food
from django.urls import reverse
from FoodManagement .models import Cart
# Create your views here.
def showOrders(request):
orderList = Order.objects.all()
context = {
'Order': orderList
}
return render(request, 'OrderManagement/OrderList.html', context)
def registration(request):
form = UserCreationForm()
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
context = {
'form' : form
}
return render(request, 'OrderManagement/registration.html', context)
@login_required
def my_orders(request):
orders = Order(user=request.user)
try:
orders = Order.objects.filter(user=request.user)
order_status = True
except orders.DoesNotExist:
orders = Order(user=request.user)
order_status = False
total = 0.0
for order in orders:
total += order.food.Food_Price
context = {
'orders': orders,
'order_status': order_status,
'total' : total
}
return render(request, 'OrderManagement/Order.html', context)
@login_required
def make_order(request, food_id):
food = get_object_or_404(Food, id=food_id)
order = Order(user=request.user, food=food)
order.save()
print("Order done!")
cart = Cart.objects.get(user=request.user)
cart.food.remove(food)
cart.save()
print("Remove done!")
return redirect('cart')
def test(request):
print(request.POST)
return redirect('Food')
# def bkash_order(request, food_id):
# food = get_object_or_404(Food, id=food_id)
# order = Order(user=request.user, food=food)
# order.transaction_id = request.POST['transaction_id']
# order.payment_options = 'Bkash'
# order.save()
#
# cart = Cart.objects.get(user=request.user)
# cart.food.remove(food)
# cart.save()
#
# #return HttpResponseRedirect(reverse('cart'))
# return redirect('cart')
#
# @login_required
# def update_cart(request, Food_id):
#
# food = get_object_or_404(Food, id=Food_id)
# cart = get_object_or_404(Cart, user=request.user)
#
# cart.food.add(food)
# cart.save()
#
# return redirect('cart')
#
# '''
# try:
# cart = Cart.objects.get(user=request.user)
# except cart.DoesNotExist:
# cart = Cart(user=request.user)
# '''
# @login_required
# def delete_from_cart(request, Food_id):
#
# food = get_object_or_404(Food, id=Food_id)
# cart = Cart.objects.get(user=request.user)
#
# cart.food.remove(food)
# cart.save()
#
# return redirect('cart')
@login_required
def make_order(request, food_id):
food = get_object_or_404(Food, id=food_id)
order = Order(user=request.user, food=food)
order.save()
return redirect('Food') | [
"18101007@uap-bd.edu"
] | 18101007@uap-bd.edu |
0fa8fa2d1973353700ed4feebe10f79b52b7481f | 27ed6d2db4f38cd351b642042341771d93aee121 | /python/medicUI/widgets.py | 89f63dc250a9fceacb72d24418ce995ab37e74c9 | [
"MIT"
] | permissive | wmoten/medic | eab20630e6666372e50d12fa0998ceefc9411e68 | bc2e9ec09e33ce2d0cedd8dc0c17f567208503ed | refs/heads/master | 2020-04-07T09:58:58.947534 | 2018-03-08T05:51:35 | 2018-03-08T05:51:35 | 124,202,109 | 0 | 0 | MIT | 2018-03-07T08:13:48 | 2018-03-07T08:13:48 | null | UTF-8 | Python | false | false | 23,842 | py | from Qt import QtWidgets, QtCore, QtGui
from . import model
from . import delegate
from . import functions
import medic
import os
import re
IconDir = os.path.abspath(os.path.join(__file__, "../icons"))
class ParameterFunctions():
@staticmethod
def SetParmeterValue(param_container, pram_dict):
for prm in pram_dict:
if prm["function"]:
prm["function"](param_container, prm["name"], prm["widget"])
@staticmethod
def SetInt(param, name, widget):
t = widget.text()
if not t:
t = 0
param.set(name, int(t))
@staticmethod
def SetFloat(param, name, widget):
t = widget.text()
if not t:
t = 0
param.set(name, float(t))
@staticmethod
def SetBool(param, name, widget):
param.set(name, widget.isChecked())
@staticmethod
def SetString(param, name, widget):
param.set(name, str(widget.text()))
@staticmethod
def CreateWidget(info):
name, label, parm_type, default = info
if parm_type is medic.Types.Null or\
parm_type is medic.Types.BoolArray or\
parm_type is medic.Types.IntArray or\
parm_type is medic.Types.FloatArray or\
parm_type is medic.Types.StringArray:
print "This type parameter is not supported yet : %s" % parm_type
return None, None
widget = None
function = None
if parm_type == medic.Types.Bool:
widget = QtWidgets.QCheckBox()
widget.setChecked(default)
function = ParameterFunctions.SetBool
elif parm_type == medic.Types.Int:
widget = NumericLine.CreateIntLine()
widget.setText(str(default))
function = ParameterFunctions.SetInt
elif parm_type == medic.Types.Float:
widget = NumericLine.CreateFloatLine()
widget.setText(str(default))
function = ParameterFunctions.SetFloat
elif parm_type == medic.Types.String:
widget = QtWidgets.QLineEdit()
widget.setText(default)
function = ParameterFunctions.SetString
widget.setObjectName("parameter_widget")
return widget, function
class BrowserButtonWidget(QtWidgets.QFrame):
BackClicked = QtCore.Signal()
NextClicked = QtCore.Signal()
def __init__(self, parent=None):
super(BrowserButtonWidget, self).__init__(parent=parent)
self.setObjectName("medic_browser_buttons_widget")
self.__back_button = None
self.__next_button = None
self.__makeWidgets()
def __makeWidgets(self):
main_layout = QtWidgets.QHBoxLayout()
self.setLayout(main_layout)
self.__back_button = QtWidgets.QPushButton()
self.__next_button = QtWidgets.QPushButton()
self.__back_button.setObjectName("medic_browser_back")
self.__next_button.setObjectName("medic_browser_next")
main_layout.addWidget(self.__back_button)
main_layout.addWidget(self.__next_button)
main_layout.setSpacing(1)
self.__back_button.clicked.connect(self.BackClicked.emit)
self.__next_button.clicked.connect(self.NextClicked.emit)
def setBackEnabled(self, v):
self.__back_button.setEnabled(v)
def setNextEnabled(self, v):
self.__next_button.setEnabled(v)
class CurrentKarteLabel(QtWidgets.QLabel):
def __init__(self, parent=None):
super(CurrentKarteLabel, self).__init__(parent=parent)
self.setObjectName("medic_current_karte")
class StatusLabel(QtWidgets.QLabel):
def __init__(self, parent=None):
super(StatusLabel, self).__init__(parent=parent)
self.setObjectName("status_label")
self.setFixedWidth(70)
self.__ready_icon = QtGui.QPixmap(os.path.join(IconDir, "success.png")).scaled(16, 16)
self.__success_icon = QtGui.QPixmap(os.path.join(IconDir, "success.png")).scaled(16, 16)
self.__failure_icon = QtGui.QPixmap(os.path.join(IconDir, "failure.png")).scaled(16, 16)
self.setStatus(model.Ready)
def setStatus(self, status):
if status is model.Ready:
self.setText("<font color='#b0b0b0'>Ready</font>")
elif status is model.Success:
self.setText("<font color='#1cc033'>Success</font>")
else:
self.setText("<font color='#eb2b66'>Failure</font>")
class TesterList(QtWidgets.QListView):
TesterChanged = QtCore.Signal("QModelIndex")
SingleTestTriggered = QtCore.Signal()
def __init__(self, parent=None):
super(TesterList, self).__init__(parent=parent)
self.setObjectName("medic_tester_list")
self.setUniformItemSizes(True)
self.source_model = model.TesterModel()
self.delegate = delegate.TesterDelegate()
self.setItemDelegate(self.delegate)
self.setModel(self.source_model)
self.__current_tester = None
def updateSelected(self):
for index in self.selectedIndexes():
self.update(index)
def currentTester(self):
return self.__current_tester
def reset(self):
self.clearSelection()
self.__current_tester = None
def selectionChanged(self, selected, deselected):
indexes = selected.indexes()
if not indexes:
self.clearSelection()
self.__current_tester = None
self.TesterChanged.emit(None)
else:
self.__current_tester = self.source_model.data(indexes[0], model.TesterItemRole)
self.TesterChanged.emit(indexes[0])
super(TesterList, self).selectionChanged(selected, deselected)
def mousePressEvent(self, evnt):
super(TesterList, self).mousePressEvent(evnt)
if QtCore.Qt.MouseButton.LeftButton == evnt.button():
index = self.indexAt(evnt.pos())
if index.row() < 0:
self.__current_tester = None
self.clearSelection()
elif QtCore.Qt.MouseButton.RightButton == evnt.button():
menu = QtWidgets.QMenu(self)
test = QtWidgets.QAction("Single Test", menu)
menu.addAction(test)
pos = self.mapToGlobal(evnt.pos())
menu.popup(QtCore.QPoint(pos.x() - 10, pos.y() - 10))
test.triggered.connect(self.__testTriggered)
def __testTriggered(self):
self.SingleTestTriggered.emit()
class KarteList(QtWidgets.QListView):
KarteChanged = QtCore.Signal("QModelIndex")
def __init__(self, parent=None):
super(KarteList, self).__init__(parent=parent)
self.setObjectName("medic_karte_list")
self.setUniformItemSizes(True)
self.source_model = model.KarteModel()
self.delegate = delegate.KarteDelegate()
self.setModel(self.source_model)
self.setItemDelegate(self.delegate)
self.__current_karte = None
def currentKarte(self):
return self.__current_karte
def selectionChanged(self, selected, deselected):
indexes = selected.indexes()
if not indexes:
self.clearSelection()
self.__current_karte = None
self.KarteChanged.emit(None)
else:
self.__current_karte = self.source_model.data(indexes[0], model.KarteItemRole)
self.KarteChanged.emit(indexes[0])
super(KarteList, self).selectionChanged(selected, deselected)
def mousePressEvent(self, evnt):
if QtCore.Qt.MouseButton.LeftButton == evnt.button():
index = self.indexAt(evnt.pos())
if index.row() < 0:
self.clearSelection()
self.__current_karte = None
super(KarteList, self).mousePressEvent(evnt)
class NumericLine(QtWidgets.QLineEdit):
RegexInt = re.compile("[^0-9-]")
RegexFloat = re.compile("[^0-9-.]")
def __init__(self, parent=None):
super(NumericLine, self).__init__(parent)
self.__regex = None
self.textEdited.connect(self.__regexCheck)
def __regexCheck(self, txt):
if self.__regex and txt:
self.setText(self.__regex.sub("", txt))
@staticmethod
def CreateIntLine():
e = NumericLine()
e.__regex = NumericLine.RegexInt
return e
@staticmethod
def CreateFloatLine():
e = NumericLine()
e.__regex = NumericLine.RegexFloat
return e
class ReportList(QtWidgets.QListView):
def __init__(self, parent=None):
super(ReportList, self).__init__(parent)
self.source_model = model.ReportModel()
self.setModel(self.source_model)
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.delegate = delegate.ReportDelegate()
self.setItemDelegate(self.delegate)
def setReportItems(self, report_items):
self.source_model.setReportItems(report_items)
def selectionChanged(self, selected, deselected):
indexes = selected.indexes()
functions.ClearSelection()
for index in self.selectedIndexes():
report = self.source_model.data(index, model.ReportRole)
report.addSelection()
super(ReportList, self).selectionChanged(selected, deselected)
def mousePressEvent(self, evnt):
if QtCore.Qt.MouseButton.LeftButton == evnt.button():
index = self.indexAt(evnt.pos())
if index.row() < 0:
self.clearSelection()
super(ReportList, self).mousePressEvent(evnt)
class TesterDetailWidget(QtWidgets.QWidget):
ReportsChanged = QtCore.Signal()
def __init__(self, parent=None):
super(TesterDetailWidget, self).__init__(parent)
self.setObjectName("tester_detail_widget")
self.__tester_item = None
self.__params = []
self.__param_container = None
self.__qt_top_layout = None
self.__qt_parameter_layout = None
self.__qt_bottom_layout = None
self.__qt_test_label = None
self.__qt_report_list = None
self.__qt_fix_selected_button = None
self.__qt_fix_all_button = None
self.__createWidgets()
self.__clear()
def onReset(self):
self.__clear()
def reset(self):
self.__clear()
def setTesterItem(self, testerItem):
self.__tester_item = testerItem
self.__setTester(self.__tester_item)
self.__setReportItems(self.__tester_item.reports())
def __setTester(self, testerItem):
self.__setTesterName(testerItem.name())
self.__setDescription(testerItem.description())
self.__clearParameters()
self.__setParameters(testerItem.parameters())
self.__setFixable(testerItem.isFixable())
def __setReportItems(self, report_items):
self.__qt_report_list.setReportItems(report_items)
if not report_items:
self.__setFixable(False)
def __createWidgets(self):
main_layout = QtWidgets.QVBoxLayout()
self.setLayout(main_layout)
main_layout.setContentsMargins(0, 0, 0, 0)
# frame
frame = QtWidgets.QFrame()
frame.setObjectName("detail_frame")
main_layout.addWidget(frame)
frame_layout = QtWidgets.QVBoxLayout()
frame.setLayout(frame_layout)
frame_layout.setContentsMargins(10, 10, 10, 10)
# layout
self.__qt_parameter_layout = QtWidgets.QVBoxLayout()
button_layout = QtWidgets.QHBoxLayout()
# widgets
self.__qt_tester_label = QtWidgets.QLabel()
self.__qt_description = QtWidgets.QTextEdit()
self.__qt_description.setFixedHeight(50)
self.__qt_description.setReadOnly(True)
self.__qt_tester_label.setObjectName("detail_tester_label")
self.__qt_description.setObjectName("detail_tester_description")
self.__qt_report_list = ReportList()
self.__qt_report_list.setObjectName("detial_report_list")
self.__qt_fix_selected_button = QtWidgets.QPushButton("Fix Selected")
self.__qt_fix_all_button = QtWidgets.QPushButton("Fix All")
self.__qt_fix_selected_button.setObjectName("detail_button")
self.__qt_fix_all_button.setObjectName("detail_button")
self.__qt_fix_selected_button.setMaximumWidth(100)
self.__qt_fix_all_button.setMaximumWidth(100)
button_layout.addWidget(self.__qt_fix_selected_button)
button_layout.addWidget(self.__qt_fix_all_button)
frame_layout.addWidget(self.__qt_tester_label)
frame_layout.addSpacing(20)
frame_layout.addWidget(self.__qt_description)
frame_layout.addWidget(self.__qt_report_list)
frame_layout.addLayout(self.__qt_parameter_layout)
frame_layout.addLayout(button_layout)
self.__qt_fix_all_button.clicked.connect(self.__fixAll)
self.__qt_fix_selected_button.clicked.connect(self.__fixSelected)
def __clear(self):
self.__tester_item = None
self.__qt_report_list.setReportItems([])
self.__setTesterName("")
self.__setFixable(False)
self.__setDescription("")
self.__clearParameters()
def __setTesterName(self, name):
self.__qt_tester_label.setText(name)
def __setDescription(self, desc):
self.__qt_description.setText(desc)
def __setFixable(self, enable):
self.__qt_fix_selected_button.setEnabled(enable)
self.__qt_fix_all_button.setEnabled(enable)
def __clearLayout(self, layout):
while (True):
item = layout.takeAt(0)
if item:
l = item.layout()
w = item.widget()
if l:
self.__clearLayout(l)
if w:
layout.removeWidget(w)
w.setParent(None)
else:
break
def __clearParameters(self):
self.__params = []
self.__param_container = None
self.__clearLayout(self.__qt_parameter_layout)
def __setParameters(self, params):
self.__param_container = params
for info in params.getParamInfos():
p_name, p_label, p_type, p_default = info
widget, function = ParameterFunctions.CreateWidget(info)
if widget:
layout = QtWidgets.QHBoxLayout()
label = QtWidgets.QLabel(p_label)
label.setObjectName("parameter_label")
layout.addWidget(label)
layout.addWidget(widget)
self.__params.append({"name": p_name, "widget": widget, "function": function})
self.__qt_parameter_layout.addLayout(layout)
def __fixAll(self):
if self.__tester_item:
ParameterFunctions.SetParmeterValue(self.__param_container, self.__params)
remove_items = []
for report in self.__tester_item.reports():
if self.__tester_item.fix(report, self.__param_container):
remove_items.append(report)
self.__tester_item.removeReports(remove_items)
self.__setReportItems(self.__tester_item.reports())
self.ReportsChanged.emit()
def __fixSelected(self):
if self.__tester_item:
ParameterFunctions.SetParmeterValue(self.__param_container, self.__params)
remove_items = []
all_reports = self.__tester_item.reports()
for index in map(lambda x: x.row(), self.__qt_report_list.selectedIndexes()):
report = all_reports[index]
if self.__tester_item.fix(report, self.__param_container):
remove_items.append(report)
self.__tester_item.removeReports(remove_items)
self.__setReportItems(self.__tester_item.reports())
self.ReportsChanged.emit()
class TopBarWidget(QtWidgets.QFrame):
BackClicked = QtCore.Signal()
NextClicked = QtCore.Signal()
def __init__(self, parent=None):
super(TopBarWidget, self).__init__(parent=parent)
self.setObjectName("medic_top_bar")
self.__browser_button_widget = None
self.__current_karte_label = None
self.reset_button = None
self.test_button = None
self.status_label = None
self.__phase_items = {}
self.__phase = 0
self.__makeWidgets()
self.setPhase(0)
def setBrowserButtonEnabled(self, prevValue, nextValue):
self.__browser_button_widget.setBackEnabled(prevValue)
self.__browser_button_widget.setNextEnabled(nextValue)
def setCurrentKarteName(self, name):
self.__current_karte_label.setText(name)
def phase(self):
return self.__phase
def next(self):
self.setPhase(self.__phase + 1)
def back(self):
self.setPhase(self.__phase - 1)
def setPhase(self, phase):
self.__phase = phase
for p, items in self.__phase_items.iteritems():
if p == self.__phase:
for item in items:
item.show()
else:
for item in items:
item.hide()
def __makeWidgets(self):
main_layout = QtWidgets.QVBoxLayout()
main_layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(main_layout)
horizon_layout = QtWidgets.QHBoxLayout()
horizon_layout.setSpacing(10)
horizon_layout.setContentsMargins(0, 0, 0, 0)
self.__browser_button_widget = BrowserButtonWidget()
self.reset_button = QtWidgets.QPushButton()
self.test_button = QtWidgets.QPushButton()
self.reset_button.setObjectName("reset_button")
self.test_button.setObjectName("test_button")
self.status_label = StatusLabel()
self.__current_karte_label = CurrentKarteLabel()
self.__phase_items[1] = [self.reset_button, self.test_button, self.status_label]
horizon_layout.addWidget(self.__browser_button_widget)
horizon_layout.addSpacing(20)
horizon_layout.addWidget(self.reset_button)
horizon_layout.addWidget(self.test_button)
horizon_layout.addWidget(self.status_label)
horizon_layout.addStretch(9999)
horizon_layout.addWidget(self.__current_karte_label)
main_layout.addLayout(horizon_layout)
self.__browser_button_widget.BackClicked.connect(self.BackClicked.emit)
self.__browser_button_widget.NextClicked.connect(self.NextClicked.emit)
class MainWidget(QtWidgets.QWidget):
ConditionChanged = QtCore.Signal(bool, bool)
KarteChanged = QtCore.Signal(str)
StatusChanged = QtCore.Signal(int)
def __init__(self, parent=None):
super(MainWidget, self).__init__(parent=parent)
self.setObjectName("medic_main_widget")
self.__kartes_widget = None
self.__testers_widget = None
self.__phase = 0
self.__phase_widgets = {}
self.__callback_ids = []
self.__makeWidgets()
self.setPhase(0)
def phase(self):
return self.__phase
def next(self):
self.setPhase(self.__phase + 1)
def back(self):
self.setPhase(self.__phase - 1)
def setPhase(self, p):
self.__phase = p
for phase, widgets in self.__phase_widgets.iteritems():
if phase is p:
for widget in widgets:
widget.show()
else:
for widget in widgets:
widget.hide()
if self.__phase is 0:
able_back = False
able_next = True if self.__kartes_widget.currentKarte() else False
self.__testers_widget.reset()
else:
able_back = True
able_next = False
if self.__phase is 1:
self.reset()
self.ConditionChanged.emit(able_back, able_next)
def __makeWidgets(self):
main_layout = QtWidgets.QHBoxLayout()
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.setSpacing(0)
self.setLayout(main_layout)
self.__kartes_widget = KarteList()
self.__testers_widget = TesterList()
self.__detail_widget = TesterDetailWidget()
## phase 0
main_layout.addWidget(self.__kartes_widget)
self.__phase_widgets[0] = [self.__kartes_widget]
## phase 2
h_layout = QtWidgets.QHBoxLayout()
h_layout.addWidget(self.__testers_widget)
h_layout.addWidget(self.__detail_widget)
self.__phase_widgets[1] = [self.__testers_widget, self.__detail_widget]
main_layout.addLayout(h_layout)
## signal
self.__kartes_widget.KarteChanged.connect(self.__karteChanged)
self.__testers_widget.TesterChanged.connect(self.__testerChanged)
self.__detail_widget.ReportsChanged.connect(self.__reportsChanged)
self.__testers_widget.SingleTestTriggered.connect(self.__singleTest)
## set maya event callback
self.__callback_ids.append(functions.registSceneOpenCallback(self.__sceneChanged))
self.__callback_ids.append(functions.registNewSceneOpenCallback(self.__sceneChanged))
self.destroyed.connect(self.__removeCallbacks)
def __removeCallbacks(self):
functions.removeCallbacks(self.__callback_ids)
def __sceneChanged(self, *args):
self.reset()
def reset(self):
karte_item = self.__kartes_widget.currentKarte()
if karte_item:
karte_item.reset()
self.StatusChanged.emit(model.Ready)
self.update()
tester_item = self.__testers_widget.currentTester()
self.__detail_widget.reset()
if tester_item:
self.__detail_widget.setTesterItem(tester_item)
def test(self):
self.__detail_widget.reset()
karte_item = self.__kartes_widget.currentKarte()
if karte_item:
karte_item.testAll(testerCallback=self.forceUpdate)
self.StatusChanged.emit(karte_item.status())
self.update()
tester_item = self.__testers_widget.currentTester()
if tester_item:
self.__detail_widget.setTesterItem(tester_item)
def __singleTest(self):
self.__detail_widget.reset()
karte_item = self.__kartes_widget.currentKarte()
tester_item = self.__testers_widget.currentTester()
if karte_item and tester_item:
karte_item.test(tester_item, testerCallback=self.forceUpdate)
self.StatusChanged.emit(karte_item.status())
self.update()
self.__detail_widget.setTesterItem(tester_item)
def forceUpdate(self):
self.update()
QtWidgets.QApplication.processEvents()
def __testerChanged(self, current):
tester_item = self.__testers_widget.model().data(current, model.TesterItemRole)
if tester_item:
self.__detail_widget.setTesterItem(tester_item)
else:
self.__detail_widget.reset()
def __reportsChanged(self):
self.__testers_widget.updateSelected()
karte_item = self.__kartes_widget.currentKarte()
self.StatusChanged.emit(karte_item.status())
def __karteChanged(self, current):
able_back = False if self.__phase is 0 else True
able_next = False
karte_model = self.__kartes_widget.model()
tester_model = self.__testers_widget.model()
karte_item = karte_model.data(current, model.KarteItemRole)
if karte_item:
self.KarteChanged.emit(karte_item.name())
tester_model.setTesterItems(karte_model.data(current, model.KarteTesterItemsRole))
able_next = True
else:
self.KarteChanged.emit("")
tester_model.setTesterItems([])
self.ConditionChanged.emit(able_back, able_next)
| [
"defreturnnone@gmail.com"
] | defreturnnone@gmail.com |
0c7109401894b8ab6fa958daf9320f6f6999c573 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03231/s342511104.py | c53bcfef1be5b00fe39ad9752b5ac05a7a1bf748 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from math import gcd
N, M = map(int, input().split())
S = input()
T = input()
L = N*M // gcd(N, M)
for i in range(N):
if M*i % N == 0:
j = M*i // N
if S[i] != T[j]:
print(-1)
exit()
print(L)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
82e9dee0741767de9532be152cdc0e12d0ec1f4d | 736f965dd6fde66de8df7df3e84daffba3adae9e | /234_Palindrome_Linked_List.py | 95296a7660f043fd3aeaf021d825588be6d307d3 | [] | no_license | cl2547/my_leetcode | 6982d7a6dc4b2647bf49d30c55bbef2988f67381 | 8531c63ea7501e3fd22c60399550594c7053df85 | refs/heads/master | 2020-03-26T22:52:43.242215 | 2019-01-17T17:53:31 | 2019-01-17T17:53:31 | 145,493,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | class Solution:
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
#naive approach: compare head and tail
if head == None:
return True
temp = []
shouldBreak = 0
while True:
if shouldBreak == 1:
break
temp.append(head.val)
if head.next == None:
shouldBreak = 1
if head.next and shouldBreak == 0:
head = head.next
print(temp)
left = 0
right = len(temp)-1
while left <= right:
if temp[left] != temp[right]:
return False
left += 1
right -=1
return True | [
"cl2547@cornell.edu"
] | cl2547@cornell.edu |
a80bc0b7bec454901ca4b3999a54aa41c6aa7bd6 | 857d134efb9478deb5fe883d1365afd983de30eb | /util.py | 4dcbf75d5e23f4f4adcf6af7bffbb0bf1af83b87 | [] | no_license | riznagauri/customer_service_chatbot | 5568154a818521caaf29e6fd2f24567e8eeb5527 | 6c2e4597e3509c15db794338a6317243b9aff93c | refs/heads/main | 2023-02-02T04:38:46.419657 | 2020-12-13T15:15:44 | 2020-12-13T15:15:44 | 319,928,492 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | import torch.nn as nn
import collections
def embedding_size_from_name(name):
return int(name.strip().split('.')[-1][:-1])
def print_dim(name, tensor):
print("%s -> %s" % (name, tensor.size()))
class RNNWrapper(nn.Module):
"""
Wrapper around GRU or LSTM RNN. If underlying RNN is GRU, this wrapper does nothing, it just forwards inputs and
outputs. If underlying RNN is LSTM this wrapper ignores LSTM cell state (s) and returns just hidden state (h).
This wrapper allows us to unify interface for GRU and LSTM so we don't have to treat them differently.
"""
LSTM = 'LSTM'
GRU = 'GRU'
def __init__(self, rnn):
super(RNNWrapper, self).__init__()
assert isinstance(rnn, nn.LSTM) or isinstance(rnn, nn.GRU)
self.rnn_type = self.LSTM if isinstance(rnn, nn.LSTM) else self.GRU
self.rnn = rnn
def forward(self, *input):
rnn_out, hidden = self.rnn(*input)
if self.rnn_type == self.LSTM:
hidden, s = hidden # ignore LSTM cell state s
return rnn_out, hidden
# Metadata used to describe dataset
Metadata = collections.namedtuple('Metadata', 'vocab_size padding_idx vectors')
| [
"59862776+riznagauri@users.noreply.github.com"
] | 59862776+riznagauri@users.noreply.github.com |
a9f7f9473b67b88fda721389ce806686a7721fdd | 15c05333389b9cf871087d2c22458654a800f098 | /example/WriteFileExample.py | 3562d5e09380f0ee736c53c9096394cc36fbc38f | [] | no_license | Ratta-Chindasilpa/CP3-Ratta-Chindasilpa | 1646cbe11012d09009e024028aa4e7959f44dd7b | ee07ebdedc6e46220636904e1afbf5254ae9dfb5 | refs/heads/master | 2022-12-25T03:31:23.446207 | 2020-09-23T14:55:32 | 2020-09-23T14:55:32 | 293,116,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | file = open("demo.txt", "a") #เพิ่มข้อความต่อท้าย (append)
file.write("Haha From Next Lecture")
file = open("demo.txt", "w") #เขียนทับไปเลย
file.write("Eiei \"w\"")
'''po = [1,2,3]
po.append(5)
print(po)'''
| [
"70811729+Ratta-Chindasilpa@users.noreply.github.com"
] | 70811729+Ratta-Chindasilpa@users.noreply.github.com |
25e27b9ba8f9eab7e8a7697f65b503252490fbb6 | 950573f2befa010783c5f061983486e18006878a | /modules/methods.py | 393a39d90cb855baa534fd4550ea8244bf221678 | [] | no_license | belousovgm/dmarket_bot | 44ea587b68d9314d71ec739ce92866acfdd71e9b | ace1e98b5b28b2390f9ede9726fdb23f7a2a8ff4 | refs/heads/master | 2023-08-05T22:51:28.965645 | 2021-09-24T17:15:13 | 2021-09-24T17:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | from pyti.simple_moving_average import simple_moving_average as sma
from typing import List
from api.schemas import LastSale
def mov_av_5(history: List[LastSale]) -> list:
prices = [i.Price.Amount for i in history]
prices.reverse()
mov_av = [i for i in list(sma(prices, 5))]
mov_av.reverse()
return mov_av
| [
"timagr615@gmail.com"
] | timagr615@gmail.com |
6a8852a241aa4d0975a748a95537d55dd3466a75 | 5b0dcb0a1f578d04b0c195a30ff1a7ccfd29deaf | /tests/import/pkg9/mod2.py | f4b3e265fb51091766734e9cb7f4f9ca064b8b03 | [
"MIT",
"GPL-1.0-or-later"
] | permissive | rk-exxec/micropython | c87ecd8743413c8a65e64fd82cd1910ccaed96c6 | d529c20674131b9ce36853b92784e901a1bc86f4 | refs/heads/master | 2023-08-18T01:18:34.511851 | 2023-08-09T03:19:18 | 2023-08-09T03:22:57 | 136,508,987 | 7 | 16 | MIT | 2023-01-13T20:48:50 | 2018-06-07T17:17:01 | C | UTF-8 | Python | false | false | 19 | py | from . import mod2
| [
"damien@micropython.org"
] | damien@micropython.org |
390906999c1c0e7466b96f59d5a0c7e6cc9ab7d4 | 986d78fdcb40f4ee7db15bafc77070c087d16b63 | /studies/MultiBoomSparMass_v2/point_design.py | f0268c72689269395046cb2711265a992c71d693 | [
"MIT"
] | permissive | hdolfen/AeroSandbox | 8578b5e36b9a4be69801c1c9ad8819965f236edb | 4c48690e31f5f2006937352a63d653fe268c42c3 | refs/heads/master | 2023-01-20T15:36:58.111907 | 2020-11-24T13:11:44 | 2020-11-24T13:11:44 | 313,655,155 | 0 | 0 | MIT | 2020-11-24T13:11:46 | 2020-11-17T15:05:02 | null | UTF-8 | Python | false | false | 1,885 | py | ### Imports
from aerosandbox.structures.beams import *
import copy
n_booms = 1
# n_booms = 2
# load_location_fraction = 0.50
# n_booms = 3
# load_location_fraction = 0.60
mass = 80 * 6
span = 7.3
### Set up problem
opti = cas.Opti()
beam = TubeBeam1(
opti=opti,
length=span / 2,
points_per_point_load=100,
diameter_guess=10,
thickness=1e-3,
bending=True,
torsion=False,
max_allowable_stress=570e6,
)
lift_force = 9.81 * mass
# load_location = opti.variable()
# opti.set_initial(load_location, 12)
# opti.subject_to([
# load_location > 1,
# load_location < beam.length - 1,
# ])
assert (n_booms == np.array([1,2,3])).any()
if n_booms == 2 or n_booms == 3:
load_location = beam.length * load_location_fraction
beam.add_point_load(location = load_location, force = -lift_force / n_booms)
beam.add_elliptical_load(force=lift_force / 2)
beam.setup()
# Constraints (in addition to stress)
opti.subject_to([
# beam.u[-1] < 2, # tip deflection. Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
# beam.u[-1] > -2 # tip deflection. Source: http://web.mit.edu/drela/Public/web/hpa/hpa_structure.pdf
beam.du * 180 / cas.pi < 10, # local dihedral constraint
beam.du * 180 / cas.pi > -10, # local anhedral constraint
cas.diff(beam.nominal_diameter) < 0, # manufacturability
])
# # Zero-curvature constraint (restrict to conical tube spars only)
# opti.subject_to([
# cas.diff(cas.diff(beam.nominal_diameter)) == 0
# ])
opti.minimize(beam.mass)
p_opts = {}
s_opts = {}
s_opts["max_iter"] = 1e6 # If you need to interrupt, just use ctrl+c
# s_opts["mu_strategy"] = "adaptive"
opti.solver('ipopt', p_opts, s_opts)
sol = opti.solve()
beam_sol = copy.deepcopy(beam).substitute_solution(sol)
spar_mass = beam_sol.mass * 2
# Run a sanity check
beam_sol.draw_bending()
print("Spar mass:", spar_mass) | [
"peterdsharpe@gmail.com"
] | peterdsharpe@gmail.com |
8e649d09bcfff639dd293baa38172402faa65ddf | 6d0da4d45bbd162bb30b6818a8a0c9587391c3f4 | /day1/InputDemo.py | 6da6df0cdf22976191cb83af571a916e52ca0bc7 | [] | no_license | chemin233/pythonPractice | 2cab4d7bebf3923486dbac641a49ded38e9ebd3e | a188a0fef63f16739c7d239d18d24c1a31be847c | refs/heads/master | 2021-04-06T20:42:56.360920 | 2018-03-17T08:43:23 | 2018-03-17T08:43:23 | 125,379,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | # -*- coding: utf-8 -*-
import keyword
print(keyword.kwlist)
print(r"r'''hello,\n world'''")
| [
"chemin233@163.com"
] | chemin233@163.com |
dd70383bd799a8f104e751a763ba69d1a5ff85be | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03330/s307392217.py | de40af9fdc43a32599ce03bad31896ab49cb00ac | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | def main():
from itertools import permutations
N, C = map(int, input().split())
change_cost = [[int(x) for x in input().split()] for _ in range(C)]
init_color = [[int(x) - 1 for x in input().split()] for _ in range(N)]
ctr = [[0] * C for _ in range(3)]
for r in range(N):
for c in range(N):
p = (r + c) % 3
color = init_color[r][c]
ctr[p][color] += 1
mi = 1000 * 500 * 500 + 1
for perm in permutations(range(C), r=3):
it = iter(perm)
t = 0
for p in range(3):
color_to_be = next(it)
for color, count in enumerate(ctr[p]):
t += change_cost[color][color_to_be] * count
mi = min(mi, t)
print(mi)
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e91623921eb5c27af51e43a64196da880e6aa9fd | 195308f855a492098cbea0a102bc55aecfe2b313 | /IMAGENET/imagenet_demo.py | da7c245bfe0490206509e26ecba7641c8aa56fbc | [] | no_license | jonathanhhb/DataScience | 4fcc6ed9493f72839e9c67ce450c412ba864563c | 8568c8b1ddb1cc737c8e314198d7773dfd58fcd0 | refs/heads/master | 2021-06-26T07:33:03.639850 | 2019-05-22T20:11:04 | 2019-05-22T20:11:04 | 114,664,168 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | #!/usr/bin/python
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
import urllib2
import sys
model = ResNet50(weights='imagenet')
response = urllib2.urlopen( sys.argv[1] )
image_from_web = response.read( response )
#img_path = 'car_battery.jpg'
with open( "/var/tmp/image_to_rec.jpg", "w" ) as tmp_img:
tmp_img.write( image_from_web )
img_path = "/var/tmp/image_to_rec.jpg"
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print('Predicted:', decode_predictions(preds, top=3)[0])
# Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
| [
"jhb4@cornell.edu"
] | jhb4@cornell.edu |
2d527612149fb4de87f1e28e4faa947f02b7d21c | 407ca85cd6051a50884f38bb0514a6301f8e7101 | /Consolidated/POM/process_igd.py | 95e19baa709e2dfd1c4abcd641e5a4c6d49fe827 | [] | no_license | vivekaxl/MOLearner | 5ae4f40027b814ae5b20aaaeb255d6041505c0b9 | 236bf61e8ee1663eabcd73f355070022f908acfa | refs/heads/master | 2021-01-23T01:12:30.836318 | 2017-04-27T05:54:39 | 2017-04-27T05:54:39 | 85,847,238 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | from __future__ import division
import pickle
import os
from sk import rdivDemo
pickle_files = [f for f in os.listdir(".") if ".py" not in f]
content = pickle.load(open(pickle_files[0]))
problems = content.keys()
prob = {}
for problem in problems:
al2 = pickle.load(open('al2_POM.p'))
al = pickle.load(open('al_POM.p'))
mmre = pickle.load(open('mmre_POM.p'))
nsgaii = pickle.load(open('nsgaii_POM.p'))
rank = pickle.load(open('rank_POM.p'))
spea2 = pickle.load(open('spea2_POM.p'))
sway5 = pickle.load(open('SWAY5_POM.p'))
lists = list()
lists.append(['AL2'] + al2[problem]['igd'])
lists.append(['AL'] + al[problem]['igd'])
lists.append(['MMRE'] + mmre[problem]['igd'])
lists.append(['NSGAII'] + nsgaii[problem]['igd'])
lists.append(['Rank'] + rank[problem]['igd'])
lists.append(['SPEA2'] + spea2[problem]['igd'])
lists.append(['SWAY5'] + sway5[problem]['igd'])
rdivDemo( problem.replace('_', '\_'), "", lists, globalMinMax=False,
isLatex=True)
| [
"vivekaxl@gmail.com"
] | vivekaxl@gmail.com |
d59d7dc1df81da166c9d49e6eaa0d44cb787a00c | 23ea8b62ea8823cc2609cccdb49ec6195f9528b0 | /experimentation/alt_hough_lines/alt_hough_lines.py | bdcc5a8d35798e59f51d8ec736fb796bc036aa31 | [] | no_license | ashvath100/computer-vision | 57d16c54b1fcc8e1502c9b387fb9e1a3e5817455 | c3c9b82413818c2603cde5008787a59e60e248af | refs/heads/master | 2020-12-13T01:01:53.553224 | 2020-01-15T13:35:33 | 2020-01-15T13:35:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,314 | py | import numpy as np
import imageio
import math
import cv2
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114]).astype(np.uint8)
def hough_line(img, angle_step=1, lines_are_white=True, value_threshold=5):
"""
Hough transform for lines
Input:
img - 2D binary image with nonzeros representing edges
angle_step - Spacing between angles to use every n-th angle
between -90 and 90 degrees. Default step is 1.
lines_are_white - boolean indicating whether lines to be detected are white
value_threshold - Pixel values above or below the value_threshold are edges
Returns:
accumulator - 2D array of the hough transform accumulator
theta - array of angles used in computation, in radians.
rhos - array of rho values. Max size is 2 times the diagonal
distance of the input image.
"""
# Rho and Theta ranges
thetas = np.deg2rad(np.arange(-90.0, 90.0, angle_step))
width, height = img.shape
diag_len = int(round(math.sqrt(width * width + height * height)))
rhos = np.linspace(-diag_len, diag_len, diag_len * 2)
# Cache some resuable values
cos_t = np.cos(thetas)
sin_t = np.sin(thetas)
num_thetas = len(thetas)
# Hough accumulator array of theta vs rho
accumulator = np.zeros((2 * diag_len, num_thetas), dtype=np.uint8)
# (row, col) indexes to edges
are_edges = img > value_threshold if lines_are_white else img < value_threshold
y_idxs, x_idxs = np.nonzero(are_edges)
# Vote in the hough accumulator
for i in range(len(x_idxs)):
x = x_idxs[i]
y = y_idxs[i]
for t_idx in range(num_thetas):
# Calculate rho. diag_len is added for a positive index
rho = diag_len + int(round(x * cos_t[t_idx] + y * sin_t[t_idx]))
accumulator[rho, t_idx] += 1
return accumulator, thetas, rhos
def show_hough_line(img, accumulator, thetas, rhos, save_path=None):
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 2, figsize=(10, 10))
ax[0].imshow(img, cmap=plt.cm.gray)
ax[0].set_title('Input image')
ax[0].axis('image')
ax[1].imshow(
accumulator, cmap='jet',
extent=[np.rad2deg(thetas[-1]), np.rad2deg(thetas[0]), rhos[-1], rhos[0]])
ax[1].set_aspect('equal', adjustable='box')
ax[1].set_title('Hough transform')
ax[1].set_xlabel('Angles (degrees)')
ax[1].set_ylabel('Distance (pixels)')
ax[1].axis('image')
# plt.axis('off')
if save_path is not None:
plt.savefig(save_path, bbox_inches='tight')
plt.show()
if __name__ == '__main__':
imgpath = 'E:\\Aditya\\Computer Vision VUAV\\Picture.jpeg'
img = cv2.imread(imgpath)
if img is not None:
print("Hi")
if img.ndim == 3:
## img = rgb2gray(img)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, (64,64), interpolation = cv2.INTER_AREA)
print("Image converted, now starting hough lines")
accumulator, thetas, rhos = hough_line(img)
print("Hey")
show_hough_line(img, accumulator, save_path='E:\\Aditya\\Computer Vision VUAV\\output.jpeg')
print("-------------- Done exeucuting -------------------------- ")
else:
print("Incorrect path")
| [
"47158509+oke-aditya@users.noreply.github.com"
] | 47158509+oke-aditya@users.noreply.github.com |
7545e75a3e19f5e5df2e9714c8b328dc935a65cf | 1dc7111e443e52f788e0ce3c57bb0e89e5ea73dc | /ecocast_celery/tasks/ak_air_quality_task.py | 2926facbdb14eba85d1456589e234c39b38f0fd8 | [] | no_license | myselfub/Ecocast_Celery | b11b65fd414b9371392f60f3f133addf53507324 | f5a52fc566578089fc1d05da1b5a4a0fcddc3dbc | refs/heads/master | 2023-06-01T14:47:31.452540 | 2021-06-13T15:37:43 | 2021-06-13T15:37:43 | 376,572,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,426 | py | import requests
from ..ecocast_conf import logger, psql_connect, psql_select
from ..ecocast_conf import AIRKOREA_HOST, AIRKOREA_KEY, REQUEST_TIME_OUT
""" Airkorea Air Quality Data Task """
def insert_ak_air_quality_list():
""" List of station names to be called to API of the Airkorea's air quality data
Args:
Returns:
station_result (list): Station name in which the column of is_active is true
True (bool): Request Timeout Except or The database time and API time are the same
Examples:
>>> print(insert_ak_air_quality_list)
[('고읍',), ('중구',), ('한강대로',), ('종로구',), ... ]
"""
logger.debug('insert_ak_air_quality_list 실행')
conn = psql_connect()
select_time_sql = 'SELECT airkorea_air_quality_time FROM airkorea_air_quality ' \
'ORDER BY airkorea_air_quality_id DESC LIMIT 1'
select_station_sql = 'SELECT airkorea_station_name FROM airkorea_station WHERE airkorea_station_is_active = TRUE'
time_result = psql_select(conn, select_time_sql)
station_result = psql_select(conn, select_station_sql)
conn.close()
if time_result:
db_time = time_result[0][0].strftime('%Y-%m-%d %H:%M')
else:
db_time = '2010-01-01 00:00'
try:
ak_time = call_ak_air_quality(station_result[0][0])[0]['dataTime']
except TypeError:
return True
if db_time == ak_time:
logger.debug('db_time : ' + db_time + ', ak_time : ' + ak_time)
return True
return station_result
def insert_ak_air_quality(station_name: str):
""" Receive AirKorea's air quality data and insert it into the database
Args:
station_name (str): Station name of Station data provided by Airkorea
Returns:
None (None): Success or no station data
Retry (str): Retry due to Request Timeout Except
True (bool): Call null_ak_air_quality due to null data
"""
logger.debug('insert_ak_air_quality(' + station_name + ') 실행')
try:
air_quality_data = call_ak_air_quality(station_name)[0]
except IndexError as ie:
logger.debug('insert_ak_air_quality(' + station_name + ') Except : ' + str(ie))
return None
except TypeError:
return 'Retry'
values_data = (
air_quality_data['dataTime'], air_quality_data['mangName'], air_quality_data['so2Value'],
air_quality_data['so2Grade'],
air_quality_data['coValue'], air_quality_data['coGrade'], air_quality_data['o3Value'],
air_quality_data['o3Grade'],
air_quality_data['no2Value'], air_quality_data['no2Grade'], air_quality_data['pm10Value'],
air_quality_data['pm10Grade'],
air_quality_data['pm10Value24'], air_quality_data['pm25Value'], air_quality_data['pm25Grade'],
air_quality_data['pm25Value24'],
air_quality_data['khaiValue'], air_quality_data['khaiGrade'], station_name)
conn = psql_connect()
curs = conn.cursor()
curs.execute(
'INSERT INTO airkorea_air_quality(airkorea_air_quality_time, airkorea_air_quality_mang, '
'airkorea_air_quality_so2, airkorea_air_quality_so2_grade, airkorea_air_quality_co, '
'airkorea_air_quality_co_grade, airkorea_air_quality_o3, airkorea_air_quality_o3_grade, '
'airkorea_air_quality_no2, airkorea_air_quality_no2_grade, '
'airkorea_air_quality_pm10, airkorea_air_quality_pm10_grade, airkorea_air_quality_pm10_forecast, '
'airkorea_air_quality_pm25, airkorea_air_quality_pm25_grade, airkorea_air_quality_pm25_forecast, '
'airkorea_air_quality_khai, airkorea_air_quality_khai_grade, airkorea_station_name) '
'VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)', values_data)
conn.commit()
curs.close()
conn.close()
for value_data in values_data:
if value_data == '' or value_data == '-':
return True
def null_ak_air_quality(station_name: str):
""" If receive AirKorea's air quality data in null value
Args:
station_name (str): Station name of station data provided by Airkorea
Returns:
None (None): Success or no station data
True (bool): Retry due to null data
"""
logger.debug('null_ak_air_quality(' + station_name + ') 실행')
try:
air_quality_data = call_ak_air_quality(station_name)[0]
except IndexError as e:
logger.debug('null_ak_air_quality(' + station_name + ') Except : ' + str(e))
return None
except TypeError:
return True
values_data = (
air_quality_data['so2Value'], air_quality_data['so2Grade'],
air_quality_data['coValue'], air_quality_data['coGrade'], air_quality_data['o3Value'],
air_quality_data['o3Grade'],
air_quality_data['no2Value'], air_quality_data['no2Grade'], air_quality_data['pm10Value'],
air_quality_data['pm10Grade'],
air_quality_data['pm10Value24'], air_quality_data['pm25Value'], air_quality_data['pm25Grade'],
air_quality_data['pm25Value24'],
air_quality_data['khaiValue'], air_quality_data['khaiGrade'], air_quality_data['dataTime'], station_name)
for value_ in values_data:
if value_ == '' or value_ == '-':
return True
conn = psql_connect()
curs = conn.cursor()
curs.execute(
'UPDATE airkorea_air_quality SET airkorea_air_quality_so2 = %s, airkorea_air_quality_so2_grade = %s, '
'airkorea_air_quality_co = %s, airkorea_air_quality_co_grade = %s, airkorea_air_quality_o3 = %s, '
'airkorea_air_quality_o3_grade = %s, airkorea_air_quality_no2 = %s, airkorea_air_quality_no2_grade = %s, '
'airkorea_air_quality_pm10 = %s, airkorea_air_quality_pm10_grade = %s, airkorea_air_quality_pm10_forecast = %s, '
'airkorea_air_quality_pm25 = %s, airkorea_air_quality_pm25_grade = %s, '
'airkorea_air_quality_pm25_forecast = %s, airkorea_air_quality_khai = %s, airkorea_air_quality_khai_grade = %s '
'WHERE airkorea_air_quality_time = %s AND airkorea_station_name = %s', values_data)
conn.commit()
curs.close()
conn.close()
def call_ak_air_quality(station_name: str):
""" Airkorea's air quality data API call
Args:
station_name (str): Station name of station data provided by Airkorea
Returns:
json_data['list'] (list): Airkorea's air quality data
True (bool): Request Timeout Except
Examples:
>>> print(call_ak_air_quality('고읍'))
[{'_returnType': 'json', 'coGrade': '1', 'coValue': '0.6', 'dataTerm': '', ... ]
"""
logger.debug('call_ak_air_quality(' + station_name + ') 실행')
url = AIRKOREA_HOST + 'ArpltnInforInqireSvc/getMsrstnAcctoRltmMesureDnsty'
params_ = {'serviceKey': AIRKOREA_KEY, 'numOfRows': 1, 'pageNo': 1, 'stationName': station_name,
'dataTerm': 'DAILY',
'ver': 1.3, 'returnType': 'json'}
try:
response = requests.get(url, params_, timeout=REQUEST_TIME_OUT)
json_data = response.json()
except (requests.exceptions.Timeout, requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as e:
logger.debug('call_ak_air_quality(' + station_name + ') Except : ' + str(e))
return True
return json_data['response']['body']['items']
| [
"myselfub@gmail.com"
] | myselfub@gmail.com |
972dbe05aca9f060b4113b57fd5d6d3ecbf2223b | 337250ba29fc65e7652fb1a22d653770a3421e52 | /src/games/chat/chat.py | 03fc685b5d6c13776ad6ddb59fb7ca56e3358e70 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ollej/GoldQuest | 4dc2e527debc76558f9032bac2f88abb5d8f5f6e | b9913815bd1d1f304c34c3a3cc643b64310e66c6 | refs/heads/master | 2021-01-22T21:32:27.876981 | 2012-12-19T08:35:06 | 2012-12-19T08:35:06 | 1,999,071 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,684 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The MIT License
Copyright (c) 2011 Olle Johansson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import random
from GoldFrame import GoldFrame
from datastorehelpers import *
class Game(GoldFrame.GamePlugin):
users = 0
_cfg = None
metadata = {
'name': 'Chat',
'gamekey': 'chat',
'personal_hero': False,
'broadcast_actions': ['chat'],
'actions': [
{
'key': 'chat',
'name': 'Chat',
'description': 'Send a chat message.',
'img': 'images/chat/icon-chat.png',
'tinyimg': 'images/chat/icon-chat.png',
'color': '#C30017',
'arguments': [
{
'type': 'input',
'key': 'chatmessage',
'name': 'Chat Message',
'description': 'Enter a message to send out to all visitors.',
},
],
},
],
'stats_img': 'images/chat/icon-chat.png',
'stats': [
{
'key': 'users',
'name': 'users',
'description': 'The number of recent users in the chat.',
'type': 'integer',
},
],
}
def template_charsheet(self):
return """
<h1 style="padding-top: 6px">Online Users: <span id="usersValue" class="usersValue">{{ users }}</span></h1>
"""
def action_chat(self, arguments=None):
"""
Make a prediction.
"""
return "<%s> %s" % (self._userid, arguments['chatmessage'])
| [
"Olle@Johansson.com"
] | Olle@Johansson.com |
cac740ed5390813fac0fa76c84b8190b7170522b | 020d26b74450c807e04540e0fb5777e54986ffe2 | /main.py | 67be3aba2b744de5601d18ea6565376f730b9533 | [] | no_license | GrDaniel/oreilly-playlists-scrapper | 6c85c84b2ecdbeea12fa0d4491b737a3d402d260 | e98c623d6e861ff3df74de3937a1b785ce257fc0 | refs/heads/master | 2023-07-05T16:33:51.626559 | 2021-08-31T21:01:47 | 2021-08-31T21:01:47 | 386,440,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,971 | py | import json
import logging
from converter import EbookConverter
from downloader import Downloader
from scrapper import OreillyPlaylistScrapper
from uploader import DropBoxClient
from utils import cfg, collect_local_files, PROJECT_DIR_PATH
class BooksSynchronizer(object):
def __init__(self):
logging.basicConfig(filename='BooksSynchronizer.log', level=logging.INFO)
self.scrapper = OreillyPlaylistScrapper()
self.downloader = Downloader()
self.converter = EbookConverter()
self.uploader = DropBoxClient()
self.oreilly_collection = []
self.new_books = []
def sync_books(self):
logging.info(f"***START***")
self.fetch_playlists_collection()
self.new_books = self.choose_books_to_download()
logging.info(f"New books found: {len(self.books)}")
logging.info(f"New books to download: {self.new_books}")
self.download_and_convert_books()
self.upload_books()
logging.info(f"***END***")
def fetch_playlists_collection(self):
self.scrapper.get_books()
def choose_books_to_download(self):
self.oreilly_collection = self.read_books_collection()
oreilly_books = [book.get('book_name') for book in self.oreilly_collection]
local_collection = [filename.split(".")[0] for filename in collect_local_files(cfg.get('DEST_DIR'))]
return [book for book in oreilly_books if book not in local_collection]
@staticmethod
def read_books_collection():
with open('books.json', 'r') as file:
return json.load(file)
def download_and_convert_books(self):
for book_name in self.new_books:
book_id = self._get_book_id(book_name)
self.downloader.download_book(book_id)
self.converter.convert_file_to_epub(book_name, book_id)
def _get_book_id(self, book_name):
for book in self.oreilly_collection:
if book.get('book_name') == book_name:
return book.get('book_id')
def upload_books(self):
for book_name in self.new_books:
src_path, dst_path = self._build_file_paths(book_name)
logging.info(f"Uploading file {book_name} ...")
self.uploader.upload_file(src_path, dst_path)
logging.info(f"File {book_name} uploaded!")
def _build_file_paths(self, book_name):
playlist = self.read_book_details_from_cfg(book_name)
src_path = f"{PROJECT_DIR_PATH}/converted_books/{book_name}.epub"
dbx_base_url = cfg.get('DROPBOX_DEST_DIR')
dst_path = f"{dbx_base_url}{playlist}/{book_name}.epub"
return src_path, dst_path
def read_book_details_from_cfg(self, book_name):
for book in self.oreilly_collection:
if book.get('book_name') == book_name:
return book.get('playlist_name')
if __name__ == '__main__':
synchronizer = BooksSynchronizer()
synchronizer.sync_books()
| [
"daniells92@o2.pl"
] | daniells92@o2.pl |
161618add3f39c9fe876a8d7b56a02309bb09785 | 2c68f9156087d6d338373f9737fee1a014e4546b | /src/vmware/azext_vmware/vendored_sdks/models/tracked_resource_py3.py | 27620df2d70b14816bdf6d808642b7b81ad668c9 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | anpaz/azure-cli-extensions | 8b0d4071c49840da9883f13cb0fd1f4515246ee0 | 847fd487fe61e83f2a4163a9393edc9555267bc2 | refs/heads/master | 2023-04-23T17:22:53.427404 | 2021-01-29T17:48:28 | 2021-01-29T18:01:33 | 257,394,204 | 2 | 0 | MIT | 2021-01-28T10:31:07 | 2020-04-20T20:19:43 | Python | UTF-8 | Python | false | false | 1,447 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class TrackedResource(Resource):
"""The resource model definition for a ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, *, location: str=None, tags=None, **kwargs) -> None:
super(TrackedResource, self).__init__(**kwargs)
self.location = location
self.tags = tags
| [
"noreply@github.com"
] | noreply@github.com |
cb3a507be273487b2929ee7275904ae43f70bfb4 | 55b89a23b7f594cc529e91d1f4251c40ddc5bef1 | /Python Human Readable Date.py | 2d991129d4b20eed9c14f89ae744073d79ed13ea | [
"MIT"
] | permissive | codenoid/Python-Human-Readable-Date | 75b3849e047e30eaddb84ed275e2a7eba5e1348f | 40b6e113c0f5ec64d6bb099cfe9c5e934018b9c2 | refs/heads/master | 2021-01-21T06:39:53.724956 | 2017-02-27T05:02:01 | 2017-02-27T05:02:01 | 83,269,972 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,980 | py | from datetime import datetime
# Inspired By Blurbs Framework - Human Readable Date
# Rubi Jihantoro - jihantoro8@gmail.com
now = datetime.today()
date = datetime(2017, 2, 24, 23, 36, 1) #your date
# Language
second = ' Second Ago'
minute = ' Minute Ago'
hours = ' Hours Ago'
ytd = 'Yesterday, At '
on = ' On '
dnow = [
now.strftime('%Y'),
now.strftime('%m'),
now.strftime('%d'),
now.strftime('%H'),
now.strftime('%M'),
now.strftime('%S'),
now.strftime('%p')
]
myd = [
date.strftime('%Y'),
date.strftime('%m'),
date.strftime('%d'),
date.strftime('%H'),
date.strftime('%M'),
date.strftime('%S'),
date.strftime('%p'),
date.strftime('%b')
]
dpo = int(myd[2])+1
def read():
if dnow[0]+dnow[1]+dnow[2]+dnow[3]+dnow[4] == myd[0]+myd[1]+myd[2]+myd[3]+myd[4]:
ns = int(dnow[5])
ms = int(myd[5])
nr = ns-ms
x = repr(nr) + second
print(x)
elif dnow[0]+dnow[1]+dnow[2]+dnow[3] == myd[0]+myd[1]+myd[2]+myd[3]:
ns = int(dnow[4])
ms = int(myd[4])
nr = ns-ms
x = repr(nr) + minute
print(x)
elif dnow[0]+dnow[1]+dnow[2] == myd[0]+myd[1]+myd[2]:
ns = int(dnow[3])
ms = int(myd[3])
nr = ns-ms
x = repr(nr) + hours
print(x)
elif dnow[0]+dnow[1]+dnow[2] == myd[0]+myd[1]+str(dpo):
x = ytd + myd[3] + ':' + myd[4] + myd[6]
print(x)
elif dnow[0]+dnow[1]+dnow[2] > myd[0]+myd[1]+str(dpo):
x = myd[2] + ' ' + myd[7] + ' ' + myd[0] + on + myd[3] + ':' + myd[4] + myd[6]
print(x)
read()
| [
"noreply@github.com"
] | noreply@github.com |
5bb25ec3477387e775d0d3cef9989cdcf87ecbe6 | 9f915ea92455e0fd11c462e3ebb124c7e67b6c7d | /simple_crud/create_db.py | 7a8bacf212efa767f3c7641af0cf5a37f6933990 | [] | no_license | twr14152/svedka | a3fa001195f6cad81f91d387a8d3ecb9083c4210 | 0d47f0514bd1f99e8b38df0fc6c6d8286ea84657 | refs/heads/master | 2022-11-15T06:25:09.588524 | 2022-11-11T01:03:17 | 2022-11-11T01:03:17 | 83,989,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | import sqlite3 as sql
#connect to SQLite
con = sql.connect('db_web.db')
#Create a Connection
cur = con.cursor()
#Drop users table if already exsist.
cur.execute("DROP TABLE IF EXISTS users")
#Create users table in db_web database
sql ='''CREATE TABLE "users" (
"UID" INTEGER PRIMARY KEY AUTOINCREMENT,
"UNAME" TEXT,
"PASSWORD" TEXT,
"OPERATIONS_COSTS" REAL
)'''
cur.execute(sql)
#commit changes
con.commit()
#close the connection
con.close()
| [
"noreply@github.com"
] | noreply@github.com |
45b9949ae0f0d840c1792793bacd388dbc458e59 | 428ee7363a2ad7d7916159eef595ee909d60eda8 | /tinifycli/display.py | 159775148d2d202861b1e4e03e7ed3e4fb096dc4 | [
"MIT"
] | permissive | pombredanne/tinify-cli | a2dee19e0096306393f039c26885ef4da38e08d9 | 37d1008d306d3f6459e4f6d72297c335a942cc1b | refs/heads/master | 2021-01-24T09:01:15.366205 | 2016-01-10T01:23:56 | 2016-01-10T01:23:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | # coding=utf-8
''' 显示层 '''
import logging
import Queue
import time
import threading
from . import shared_var
class TinifyCliDisplay(object):
    """Display layer: configures the 'tinify-cli' logger (stderr and/or a
    timestamped log file) and owns the mailbox queue intended for the
    progress-bar view."""

    def __init__(self, log_to_stderr=False, log_to_file=False):
        self.logger = logging.getLogger('tinify-cli')
        hndl = logging.StreamHandler()
        if shared_var.is_debug:
            # Debug builds get a verbose format with source location.
            hndl.setFormatter(
                logging.Formatter(
                    fmt='[%(levelname)s] %(filename)s:%(lineno)s %(funcName)s'
                    ' @ %(asctime)s => %(message)s',
                    datefmt='%H:%M:%S'))
            hndl.setLevel(logging.DEBUG)
        else:
            hndl.setFormatter(
                logging.Formatter(
                    fmt='[%(levelname)s] %(asctime)s : %(message)s',
                    datefmt='%H:%M:%S'))
            hndl.setLevel(logging.INFO)
        if log_to_stderr:
            self.logger.addHandler(hndl)
        self.logger.setLevel(logging.DEBUG)
        if log_to_file:
            # One UTF-8 log file per run, named after the (UTC) start time.
            filename = time.strftime(
                'tinify-cli.%Y-%m-%d-%H-%M-%S.log',
                time.gmtime())
            self.logger.addHandler(
                logging.FileHandler(
                    filename,
                    encoding='utf-8'))
        self.mailbox = Queue.Queue()  # for progress_bar_view

    def set_logging_level(self, level):
        """Set the logger level.

        `level` must be one of "NOTSET", "DEBUG", "INFO", "WARN",
        "ERROR" or "CRITICAL".
        """
        # SECURITY FIX: use getattr() instead of eval("logging." + level);
        # same attribute lookup, but cannot execute arbitrary code if
        # `level` ever comes from user input.
        self.logger.setLevel(level=getattr(logging, level))
| [
"aheadlead@dlifep.com"
] | aheadlead@dlifep.com |
feecfc389496c8c1ebf9368e8be2da15764d4830 | 04741556cbaa4f98afea194bc0997723eb06fa49 | /Python/Group(), Groups() & Groupdict().py | 4be0ae3f083124a4cdca9efedf23be66f04f9837 | [] | no_license | maybeee18/HackerRank | e75f619654326e0deae83ee4fe818d20d606bdda | dce488916f643248ea86500e176322a3b9e3b671 | refs/heads/master | 2023-01-21T23:22:07.206264 | 2020-10-05T04:54:45 | 2020-10-05T04:54:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py |
import re
# HackerRank "Group(), Groups() & Groupdict()": print the first
# alphanumeric character that occurs at least twice in a row, or -1.
s = str(input())
# ([a-zA-Z\d]) captures one alphanumeric char; \1+ requires one or more
# immediate repetitions of that same captured char.
m = re.search(r'([a-zA-Z\d])\1+', s)
print(m.group(1) if m else -1)
| [
"n.okroshiashvili@gmail.com"
] | n.okroshiashvili@gmail.com |
d05bb1fea607c83d07623745d2c57a7194956709 | 7ee2643c586dc542874d4a6fa58f0a1fb3ecd8b0 | /Python Exercises/01.conditionals_1.py | b4860c35a70beb1b2d9beaf2d170b4b3cdbce21f | [] | no_license | chathuRashmini/Python_For_Everybody | de2c0480a6d1157c70bd6730aba3a61a37266369 | 3c22ed094a419ca123b953273d0b7204a5ab9ce0 | refs/heads/main | 2023-04-22T09:35:37.793200 | 2021-05-11T01:49:34 | 2021-05-11T01:49:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | #!/bin/python
# Task----------------------------------------------------------
# Given an integer, n , perform the following conditional actions:
# If n is odd, print Weird
# If n is even and in the inclusive range of 2 to 5, print Not Weird
# If n is even and in the inclusive range of 6 to 20, print Weird
# If n is even and greater than 20, print Not Weird
import math
import os
import random
import re
import sys
if __name__ == '__main__':
    n = int(raw_input().strip())
    # "Weird" when n is odd, or when n is an even number in [6, 20];
    # every other even number (2-5 and anything above 20) is "Not Weird".
    if n % 2 != 0 or 6 <= n <= 20:
        print('Weird')
    else:
        print('Not Weird')
| [
"noreply@github.com"
] | noreply@github.com |
b15f982248a07bd81468603028e04993dab62e2c | 948f0a1ccee30084b5e6e9b1043bd1681d2ad38f | /app/1.2.py | 11b5df0c3846432a5436737d1486af287a9799af | [
"MIT"
] | permissive | filangelos/random-forest | 41454e934cf72cf1480cf5c001d569e629f578ac | 0fc7a4f74b1120f3e527e824abc1de1aa32f2b18 | refs/heads/master | 2021-09-09T13:53:30.982028 | 2018-03-16T18:38:36 | 2018-03-16T18:38:36 | 121,535,264 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,429 | py | # EXECUTION TIME: 49s
# Python 3 ImportError
import sys
sys.path.append('.')
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# prettify plots
plt.rcParams['font.family'] = 'Times New Roman'
sns.set_style({"xtick.direction": "in", "ytick.direction": "in"})
b_sns, g_sns, r_sns, p_sns, y_sns, l_sns = sns.color_palette("muted")
import src as ya
from src.struct import SplitNodeParams
from src.struct import ForestParams
np.random.seed(0)
# fetch data
data_train, data_query = ya.data.getData('Toy_Spiral')
N, D = data_train.shape
###########################################################################
# Split functions Comparison and Sparsity
###########################################################################
# number of splits
numSplit = 10
# weak learners
kernels = ['axis-aligned', 'linear', 'quadratic', 'cubic']
for frac in [1.00, 0.50, 0.25, 0.10]:
# random dataset
idx = np.random.choice(range(N), int(N*frac), True)
# root node
root = ya.tree.Node(idx=idx, t=np.nan, dim=-2, prob=[])
for kernel in kernels:
# reset seed
np.random.seed(0)
# get information gain
_ = ya.tree.splitNode(data_train,
root, SplitNodeParams(numSplit, kernel),
savefig_path='1.2/%s_%.2f' % (kernel, frac))
###########################################################################
# Kernel Complexity
###########################################################################
# number of experiments per kernel
M = 10
# execution time
runtime = pd.DataFrame(columns=kernels, index=range(M))
# memory
memory = pd.DataFrame(columns=kernels, index=range(M))
for kernel in kernels:
# repetitions
for j in range(M):
# start time
t0 = time.time()
_forest = ya.tree.growForest(data_train, ForestParams(
num_trees=10, max_depth=5, weak_learner=kernel
))
# end time
runtime.loc[j, kernel] = time.time() - t0
# object memory size
memory.loc[j, kernel] = sys.getsizeof(_forest)
# figure
fig, axes = plt.subplots(ncols=2, figsize=(12.0, 3.0))
# execution time
run = runtime.mean().values
axes[0].bar(range(len(runtime.columns)),
[run[i]*(1+0.15*i) for i in range(len(run))],
color=sns.color_palette("muted"))
axes[0].set_xticks(range(len(runtime.columns)))
axes[0].set_xticklabels(runtime.columns)
axes[0].set_title("Time Complexity of Weak Learners")
axes[0].set_xlabel("Weak Learner")
axes[0].set_ylabel("Training Time (s)")
# memory complexity
mem = memory.mean().values
axes[1].bar(range(len(memory.columns)),
[mem[i]*(1+0.1*i) for i in range(len(mem))],
color=sns.color_palette("muted"))
axes[1].set_xticks(range(len(memory.columns)))
axes[1].set_xticklabels(memory.columns)
axes[1].set_title("Memory Complexity of Weak Learners")
axes[1].set_xlabel("Weak Learner")
axes[1].set_ylabel("Memory Size (byte)")
fig.tight_layout()
fig.savefig('assets/1.2/complexity_kernel.pdf',
format='pdf',
dpi=300,
transparent=True,
bbox_inches='tight',
pad_inches=0.01)
###########################################################################
# `numSplit` vs weak-learners
###########################################################################
# random dataset
idx = np.random.choice(range(N), N, True)
# root node
root = ya.tree.Node(idx=idx, t=np.nan, dim=-2, prob=[])
# range of number of splits
numSplits = [1, 5, 10, 25, 50, 100, 1000]
# weak learners
kernels = ['axis-aligned', 'linear', 'quadratic', 'cubic']
IGS = pd.DataFrame(columns=kernels, index=numSplits)
for j, numSplit in enumerate(numSplits):
# weak-learners
for kernel in kernels:
# reset seed
np.random.seed(0)
# get information gain
_, _, _, ig = ya.tree.splitNode(data_train,
root,
SplitNodeParams(numSplit, kernel))
IGS.loc[numSplit, kernel] = ig
# table to be used for report
print('\n', IGS.to_latex(), '\n')
IGS.to_csv('assets/1.2/information_gain_vs_weak_learners.csv')
# we could also generate a qualitative comparison with a matrix
# of decision boundaries and IGs
# reference: Figure 4 from https://github.com/sagarpatel9410/mlcv/blob/master/CW1/report/mlcv.pdf
| [
"filos.angel@gmail.com"
] | filos.angel@gmail.com |
119a6abe72d224b7b326cfdd57b58142becaead7 | 2057b286dd6d894943fd810cc1a17e212c0bab16 | /app/core/models.py | bd99aeba8b162b6a0da4427a7179792e59715e60 | [
"MIT"
] | permissive | Jevra1995/recipe-app-api | 73105fcc064ee01802895762d65fcc76bedca1dc | f5881de5fce0cd25094fbbd5ba040c0f601b2c0b | refs/heads/master | 2020-05-04T07:40:10.111184 | 2019-04-05T12:17:35 | 2019-04-05T12:17:35 | 179,033,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,589 | py | import uuid
import os
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, \
BaseUserManager, PermissionsMixin
from django.conf import settings
def recipe_image_file_path(instance, filename):
    """Build a unique upload path for a new recipe image.

    Keeps the uploaded file's extension but replaces its name with a
    random UUID so stored files can never collide. `instance` is unused
    but required by Django's `upload_to` callback signature.
    """
    extension = filename.rsplit('.', 1)[-1]
    unique_name = f'{uuid.uuid4()}.{extension}'
    return os.path.join('uploads/recipe/', unique_name)
class UserManager(BaseUserManager):
    """Manager that creates users keyed by email instead of username."""
    def create_user(self, email, password=None, **extra_fields):
        """Creates and saves a new user"""
        if not email:
            raise ValueError('Users must have an email address')
        # normalize_email lower-cases the domain part for consistent lookups.
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)  # stores a salted hash, never plain text
        user.save(using=self._db)
        return user
    def create_superuser(self, email, password):
        """Creates and saves a new superuser"""
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that supports using email instead of username"""
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    # All user creation goes through the email-based manager above.
    objects = UserManager()
    # Authenticate with the email field instead of a username.
    USERNAME_FIELD = 'email'
class Tag(models.Model):
    """Tag to be used for a recipe"""
    name = models.CharField(max_length=255)
    # Tags are owned per-user; deleting the user cascades to their tags.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    def __str__(self):
        return self.name
class Ingredient(models.Model):
    """Ingredient to be used in a recipe"""
    name = models.CharField(max_length=255)
    # Ingredients are owned per-user; deleting the user removes them.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    def __str__(self):
        return self.name
class Recipe(models.Model):
    """Recipe object"""
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    title = models.CharField(max_length=255)
    # Up to 5 digits with 2 decimal places, i.e. prices up to 999.99.
    price = models.DecimalField(max_digits=5, decimal_places=2)
    time_minutes = models.IntegerField()
    link = models.CharField(max_length=255, blank=True)
    ingredients = models.ManyToManyField('Ingredient')
    tags = models.ManyToManyField('Tag')
    # Stored under a collision-proof UUID name (see recipe_image_file_path).
    image = models.ImageField(null=True, upload_to=recipe_image_file_path)
    def __str__(self):
        return self.title
| [
"nikola.jevremovic@htecgroup.com"
] | nikola.jevremovic@htecgroup.com |
8795ae563d13e777b07be9c178c3032e7c2d9605 | a1cd3ce533ce070c7c77e6a30a6d0b6a14fc9854 | /vegvisir.py | 45f7a9b1f3a33f4627260be1742b1223784f2dea | [
"MIT"
] | permissive | jelbrek/vegvisir | 12e73cca7fe3a30ea0ff7b420eeb4e6b29e06cc1 | 2ef2d5826665068c88f3de827af8ddf87bba9439 | refs/heads/master | 2022-02-18T05:50:50.081719 | 2019-10-06T06:22:27 | 2019-10-06T06:22:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,105 | py | #!/usr/bin/python
import os
import json
from app.config import config
from app.core import lldbcontroller as LLDBController
from flask import Flask, request, Response, render_template, jsonify
from flask_cors import CORS
name = "vegvisir"
# Serve templates and static assets from the bundled www/ folder.
app = Flask(name ,template_folder="www", static_folder="www/")
app.config.update(
    DEBUG=True,
    TEMPLATES_AUTO_RELOAD=True
)
CORS(app, supports_credentials=False)
@app.after_request
def after_request(response):
    # Attach CORS headers to every response so the single-page UI served
    # from HOST:PORT can call the JSON API from the browser.
    response.headers.add('Access-Control-Allow-Origin', 'http://%s:%s'%(config.HOST, config.PORT))
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type,auth')
    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
    response.headers.add('Access-Control-Allow-Credentials', 'false')
    response.headers.add('Access-Control-Expose-Headers', 'auth')
    return response
@app.route('/<path:path>')
def static_proxy(path):
    """Serve any other path as a static asset from the www/ folder."""
    # send_static_file will guess the correct MIME type
    return app.send_static_file(path)
@app.route('/')
def index():
    """Serve the single-page application shell."""
    return render_template('index.html')
@app.route('/if_target', methods=["GET"])
def if_target():
    """Report whether a debug target is loaded; when it is, return the
    initial UI payload (entry point, functions, sections, strings, a
    first disassembly window and register context)."""
    try:
        if lldbContr.ifTarget():
            entrypoint = lldbContr.getEntryPoint()
            functions = lldbContr.doReturnFunctions()
            sections,section_sizes = lldbContr.doReturnSections()
            # Disassemble 0x100 bytes starting at the entry point.
            disassembly = lldbContr.capDisassemble(long(entrypoint, 16), 0x100)
            strings = lldbContr.doReturnStrings()
            context = lldbContr.context()
            binary = lldbContr.exe
            return jsonify({"success":True, "binary":binary, "entrypoint":entrypoint, "functions":functions,"sections":sections,"section_sizes":section_sizes,"disassembly":disassembly,"strings":strings,"context":context})
        else:
            print 'No target'
            return jsonify({"success":False,"targe":False})
    except Exception,e:
        return jsonify({"success":False,"error":"%s"%e})
@app.route('/set_target', methods=["POST"])
def set_target():
    """Load the binary named in a `target create <path>` command; on
    success return the same initial payload as /if_target."""
    req = request.json
    # Strip the LLDB-style command prefix to recover the binary path.
    path = str(req["command"]).replace("target create ","")
    if path and os.path.isfile(path):
        lldbContr.setTarget(str(path), "")
        lldbContr.capstoneinit()
        if lldbContr.target:
            entrypoint = lldbContr.getEntryPoint()
            functions = lldbContr.doReturnFunctions()
            sections,section_sizes = lldbContr.doReturnSections()
            disassembly = lldbContr.capDisassemble(long(entrypoint,16), 0x100)
            strings = lldbContr.doReturnStrings()
            context = lldbContr.context()
            return jsonify({"success":True, "entrypoint":entrypoint, "functions":functions,"sections":sections,"section_sizes":section_sizes,"disassembly":disassembly,"strings":strings,"context":context})
    return jsonify({"success":False, "error":"Please give a valid binary path."})
@app.route('/run_command', methods=['POST'])
def run_command():
    """Forward a raw debugger command; on success also return the
    refreshed register/stack context so the UI can repaint."""
    req = request.json
    command = str(req["command"])
    try:
        success, op = lldbContr.runCommands(command)
        if success:
            context = lldbContr.context();
            return jsonify({"success":True,"output":op,"context":context})
        return jsonify({"success":False,"error":op})
    except Exception, e:
        return jsonify({"success":False, "error":"There was an error while running the command. Error:%s"%(e)})
@app.route('/get_disassembly', methods=['GET'])
def get_disassembly():
    """Disassemble a named function (or an explicit address range)."""
    func_name = str(request.args.get("func"))
    start_addr = str(request.args.get("start_addr"))
    end_addr = str(request.args.get("end_addr"))
    disassembly = lldbContr.disassemble(func_name, start_addr, end_addr)
    if disassembly:
        return jsonify({"success":True, "disassembly":disassembly})
    return jsonify({"success":False, "error":"non readable"})
@app.route('/get_entrypoint_disassembly', methods=['GET'])
def get_entrypoint_disassembly():
    """Disassemble 0x100 bytes starting at the target's entry point."""
    entrypoint = lldbContr.getEntryPoint()
    disassembly = lldbContr.capDisassemble(long(entrypoint,16), 0x100)
    if disassembly:
        return jsonify({"success":True, "disassembly":disassembly})
    return jsonify({"success":False, "error":"non readable"})
if __name__ == '__main__':
    # Single global LLDB controller shared by every request handler above.
    lldbContr = LLDBController.LLDBController()
    app.run(host=config.HOST, port=config.PORT)
| [
"ichaithu@gmail.com"
] | ichaithu@gmail.com |
14cf9d39ac25408754cded21c849a8148f084381 | 6751b7b958b7c7f08a2c6144a2fbfecf9b2823db | /topo7.py | b6fbffb29beacfbc7281fb98b8468740a445879a | [] | no_license | J0e3gan/Computer-Network-6250-Log-Result | e05f65555cd030ccd85feae95a4aaf99e2b87460 | 6f8f81190ad588d70dd1a3ad70dd26679bfa43f8 | refs/heads/master | 2021-01-17T14:15:07.658439 | 2015-06-15T03:09:25 | 2015-06-15T03:09:25 | 37,440,791 | 1 | 0 | null | 2015-06-15T03:13:18 | 2015-06-15T03:13:18 | null | UTF-8 | Python | false | false | 1,778 | py | # a Star topology centered on Z with a loop around the outside
# +-------------D---------G---------J------------+
# | \ | / |
# | \ | / |
# | E H K |
# | \ | / |
# | \ | / |
# | F I L |
# | \ | / |
# | \ | / |
# A --- B --- C --------- Z -------- M --- N --- O
# | / | \ |
# | / | \ |
# | P S V |
# | / | \ |
# | / | \ |
# | Q T W |
# | / | \ |
# | / | \ |
# +-------------R---------U---------X------------+
# Adjacency list for the diagram above: hub Z with eight spokes
# (C, F, I, L, M, P, S, V) plus the outer ring A-D-G-J-O-X-U-R-A,
# each spoke reaching the ring through a three-hop chain.
topo = { 'A' : ['B', 'D', 'R'],
         'B' : ['A', 'C'],
         'C' : ['B', 'Z'],
         'D' : ['A', 'E', 'G'],
         'E' : ['D', 'F'],
         'F' : ['E', 'Z'],
         'G' : ['D', 'H', 'J'],
         'H' : ['G', 'I'],
         'I' : ['H', 'Z'],
         'J' : ['G', 'K', 'O'],
         'K' : ['J', 'L'],
         'L' : ['K', 'Z'],
         'M' : ['Z', 'N'],
         'N' : ['M', 'O'],
         'O' : ['J', 'N', 'X'],
         'P' : ['Z', 'Q'],
         'Q' : ['P', 'R'],
         'R' : ['A', 'Q', 'U'],
         'S' : ['Z', 'T'],
         'T' : ['S', 'U'],
         'U' : ['R', 'T', 'X'],
         'V' : ['Z', 'W'],
         'W' : ['V', 'X'],
         'X' : ['U', 'W', 'O'],
         'Z' : ['C', 'F', 'I', 'L', 'M', 'P', 'S', 'V']}
| [
"bpmmhk91@gmail.com"
] | bpmmhk91@gmail.com |
90919708b38f1802fc5b4fd478de24850db54d26 | 2bfdd061bbd594716216402f5058401285352fae | /main.py | e59397fbee31e1a414dae3c4fb6910bff4d53017 | [] | no_license | thelotusflower/openni-file-player | ab4f6ff543c2bd44a1919a71a624335b08bc46a1 | edae9fd231e2728dca7778d52f0e5e8cdcd10287 | refs/heads/master | 2020-03-26T11:30:18.175564 | 2018-08-15T11:44:39 | 2018-08-15T11:44:39 | 144,846,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | import gui
import sys
from PyQt5 import QtWidgets
# Bootstrap the Qt application, show the player window, then block in
# the event loop; the interpreter exits once the window is closed.
app = QtWidgets.QApplication(sys.argv)
window = gui.Player()  # kept in a name so the window is not garbage-collected
app.exec()
sys.exit()
| [
"noreply@github.com"
] | noreply@github.com |
86cc3320d97d3f645d71865a5d006a4c71317a53 | 952a411236d1052e1e3d67d72cf01a973e8e71e5 | /c6_decesion_tree_create.py | 2f88c11f9f744a59830d6a3a1eb3d8f339e15c79 | [] | no_license | cuidongxu666/machine_learning_review | 698a0854c130f8ee72b7f9ce5595d398be513f6c | 07bdb82f74713707c3f2fb2aaa1fd13f6c983ed9 | refs/heads/master | 2020-09-08T19:35:48.726806 | 2019-11-12T13:49:53 | 2019-11-12T13:49:53 | 221,226,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | #模拟信息熵进行一次划分
from collections import Counter
import numpy as np
from math import log
#根据特征d,阈值value进行划分
def split(X, y, d, value):
    """Partition (X, y) on feature column `d` at threshold `value`.

    Returns (X_left, X_right, y_left, y_right), where the left side
    contains the samples with X[:, d] <= value and the right side the
    rest.
    """
    # BUGFIX: the original indexed `X[:d]` (a slice of the first d ROWS);
    # the feature column is `X[:, d]`.
    index_a = (X[:, d] <= value)
    index_b = (X[:, d] > value)
    return X[index_a], X[index_b], y[index_a], y[index_b]
#计算信息熵 基尼系数
def entropy(y):
    """Shannon entropy (natural log) of the class labels in `y`.

    The commented-out lines show the Gini-impurity variant
    (start from res = 1 and subtract p**2 per class).
    """
    counter = Counter(y)
    res = 0.0
    # res = 1
    for num in counter.values():
        p = num / len(y)
        res += -p * log(p)
        # res -= p**2
    # BUGFIX: the original never returned `res`, so callers always got None.
    return res
def try_split(X, y):
    """Search every feature/threshold pair for the split minimising the
    summed entropy of the two resulting partitions.

    Returns (best_entropy, best_d, best_v); best_d == best_v == -1 means
    no valid split was found (e.g. all feature values identical).
    """
    best_entropy = float('inf')
    best_d, best_v = -1, -1
    for d in range(X.shape[1]):
        # BUGFIX: sort by the feature COLUMN X[:, d]; the original
        # sliced the first d rows with X[:d].
        sorted_index = np.argsort(X[:, d])
        for i in range(1, len(X)):
            if X[sorted_index[i-1], d] != X[sorted_index[i], d]:
                # Candidate threshold: midpoint of two adjacent values.
                v = (X[sorted_index[i-1], d] + X[sorted_index[i], d]) / 2
                X_l, X_r, y_l, y_r = split(X, y, d, v)
                # Quality of the split: total entropy of both halves.
                e = entropy(y_l) + entropy(y_r)
                if e < best_entropy:
                    best_entropy, best_d, best_v = e, d, v
    return best_entropy, best_d, best_v
| [
"1041254428@qq.com"
] | 1041254428@qq.com |
09ffb5881d97e3793dd1a6ca240d478cc0e58790 | 18f33f3ed62229552e92d73022a9ea4e29d892d9 | /bin/py/isup.py | daf88fc2a5ba4298b60002f388f5a0f019fe1206 | [] | no_license | ttaylordev/z | 70997681332b74dce9b444efed25743f5fd71c67 | 6afd06283b6f2f15c409b7ad651b0a3c4ded2fa7 | refs/heads/master | 2020-12-30T17:20:04.690100 | 2016-05-31T14:43:27 | 2016-05-31T14:43:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,993 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright © 2011 Nicolas Paris <nicolas.caen@gmail.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
import urllib
class DownForEveryone(object):
    # CLI wrapper around downforeveryoneorjustme.com: checks whether the
    # URL given as the first command-line argument is down for everyone
    # or just for the local machine.  (Python 2 code: print statements,
    # urllib.urlopen.)
    base_url = 'http://www.downforeveryoneorjustme.com/'
    def __init__(self):
        # The whole check runs in the constructor: parse argv, validate,
        # then scrape the service's answer page.
        self.parse_url()
        self.is_url()
        self.get_answer(
            urllib.urlopen(self.base_url+self.url).read()
        )
    def parse_url(self):
        # Take the URL from argv; show usage and exit when it is missing.
        try:
            self.url = sys.argv[1]
        except IndexError:
            self.usage()
    def is_url(self):
        # Loose sanity check that the argument looks like one http(s) URL.
        url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
        resp = re.findall(url_regex, self.url)
        if len(resp) != 1:
            print 'The argument does not seems to be an URL'
            self.usage()
    def get_answer(self, response):
        # The service answers with one of two known sentences; colour the
        # verdict green (up) or red (down) using ANSI escape codes.
        up = 'It\'s just you.'
        down ='It\'s not just you!'
        if up in response:
            print '\033[1;32m{} {} {}\033[1;m'.format(up, self.url, 'is up.')
        elif down in response:
            print '\033[1;31m{} {} {}\033[1;m'.format(down, self.url, 'looks down from here.')
        else:
            print 'Error to find the answer'
    def usage(self):
        # Print a short usage banner and exit successfully.
        print 'Usage:'
        print '  {} http://www.exemple.org'.format(sys.argv[0])
        sys.exit(0)
if __name__ == '__main__':
DownForEveryone() | [
"zac@zacanger.com"
] | zac@zacanger.com |
1c5db695c00805487dc0b8800c680a5d899c3af3 | 31cbea7109013dc56e7393a7553e5a7ac664b45e | /2.first_step_tf/first_step_tf.py | c0e101d923fee67fc4f5ab5768fb9f6f58062381 | [] | no_license | abhishek9165/ml_cc | fdf043f62d3e009402615bb558a3d4d059ea059d | 79adbbac172a35ee4c75b8417284649b51e31297 | refs/heads/master | 2020-03-18T19:26:44.678433 | 2018-06-01T06:22:42 | 2018-06-01T06:22:42 | 135,154,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,672 | py | import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
df = pd.read_csv("california_housing_train.csv", sep=",")
df = df.reindex(np.random.permutation(df.index))
df["median_house_value"] /= 1000.0
#print (df)
#print (df.describe())
# Define the input feature: total_rooms.
my_feature = df[["total_rooms"]]
# print(my_feature)
# Configure a numeric feature column for total_rooms.
feature_columns = [tf.feature_column.numeric_column("total_rooms")]
# print(feature_columns)
targets = df["median_house_value"]
my_optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.0000001)
# print(my_optimizer)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
# print(my_optimizer)
# Configure the linear regression model with our feature columns and optimizer.
# Set a learning rate of 0.0000001 for Gradient Descent.
linear_regressor = tf.estimator.LinearRegressor(
feature_columns=feature_columns,
optimizer=my_optimizer
)
# print(linear_regressor)
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Input function that feeds the LinearRegressor.

    Args:
      features: pandas DataFrame of feature columns.
      targets: pandas Series of labels.
      batch_size: mini-batch size.
      shuffle: whether to shuffle examples (buffer of 10000).
      num_epochs: epochs to repeat the data; None repeats indefinitely.
    Returns:
      A (features, labels) pair of tensors for the next batch.
    """
    # Convert pandas data into a dict of np arrays.
    features = {key:np.array(value) for key,value in dict(features).items()}
    # Construct a dataset, and configure batching/repeating.
    ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
    ds = ds.batch(batch_size).repeat(num_epochs)
    # Shuffle the data, if specified.
    if shuffle:
        ds = ds.shuffle(buffer_size=10000)
    # Return the next batch of data.
    features, labels = ds.make_one_shot_iterator().get_next()
    return features, labels
_ = linear_regressor.train(
input_fn = lambda:my_input_fn(my_feature, targets),
steps=100
)
prediction_input_fn =lambda: my_input_fn(my_feature, targets, num_epochs=1, shuffle=False)
# Call predict() on the linear_regressor to make predictions.
predictions = linear_regressor.predict(input_fn=prediction_input_fn)
# Format predictions as a NumPy array, so we can calculate error metrics.
predictions = np.array([item['predictions'][0] for item in predictions])
# Print Mean Squared Error and Root Mean Squared Error.
mean_squared_error = metrics.mean_squared_error(predictions, targets)
root_mean_squared_error = math.sqrt(mean_squared_error)
print "Mean Squared Error (on training data): %0.3f" % mean_squared_error
print "Root Mean Squared Error (on training data): %0.3f" % root_mean_squared_error
min_house_value = df["median_house_value"].min()
max_house_value = df["median_house_value"].max()
min_max_difference = max_house_value - min_house_value
print "Min. Median House Value: %0.3f" % min_house_value
print "Max. Median House Value: %0.3f" % max_house_value
print "Difference between Min. and Max.: %0.3f" % min_max_difference
print "Root Mean Squared Error: %0.3f" % root_mean_squared_error
def train_model(learning_rate, steps, batch_size, input_feature="total_rooms"):
    """Train a one-feature linear regression model and plot its progress.

    Args:
      learning_rate: gradient-descent learning rate.
      steps: total number of training steps, spread over 10 periods.
      batch_size: mini-batch size.
      input_feature: column of the global `df` to use as input feature.
    """
    periods = 10
    # NOTE(review): assumes Python 2 integer division; under Python 3 this
    # would make steps_per_period a float -- confirm interpreter version.
    steps_per_period = steps / periods
    my_feature = input_feature
    my_feature_data = df[[my_feature]]
    my_label = "median_house_value"
    targets = df[my_label]
    # Create feature columns.
    feature_columns = [tf.feature_column.numeric_column(my_feature)]
    # Create input functions.
    training_input_fn = lambda:my_input_fn(my_feature_data, targets, batch_size=batch_size)
    prediction_input_fn = lambda: my_input_fn(my_feature_data, targets, num_epochs=1, shuffle=False)
    # Create a linear regressor object.
    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns,
        optimizer=my_optimizer
    )
    # Set up to plot the state of our model's line each period.
    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    plt.title("Learned Line by Period")
    plt.ylabel(my_label)
    plt.xlabel(my_feature)
    sample = df.sample(n=300)
    plt.scatter(sample[my_feature], sample[my_label])
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]
    # Train the model, but do so inside a loop so that we can periodically assess
    # loss metrics.
    print "Training model..."
    print "RMSE (on training data):"
    root_mean_squared_errors = []
    for period in range (0, periods):
        # Train the model, starting from the prior state.
        linear_regressor.train(
            input_fn=training_input_fn,
            steps=steps_per_period
        )
        # Take a break and compute predictions.
        predictions = linear_regressor.predict(input_fn=prediction_input_fn)
        predictions = np.array([item['predictions'][0] for item in predictions])
        # Compute loss.
        root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(predictions, targets))
        # Occasionally print the current loss.
        print "  period %02d : %0.2f" % (period, root_mean_squared_error)
        # Add the loss metrics from this period to our list.
        root_mean_squared_errors.append(root_mean_squared_error)
    # Finally, track the weights and biases over time.
    # Apply some math to ensure that the data and line are plotted neatly.
    y_extents = np.array([0, sample[my_label].max()])
    weight = linear_regressor.get_variable_value('linear/linear_model/%s/weights' % input_feature)[0]
    bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')
    x_extents = (y_extents - bias) / weight
    x_extents = np.maximum(np.minimum(x_extents,
                                      sample[my_feature].max()),
                           sample[my_feature].min())
    y_extents = weight * x_extents + bias
    plt.plot(x_extents, y_extents, color=colors[period])
    print "Model training finished."
    # Output a graph of loss metrics over periods.
    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)
    # Output a table with calibration data.
    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display.display(calibration_data.describe())
    print "Final RMSE (on training data): %0.2f" % root_mean_squared_error
train_model(
learning_rate=0.0001,
steps=100,
batch_size=1
)
| [
"verma.abhishek9165@innoplexus.com"
] | verma.abhishek9165@innoplexus.com |
93b4179de3c328ba26c4fa4e6ac0bc062a52d1fd | fc46f42dde6b4b7d7fc79ef1af6f65719215cc92 | /referencia/lab_rpc/rpyc/main.py | 1ecb47063a2d24a1c47566dda3776182e1ca9f92 | [] | no_license | andrelimabessa/sd217.2 | 47986dc271755ab8691359fae06ef5c2cf2ac540 | 1a98dfc67ac5be0b81f14282aba6d2a6a9a09814 | refs/heads/master | 2021-01-15T23:59:50.910011 | 2017-12-18T02:48:54 | 2017-12-18T02:48:54 | 99,948,047 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | import sys
from rpyc_server import server
from rpyc_client import client
print("Você quer executar:")
print("1 para servidor")
print("2 para cliente")
opcao = input("Opção:")
try:
if int(opcao) == 1:
print("Servidor ativado:\n")
server()
elif int(opcao) == 2:
print("Cliente ativado:\n")
client()
except : # pega todas possíveis
for val in sys.exc_info():
print(val)
input()
| [
"andrebessa@gmail.com"
] | andrebessa@gmail.com |
9ae00e78ca2c66a4385108f8f7d493052ee74a6f | 5a38ffc206890a323ec9bc92c7de2369d2650012 | /arquivos-caderneta/printer-scripts/print_pagamento_padaria.py | 37b499b25ea1db00341de588e3447d565169244d | [] | no_license | MBorgesT/Fiado | 2e316b6250b1019e5f99401114e21018a00f515b | b25a32c2b14be2df9279c436770fe54e322fe703 | refs/heads/master | 2021-07-06T15:21:27.034877 | 2020-08-30T21:10:14 | 2020-08-30T21:10:14 | 162,161,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,754 | py | from escpos.printer import Usb
from unidecode import unidecode
import json
file = open('/home/matheus/arquivos-caderneta/printer-scripts/pagamento.json')
json_data = json.load(file)
printer = Usb(0x0416, 0x5011)
id_pagamento = json_data['id_pagamento']
data = json_data['data']
valor = json_data['valor']
cliente = json_data['cliente']
atendente = json_data['atendente']
observacao = json_data['observacao']
qtd_compras = int(json_data['qtd_compras'])
compras_list = []
for i in range(qtd_compras):
data_aux = json_data['compra_data' + str(i)]
valor_aux = json_data['compra_valor' + str(i)]
compras_list.append([data_aux, valor_aux])
# recibo do cliente
printer.set(align='center', width=2, height=2, invert=True)
printer.text(' Paes & Cia \n\n')
printer.set(align='center', width=2, height=1)
printer.text('Recibo de\npagamento\n\n')
printer.set(align='left', width=1, height=1)
printer.text('ID: ' + id_pagamento + '\n')
printer.text('Data: ' + data + '\n')
printer.text('Valor total: R$ ' + valor + '\n')
printer.text('Cliente: ' + cliente + '\n')
printer.text('Atendente: ' + atendente + '\n')
if (observacao != 'null'):
printer.text('Observacao: ' + observacao + '\n')
printer.text('\n')
for compra in compras_list:
len_compra = len(compra[1])
str_spaces = ''
for i in range(13 - len_compra):
str_spaces += ' '
printer.text(compra[0] + str_spaces + compra[1] + '\n')
printer.text('\n\n')
printer.set(align='center')
printer.text('__________________________\n')
printer.text('Atendente\n\n\n')
printer.text('__________________________\n')
printer.text('Cliente')
printer.text('\n\n')
printer.set(align='center', width=1, height=2)
printer.text('Este recibo pertence a Padaria. Favor nao entregar ao cliente.')
printer.cut() | [
"matheus.borgest@gmail.com"
] | matheus.borgest@gmail.com |
2a81f7d4b1227f25359095b9287bd11ddefebfa3 | 1e07ebba0d691a53ed9859c4514fa0fa26096948 | /comentario/migrations/0012_auto_20190922_1843.py | 027e4158ca636543dba5ab86d483626db3a0b521 | [] | no_license | frankbriones/fundacion | bf52a9be94348306b55506677c173428cc626fc1 | 9742d244526374aa4bbcb6c338b33a698c751a1d | refs/heads/master | 2022-12-22T12:29:39.106710 | 2019-11-08T00:25:04 | 2019-11-08T00:25:04 | 191,661,945 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | # Generated by Django 2.1.1 on 2019-09-22 23:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: redefine Comentario.estado with explicit choices."""
    # Must be applied on top of migration 0011.
    dependencies = [
        ('comentario', '0011_auto_20190921_1602'),
    ]
    operations = [
        # `estado` becomes a nullable small integer defaulting to 0 (Activo);
        # the choice labels are user-facing strings and intentionally Spanish.
        migrations.AlterField(
            model_name='comentario',
            name='estado',
            field=models.SmallIntegerField(blank=True, choices=[(1, 'Inactivo'), (0, 'Activo')], default=0, null=True),
        ),
    ]
| [
"frankbriones90@gmail.com"
] | frankbriones90@gmail.com |
ffd08081db47a183d721c341ba8da28946c71515 | c451bbc6d0014cf7d251fc79176b5d789bacdf71 | /plasticWM/migrations/0004_auto_20210424_1510.py | 41c33a80ab5ca43db652ead4ccd1932c8fb6d9c3 | [] | no_license | AshwinAnoop/RewardsForPlastic | 58d6a2d781b777c693dac6f270281e599e3fe7e3 | 8a6d6a8c3a93d99f28a1d4b00ad3723676624043 | refs/heads/main | 2023-04-10T14:08:33.402819 | 2021-04-25T17:22:59 | 2021-04-25T17:22:59 | 361,137,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | # Generated by Django 3.1.7 on 2021-04-24 15:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Schema migration: repoint ScrapShop.locality at plasticWM.Locality."""
    # Applies on top of migration 0003.
    dependencies = [
        ('plasticWM', '0003_auto_20210424_1453'),
    ]
    operations = [
        # DO_NOTHING: deleting a Locality leaves referencing ScrapShop rows
        # untouched at the ORM level (the database may still enforce the FK).
        migrations.AlterField(
            model_name='scrapshop',
            name='locality',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='plasticWM.locality'),
        ),
    ]
| [
"ashwinka999@gmail.com"
] | ashwinka999@gmail.com |
45195cd9a511fdfd4e923e24cec6b203242b4440 | 5d5365a73e81ccf71c73b9d86eb070841f1e0001 | /backend/wallet/admin.py | bee6e39eb2e35074d6224625dcf762cc55d21246 | [] | no_license | crowdbotics-apps/yjjhffhg-22011 | 3c908901c5fa930df11d6af17471a39a7e3b1dd9 | c5649303aef6b69f515c4526df8b43ee82212c12 | refs/heads/master | 2023-01-12T02:01:16.200496 | 2020-10-27T15:36:39 | 2020-10-27T15:36:39 | 307,746,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from django.contrib import admin
from .models import (
    PaymentTransaction,
    TaskerPaymentAccount,
    TaskerWallet,
    PaymentMethod,
    CustomerWallet,
)

# Register every wallet model with the Django admin site so each can be
# managed through the built-in admin UI; order matches the original module.
for wallet_model in (
    TaskerWallet,
    PaymentMethod,
    TaskerPaymentAccount,
    CustomerWallet,
    PaymentTransaction,
):
    admin.site.register(wallet_model)
# Register your models here.
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
e6e186ddde3a4e473e162348a1c1eb6df3cbbf5d | 567f76818219464714e49fd4ba4bac43494107f1 | /stopwords.py | 433b23f0bef87fbaba89f096b9a69ef8dc9e23ac | [] | no_license | anjalibhavan/termextraction | f9c34113c15859919789cff8dc3bd42ffaa04108 | afe28e6b490156ae3fcf3bb01e2636fdac735c52 | refs/heads/master | 2020-06-01T01:38:40.578265 | 2019-10-01T12:45:18 | 2019-10-01T12:45:18 | 190,580,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,199 | py | import nltk
import string
from nltk import word_tokenize
from collections import Counter
import os
from chardet import detect
from nltk.corpus import stopwords
import numpy as np
from nltk.util import ngrams
import pandas as pd
import scipy
def get_encoding_type(file):
    """Return the character-encoding name chardet guesses for *file*."""
    with open(file, 'rb') as source:
        return detect(source.read())['encoding']
rootdir = 'C:/Users/anjali/desktop/nptel/'
stop_words = set(stopwords.words('english'))
print(stop_words)
#docs=[]
# For every course directory under rootdir: re-encode its transcript files
# to UTF-8, build per-course token statistics, then strip NLTK stopwords
# from each file in place.
for file_name in os.listdir(rootdir):
    froot = "".join(file_name)  # no-op for a str; kept for compatibility
    root_dir = rootdir + froot + '/'
    docs = []
    for filename in os.listdir(root_dir):
        # NOTE(review): assumes every filename has at least 4 characters;
        # shorter names would raise IndexError.
        if filename[3] == 'n':
            name = "".join(filename)
            full_name = root_dir + name
            from_codec = get_encoding_type(full_name)
            # Rewrite the file as UTF-8 through a temporary file.
            with open(full_name, 'r', encoding=from_codec) as f, open('trgfile.txt', 'w', encoding='utf-8') as e:
                text = f.read() # for small files, for big use chunks
                e.write(text)
            os.remove(full_name) # remove old encoding file
            os.rename('trgfile.txt', full_name) # rename new encoding
            # Fix: read through a context manager; the original leaked this
            # handle (f1 was opened but never closed).
            with open(full_name, encoding='utf8') as f1:
                doc = f1.read()
            docs.append(doc)
    docs = (' '.join(filter(None, docs))).lower()
    tokens = word_tokenize(docs)
    tokens = [t.lower() for t in tokens if t not in stop_words and t not in string.punctuation]
    unigrams = list(ngrams(tokens,1))
    c = Counter(unigrams)
    # 100 most frequent unigrams of this course.
    # NOTE(review): docstopwords is computed but never used below; the
    # commented-out variants at the end of the file are its only consumers.
    docstopwords = [x[0][0] for x in c.most_common(100)]
    for filename in os.listdir(root_dir):
        if filename[3]=='n':
            name = "".join(filename)
            full_name = root_dir+name
            # Fix: close the input file deterministically (was leaked).
            with open(full_name, encoding='utf8') as fin:
                doc = fin.read().split()
            words = [words.lower() for words in doc if words not in stop_words]
            with open(full_name,'w',encoding='utf8') as fout:
                fout.write(" ".join(words))
    print('done')
'''
docs = (' '.join(filter(None, docs))).lower()
tokens = word_tokenize(docs)
postokens = nltk.pos_tag(tokens)
tokens = [t for t in tokens if t not in stop_words and t not in string.punctuation]
bigrams = list(ngrams(tokens,2))
c = Counter(bigrams)
docstops = [x[0] for x in c.most_common(100)]
docstopwords=[]
for tupes in docstops:
docstopwords.append( (''.join([w+' ' for w in tupes])).strip())
print(docstopwords)
def neighbors(mylist):
i = 0
while i+1 < len(mylist):
yield (mylist[i], mylist[i + 1])
i += 1
for filename in os.listdir(root_dir):
if filename[3]=='n':
name = "".join(filename)
full_name = root_dir+name
f = open(full_name,encoding='utf8')
doc = f.read().split()
words = [words for words,y in neighbors(doc) if words+' '+y not in docstopwords and words not in string.punctuation and y not in string.punctuation]
with open(full_name,'w',encoding='utf8') as fout:
fout.write(" ".join(words))
print('done')
'''
'''
c = Counter(unigrams)
docstopwords = [x[0][0] for x in c.most_common(100)]
for filename in os.listdir(root_dir):
if filename[3]=='n':
name = "".join(filename)
full_name = root_dir+name
f = open(full_name,encoding='utf8')
doc = f.read().split()
words = [words for words in doc if words not in docstopwords and words not in string.punctuation]
#UTF-8-SIG Windows-1252
with open(full_name,'w',encoding='utf8') as fout:
fout.write(" ".join(words))
'''
| [
"anjalibhavan98@gmail.com"
] | anjalibhavan98@gmail.com |
65b7349016818ec0fd5b7471e004da12717831be | a02ca4a895214f9daa984019e5614d6f9c4ba9a9 | /HelloTube/urls.py | 0d4ef456157bebd7a3de7e422a2e133a5ef07489 | [] | no_license | zuoxiaotian/hellotube | 49ee45cbdbc7b62f7d81aca0706f8e1761b373a5 | be39307ab6645658400da7e29da6adbde63ae00e | refs/heads/master | 2020-06-07T06:15:15.993834 | 2019-06-20T21:14:41 | 2019-06-20T21:14:41 | 192,946,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from django.conf.urls import url
from HelloTube import settings
from . import view
urlpatterns = [
url(r'^$', view.index),
url(r'^extract', view.extract),
url(r'^send', view.send),
url(r'^create', view.create),
url(r'^download/(?P<name>.+)/',view.download),
] | [
"noreply@github.com"
] | noreply@github.com |
a130bec774c892fb9c1a395120872e7e39f4ff02 | 41ea6ff5e61cfa67dbf7000a31faf0e30ab2fcb2 | /python handson/Python for data structure/array_missing_element.py | b820bec134551f0933b98fa065d9aa4f209aec44 | [] | no_license | 12btcse1108/hacker-rank-sol | 576907c58a484264e87817dc6dbb54ac867bf601 | 870b4f0579a19f3b792da95fc0aca6a3d3ae308f | refs/heads/master | 2021-08-16T17:35:52.408782 | 2017-11-20T06:11:17 | 2017-11-20T06:11:17 | 111,369,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py |
# coding: utf-8
# In[16]:
def array_missing(arr1, arr2):
    """Return the elements of arr1 missing from arr2, joined with tabs.

    The inputs are compared as multisets: each occurrence in arr2 cancels
    at most one matching occurrence in arr1.  Fix over the original: arr2
    is no longer mutated (the original called arr2.remove on the caller's
    list); a local copy is consumed instead.
    """
    pool = list(arr2)  # local copy so the caller's list is untouched
    missing = []
    for item in arr1:
        if item in pool:
            pool.remove(item)
        else:
            missing.append(item)
    return "\t".join(map(str, missing))
# In[17]:
def array_missing2(arr1, arr2):
    """Return the missing element via sort-and-compare.

    Expects arr2 to be arr1 with exactly one element removed.  Fix over
    the original: both inputs are left unmodified (the original sorted
    them in place with list.sort).
    """
    first = sorted(arr1)
    second = sorted(arr2)
    for a, b in zip(first, second):
        if a != b:
            return a
    # All pairs matched, so the missing element is the largest in arr1.
    return first[-1]
# In[19]:
import collections
def array_missing3(arr1, arr2):
    """Return the first element of arr1 not covered by arr2 (multiset
    semantics); implicitly returns None when nothing is missing."""
    available = collections.Counter(arr2)
    for candidate in arr1:
        # A zero count means arr2 has no unmatched occurrence left.
        if available[candidate] == 0:
            return candidate
        available[candidate] -= 1
# Ad-hoc sanity check: 6 is the only element of the first list absent from
# the second, so this should print "6".
print(array_missing([9,8,7,6,5,4,3,2,1],[9,8,7,5,4,3,2,1]))
# In[ ]:
| [
"12btcse1108"
] | 12btcse1108 |
c20c901564732be11e80764493ab831d08542a07 | 307e094d52f14a930c56bc2b629881d51647c676 | /src/libpolycrypto/experiments/threshsig/export-latex.py | 4d04e6da32d6fd8d363c12e539a0086a3d81b362 | [] | no_license | tyurek/libpolycrypto-docker | f7ca3be921d2383bf86dfab060d8781a9fa02fdc | d4e61020b699936ed1483601f1d7eb5c91341a8e | refs/heads/master | 2021-02-14T03:33:56.550740 | 2020-04-06T21:15:51 | 2020-04-06T21:15:51 | 244,763,536 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,064 | py | #!/usr/bin/env python2.7
import matplotlib
matplotlib.use('Agg') # otherwise script does not work when invoked over SSH
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.dates import MonthLocator, DateFormatter, DayLocator, epoch2num, num2date
import pandas
import sys
import os
import time
improvLatexSymb="\\texttimes"  # LaTeX symbol appended to the speedup factors
# Python 2 script: requires an output .tex path plus at least one CSV file.
if len(sys.argv) < 3:
    print "Usage:", sys.argv[0], "<output-latex> <csv-file> [<csv_file> ...]"
    sys.exit(0)
# Shift off the program name, then take the output path.
del sys.argv[0]
out_tex_file = sys.argv[0]
del sys.argv[0]
if not out_tex_file.endswith('.tex'):
    print "ERROR: Expected .tex file as first argument"
    sys.exit(1)
# Whatever remains on the command line are the benchmark CSV inputs.
data_files = [f for f in sys.argv]
print "Reading CSV files:", data_files, "..."
# Concatenate all benchmark runs into one frame with a fresh index.
csv_data = pandas.concat((pandas.read_csv(f) for f in data_files), ignore_index=True)
#print "Raw:"
#print csv_data.to_string()
#print csv_data.columns
#print csv_data['dictSize'].values
#print "Averaged:"
# Smallest and largest player count n observed across all runs.
minN = csv_data.n.unique().min();
maxN = csv_data.n.unique().max();
print "min N:", minN
print "max N:", maxN
#print csv_data.to_string() # print all data
print csv_data[['k','n','interpolation_method', 'lagr_hum', 'multiexp_hum', 'total_hum']].to_string()
# open the file in append mode, and truncate it to zero bytes if it has data
# ("a+" also creates the file if it does not exist yet).
f = open(out_tex_file, "a+")
isEmpty = os.fstat(f.fileno()).st_size == 0
if not isEmpty:
    f.truncate(0)
def humanizeBytes(numBytes, precision = 2):
    """Format a byte count as a human-readable string, e.g. 1536 -> '1.50 KiB'.

    Repeatedly divides by 1024 until the value drops below 1024 or the
    largest unit (TiB) is reached; *precision* sets the decimal places.
    """
    units = ("bytes", "KiB", "MiB", "GiB", "TiB")
    value = float(numBytes)
    unit_index = 0
    while value >= 1024.0 and unit_index < len(units) - 1:
        value /= 1024.0
        unit_index += 1
    return ("{:." + str(precision) + "f} {}").format(value, units[unit_index])
def humanizeMicroseconds(mus, precision = 2):
    """Format a duration given in microseconds as a human-readable string.

    Promotes through mus -> ms -> secs -> mins -> hrs -> days -> years,
    advancing one unit whenever the value reaches that unit's conversion
    factor; *precision* sets the decimal places.
    """
    units = ("mus", "ms", "secs", "mins", "hrs", "days", "years")
    # factors[i] converts units[i] into units[i + 1].
    factors = (1000.0, 1000.0, 60.0, 60.0, 24.0, 365.25)
    value = float(mus)
    index = 0
    while index < len(factors) and value >= factors[index]:
        value /= factors[index]
        index += 1
    return ("{:." + str(precision) + "f} {}").format(value, units[index])
# update avg_deal+verify_hum with humanized bytes, if they are 'nan':
for idx, r in csv_data.iterrows():
if str(r['total_hum']) == 'nan':
#print "Humanizing row:", r.values
csv_data.ix[idx, 'total_hum'] = humanizeMicroseconds(int(r['total_usec']))
print csv_data[['k','n','interpolation_method', 'lagr_hum', 'multiexp_hum', 'total_hum', 'total_usec']].to_string()
def write_latex_case_macro(f, data, macroName, col1, col2):
    """Write a LaTeX \\newcommand named *macroName* to *f* whose body maps
    each value of data[col1] onto the matching data[col2] value through
    \\IfStrEqCase, falling back to a red NODATA marker."""
    pieces = ["\\newcommand{\\" + macroName + "}[1]{%\n"]
    pieces.append(" \IfStrEqCase{#1}{")
    for _, row in data.iterrows():
        case_key = str(row[col1]).strip()
        case_value = str(row[col2]).strip()
        pieces.append("\n    {" + case_key + "}{" + case_value + "\\xspace}")
    pieces.append("}[\\textcolor{red}{\\textbf{NODATA}}]}\n\n")
    f.write("".join(pieces))
write_latex_case_macro(f, csv_data[csv_data.interpolation_method == 'naive-lagr-wnk'], 'blsNaiveTime', 'n', 'total_hum')
write_latex_case_macro(f, csv_data[csv_data.interpolation_method == 'naive-lagr-wnk'], 'naiveLagrTime', 'n', 'lagr_hum')
write_latex_case_macro(f, csv_data[csv_data.interpolation_method == 'naive-lagr-wnk'], 'multiexpTime', 'n', 'multiexp_hum')
write_latex_case_macro(f, csv_data[csv_data.interpolation_method == 'fft-eval'], 'blsEffTime', 'n', 'total_hum')
write_latex_case_macro(f, csv_data[csv_data.interpolation_method == 'fft-eval'], 'fastLagrTime', 'n', 'lagr_hum')
# compute the improvement of FFT over naive Lagr
naive = csv_data[csv_data.interpolation_method == 'naive-lagr-wnk']['total_usec']
eff = csv_data[csv_data.interpolation_method == 'fft-eval']['total_usec']
improv = naive.values / eff.values.astype(float)
print eff.values
print naive.values
improv_data = pandas.concat(
[
pandas.DataFrame(csv_data.n.unique(), columns=["n"]),
pandas.DataFrame(improv, columns=["improv"])
],
axis=1)
improv_data['improv'] = improv_data['improv'].round(decimals=2)
# extract the threshold # of players n at which we beat naive
min_improv = 1.2
outperform = improv_data[(improv_data.improv > 1.2) & (improv_data.n > 64)].copy() # we might beat the naive scheme at small thresholds too, but then later on we don't beat it anymore
outperform.reset_index(drop=True, inplace=True) # because copy() does not renumber the rows of the dataframe
outperform.sort_values(by='improv', ascending=True)
outperform_num = int(outperform.ix[0]['n'])
improv_data['improv'] = improv_data['improv'].astype(str) + improvLatexSymb
#print improv_data
write_latex_case_macro(f, improv_data, 'blsTimeImprov', 'n', 'improv')
print "Starts outperforming naive at:", outperform_num
f.write("\\newcommand{\\blsOutperformN}{" + str(outperform_num) + "}\n")
| [
"yurek2@illinois.edu"
] | yurek2@illinois.edu |
4fc430f6a69948ed46608cd576153baecb1b687a | 10679b5e5f52d8508251a77734d7b6ad3e54598f | /ansible/lib/ansible/modules/network/netvisor/pn_ipv6security_raguard.py | bc2e56eb3a08f979c821245a9795e19830d8fb95 | [] | no_license | rajaspachipulusu17/repo_with_code_refactoring | 25a695d7ecc79283501e74f92addd21d28af043b | f16aa2e2e0a7e85255bade2e686264425c2e68cb | refs/heads/master | 2020-05-18T16:38:15.967389 | 2019-08-06T10:35:08 | 2019-08-06T10:35:08 | 184,531,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,350 | py | #!/usr/bin/python
""" PN CLI ipv6security-raguard-create/modify/delete """
# Copyright 2018 Pluribus Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shlex
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_netvisor import pn_cli
DOCUMENTATION = """
---
module: pn_ipv6security_raguard
author: "Pluribus Networks (devops@pluribusnetworks.com)"
version: 2
short_description: CLI command to create/modify/delete ipv6security-raguard.
description:
- C(create): Add ipv6 RA Guard Policy
- C(modify): Update ipv6 RA guard Policy
- C(delete): Remove ipv6 RA Guard Policy
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
pn_action:
description:
- ipv6security-raguard configuration command.
required: true
choices: ['create', 'modify', 'delete']
type: str
pn_device:
description:
- RA Guard Device: host or router
required: false
choices: ['host', 'router']
pn_access_list:
description:
- RA Guard Access List of Source IPs
required: false
type: str
pn_prefix_list:
description:
- RA Guard Prefix List
required: false
type: str
pn_router_priority:
description:
- RA Guard Router Priority
required: false
choices: ['low', 'medium', 'high']
pn_name:
description:
- RA Guard Policy Name
required: false
type: str
"""
EXAMPLES = """
- name: create ipv6 security RA guard
pn_ipv6security_raguard:
pn_action: 'create'
pn_name: 'test'
pn_device: 'router'
pn_access_list: 'block_ra'
pn_prefix_list: 'block_prefix'
- name: delete ipv6 security RA guard
pn_ipv6security_raguard:
pn_action: 'delete'
pn_name: 'test'
"""
RETURN = """
command:
description: the CLI command run on the target node.
stdout:
description: set of responses from the ipv6security-raguard command.
returned: always
type: list
stderr:
description: set of error responses from the ipv6security-raguard command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
def run_cli(module, cli):
    """
    Execute the assembled CLI string on the target node(s) and exit.

    Any stderr output is treated as a failure (changed=False); otherwise
    the module exits successfully (changed=True), echoing stdout when the
    command produced any.
    :param module: The Ansible module used to run the command and exit.
    :param cli: the complete cli string to be executed on the target node(s).
    """
    action = module.params['pn_action']
    argv = shlex.split(cli)
    command_str = ' '.join(argv)
    rc, out, err = module.run_command(argv)

    msg_failed = "ipv6security-raguard %s operation failed" % action
    msg_done = "ipv6security-raguard %s operation completed" % action

    # Response in JSON format
    if err:
        module.fail_json(
            command=command_str,
            stderr=err.strip(),
            msg=msg_failed,
            changed=False
        )

    if out:
        module.exit_json(
            command=command_str,
            stdout=out.strip(),
            msg=msg_done,
            changed=True
        )
    else:
        module.exit_json(
            command=command_str,
            msg=msg_done,
            changed=True
        )
def main():
    """Parse the module arguments, build the ipv6security-raguard CLI string and run it."""
    # Declare the accepted arguments; AnsibleModule validates types/choices.
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliswitch=dict(required=False, type='str'),
            pn_action=dict(required=True, type='str',
                           choices=['create', 'modify', 'delete']),
            pn_device=dict(required=False, type='str',
                           choices=['host', 'router']),
            pn_access_list=dict(required=False, type='str'),
            pn_prefix_list=dict(required=False, type='str'),
            pn_router_priority=dict(required=False, type='str',
                                    choices=['low', 'medium', 'high']),
            pn_name=dict(required=False, type='str'),
        )
    )
    # Accessing the arguments
    action = module.params['pn_action']
    device = module.params['pn_device']
    access_list = module.params['pn_access_list']
    prefix_list = module.params['pn_prefix_list']
    router_priority = module.params['pn_router_priority']
    name = module.params['pn_name']
    # Building the CLI command string
    cli = pn_cli(module)
    cli += 'ipv6security-raguard-' + action
    # NOTE(review): pn_name is declared required=False but is concatenated
    # unconditionally; omitting it leaves `name` as None and raises
    # TypeError here — confirm whether pn_name should be required=True.
    cli += ' name ' + name
    # The optional attributes only apply when creating or modifying a policy.
    if action in ['create', 'modify']:
        if device:
            cli += ' device ' + device
        if access_list:
            cli += ' access-list ' + access_list
        if prefix_list:
            cli += ' prefix-list ' + prefix_list
        if router_priority:
            cli += ' router-priority ' + router_priority
    run_cli(module, cli)
if __name__ == '__main__':
    main()
"rajas.pachipulusu@calsoftinc.com"
] | rajas.pachipulusu@calsoftinc.com |
ced5cdbcfa8beb5a466cbf64c8a4e987e956ac19 | 973666c1cbb0ef809b72b3d70e804dd38f67e358 | /xiaoshou/adminx.py | 503079adc415a2bcc717790947d855f786dfefdf | [] | no_license | zqh1107668229/ERP_system | cc9f5bf11487564cb06ba689390a0cb1d0fd6784 | d8b65e3fffef88abf92f049765f386969dc499fc | refs/heads/master | 2020-04-06T18:32:30.744956 | 2018-11-15T11:50:21 | 2018-11-15T11:50:21 | 157,702,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,558 | py | import xadmin
from .models import Customer,SaleOrder,SaleOrder_Detail,Buy,Buy_Detail,Sale,Sale_Detail
class CustomerAdmin(object):
    """xadmin list options for the Customer model."""
    # Columns shown in the change list (translated from: 显示字段)
    list_display = ['customer_id','name','address','phone','PostalCode']
    # Fields covered by the search box (translated from: 搜索)
    search_fields = ['customer_id','name','address','phone','PostalCode']
    # Sidebar filters (translated from: 过滤器)
    list_filter = ['customer_id','name','address','phone','PostalCode']
class SaleOrderAdmin(object):
    """xadmin list options for the SaleOrder model."""
    # Columns shown in the change list (translated from: 显示字段)
    list_display = ['saleorder_id','writedate','insuredate','enddate','dept','supplier','employee']
    # Fields covered by the search box (translated from: 搜索)
    search_fields = ['saleorder_id','writedate','insuredate','enddate','dept','supplier','employee']
    # Sidebar filters (translated from: 过滤器)
    list_filter = ['saleorder_id','writedate','insuredate','enddate','dept','supplier','employee']
class SaleOrder_DetailAdmin(object):
    """xadmin list options for the SaleOrder_Detail model."""
    # Columns shown in the change list (translated from: 显示字段)
    # NOTE(review): 'peice' matches the (misspelled) model field name and
    # must not be "fixed" here alone.
    list_display = ['saleorder_id','product_id','quantity','peice']
    # Fields covered by the search box (translated from: 搜索)
    search_fields = ['saleorder_id','product_id','quantity','peice']
    # Sidebar filters (translated from: 过滤器)
    list_filter = ['saleorder_id','product_id','quantity','peice']
class BuyAdmin(object):
    """xadmin list options for the Buy model."""
    # Columns shown in the change list (translated from: 显示字段)
    list_display = ['buy_id','comedate','dept','employee']
    # Fields covered by the search box (translated from: 搜索)
    search_fields = ['buy_id','comedate','dept','employee']
    # Sidebar filters (translated from: 过滤器)
    list_filter = ['buy_id','comedate','dept','employee']
class Buy_DetailAdmin(object):
    """xadmin list options for the Buy_Detail model."""
    # Columns shown in the change list (translated from: 显示字段)
    list_display = ['buy_id','product_id','buyorder_id','quantity','peice']
    # Fields covered by the search box (translated from: 搜索)
    search_fields = ['buy_id','product_id','buyorder_id','quantity','peice']
    # Sidebar filters (translated from: 过滤器)
    list_filter = ['buy_id','product_id','buyorder_id','quantity','peice']
class SaleAdmin(object):
    """xadmin list options for the Sale model."""
    # Columns shown in the change list (translated from: 显示字段)
    list_display = ['sale_id','saledate','dept','employee']
    # Fields covered by the search box (translated from: 搜索)
    search_fields = ['sale_id','saledate','dept','employee']
    # Sidebar filters (translated from: 过滤器)
    list_filter = ['sale_id','saledate','dept','employee']
class Sale_DetailAdmin(object):
    """xadmin list options for the Sale_Detail model."""
    # Columns shown in the change list (translated from: 显示字段)
    list_display = ['sale_id','product_id','buyorder_id','quantity','peice','discount']
    # Fields covered by the search box (translated from: 搜索)
    search_fields = ['sale_id','product_id','buyorder_id','quantity','peice','discount']
    # Sidebar filters (translated from: 过滤器)
    list_filter = ['sale_id','product_id','buyorder_id','quantity','peice','discount']
# Bind each model to its xadmin options class.
xadmin.site.register(Customer,CustomerAdmin)
xadmin.site.register(SaleOrder,SaleOrderAdmin)
xadmin.site.register(SaleOrder_Detail,SaleOrder_DetailAdmin)
xadmin.site.register(Buy,BuyAdmin)
xadmin.site.register(Buy_Detail,Buy_DetailAdmin)
xadmin.site.register(Sale,SaleAdmin)
xadmin.site.register(Sale_Detail,Sale_DetailAdmin)
| [
"1107668229@qq.com"
] | 1107668229@qq.com |
66eb19b40feed309c72cca813f8b9daff434849c | 48c0a0a22a0a43546cc41ec91e07a767645ed45a | /comments/migrations/0001_initial.py | a2ddac3cf7212a2c571e12029ad9675b1552cef4 | [] | no_license | 10256017/django-practice | 56152523e8a482c537a55806328dfc9b3902b0d0 | b600526eb78d0b5c8c3a3e99e5a9c0a210c61a2d | refs/heads/master | 2022-12-10T10:57:39.959739 | 2019-07-18T08:14:25 | 2019-07-18T08:14:25 | 197,117,596 | 0 | 0 | null | 2022-12-08T05:54:39 | 2019-07-16T04:00:40 | Python | UTF-8 | Python | false | false | 563 | py | # Generated by Django 2.2.3 on 2019-07-16 05:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the Comment table."""
    # First migration of the comments app.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(max_length=3000)),
                # Stamped once, when the row is first saved.
                ('create_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"lovehyt121312@gmail.com"
] | lovehyt121312@gmail.com |
9d53d0415cfb8d1a302c8be034149c90ae17bada | c6abfc400f2f203fabd2cad69929886a8550fc98 | /dropdown/models.py | 4b6bac51fac32af563c0512fa38b165eb622a0d1 | [] | no_license | Amjad8286/DependentDropdown | 192a4b3252800fc1791ea0918d63e00012507a41 | 745a0fecc508831fa2df56f27aa979ea1d5fa5c1 | refs/heads/master | 2023-07-24T22:15:13.337122 | 2021-08-30T13:27:42 | 2021-08-30T13:27:42 | 400,624,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | from django.db import models
# Create your models here.
class Country(models.Model):
    """Top level of the country -> state -> city dependent-dropdown chain."""
    name = models.CharField(max_length=50)
    def __str__(self):
        # Shown in the admin and in rendered choice widgets.
        return self.name
class State(models.Model):
    """A state; deleted in cascade with its Country."""
    country = models.ForeignKey(Country,on_delete=models.CASCADE)
    state = models.CharField(max_length=50)
    def __str__(self):
        return self.state
class City(models.Model):
    """A city; deleted in cascade with its State."""
    state = models.ForeignKey(State,on_delete=models.CASCADE)
    city = models.CharField(max_length=50)
    def __str__(self):
        return self.city
| [
"ansari.amjad8286@gamil.com"
] | ansari.amjad8286@gamil.com |
cbd4dff58fb534940486ad7a745bc32cfa732058 | e268832c9a5ecd465851347fc870ccf92e073309 | /Top_Interview_Questions/48._Rotate_Image/solution.py | 0c0c4d7037d1b48406cc887ceb7c12e888dc1f8c | [] | no_license | hkim150/Leetcode-Problems | a995e74ecca6b34213d9fa34b0d84eea649f57c2 | 1239b805a819e4512860a6507b332636941ff3e9 | refs/heads/master | 2020-12-04T08:06:42.981990 | 2020-10-03T00:20:29 | 2020-10-03T00:20:29 | 231,688,355 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
# we can first transpose and then reverse the row
if not matrix:
return
l = len(matrix)
if l <= 1:
return
for row in range(l):
for col in range(row, l):
matrix[row][col], matrix[col][row] = matrix[col][row], matrix[row][col]
for row in range(l):
matrix[row].reverse() | [
"hkim150@illinois.edu"
] | hkim150@illinois.edu |
30e1a84c06ca00940832ccc37ecb9ec95c660bef | f1c0ce462b185f7b633acb04ee8a85fcda87c748 | /tests/ui/help/test_application_help.py | 937e44be50886b1b7c39936744091d0878f09a36 | [
"MIT"
] | permissive | cole/clikit | 056c5f388043a43971a633470122b291fb51d23f | bdb286672f93e1ff7df1d864fb0751476e034d57 | refs/heads/master | 2020-09-08T12:11:05.287042 | 2019-11-12T05:02:42 | 2020-02-22T22:19:49 | 221,129,623 | 1 | 1 | MIT | 2019-11-12T04:27:10 | 2019-11-12T04:27:09 | null | UTF-8 | Python | false | false | 5,056 | py | from clikit import ConsoleApplication
from clikit.api.args import Args
from clikit.api.args.format import ArgsFormat
from clikit.api.args.format import Option
from clikit.api.config import ApplicationConfig
from clikit.args import ArgvArgs
from clikit.ui.help import ApplicationHelp
def test_render(io):
    """Full help page: usage, arguments, global options and the command
    list with descriptions."""
    config = ApplicationConfig("test-bin")
    config.set_display_name("The Application")
    config.add_argument(
        "global-argument", description='Description of "global-argument"'
    )
    config.add_option("global-option", description='Description of "global-option"')
    with config.command("command1") as c:
        c.set_description('Description of "command1"')
    with config.command("command2") as c:
        c.set_description('Description of "command2"')
    with config.command("longer-command3") as c:
        c.set_description('Description of "longer-command3"')
    app = ConsoleApplication(config)
    # Fix: renamed from "help" to avoid shadowing the builtin help().
    help_view = ApplicationHelp(app)
    help_view.render(io)
    expected = """\
The Application
USAGE
  test-bin [--global-option] <command> [<arg1>] ... [<argN>]
ARGUMENTS
  <command> The command to execute
  <arg> The arguments of the command
GLOBAL OPTIONS
  --global-option Description of "global-option"
AVAILABLE COMMANDS
  command1 Description of "command1"
  command2 Description of "command2"
  longer-command3 Description of "longer-command3"
"""
    assert expected == io.fetch_output()
def test_sort_commands(io):
    """Commands are listed alphabetically regardless of creation order."""
    config = ApplicationConfig("test-bin")
    config.set_display_name("The Application")
    config.create_command("command3")
    config.create_command("command1")
    config.create_command("command2")
    app = ConsoleApplication(config)
    # Fix: renamed from "help" to avoid shadowing the builtin help().
    help_view = ApplicationHelp(app)
    help_view.render(io)
    expected = """\
The Application
USAGE
  test-bin <command> [<arg1>] ... [<argN>]
ARGUMENTS
  <command> The command to execute
  <arg> The arguments of the command
AVAILABLE COMMANDS
  command1
  command2
  command3
"""
    assert expected == io.fetch_output()
def test_render_version(io):
    """The configured version is appended to the display name."""
    config = ApplicationConfig("test-bin", "1.2.3")
    config.set_display_name("The Application")
    app = ConsoleApplication(config)
    # Fix: renamed from "help" to avoid shadowing the builtin help().
    help_view = ApplicationHelp(app)
    help_view.render(io)
    expected = """\
The Application version 1.2.3
USAGE
  test-bin <command> [<arg1>] ... [<argN>]
ARGUMENTS
  <command> The command to execute
  <arg> The arguments of the command
"""
    assert expected == io.fetch_output()
def test_render_default_display_name(io):
    """Without an explicit display name the script name is title-cased."""
    config = ApplicationConfig("test-bin")
    app = ConsoleApplication(config)
    # Fix: renamed from "help" to avoid shadowing the builtin help().
    help_view = ApplicationHelp(app)
    help_view.render(io)
    expected = """\
Test Bin
USAGE
  test-bin <command> [<arg1>] ... [<argN>]
ARGUMENTS
  <command> The command to execute
  <arg> The arguments of the command
"""
    assert expected == io.fetch_output()
def test_render_default_no_name(io):
    """With no name at all the generic 'Console Tool'/'console' defaults apply."""
    config = ApplicationConfig()
    app = ConsoleApplication(config)
    # Fix: renamed from "help" to avoid shadowing the builtin help().
    help_view = ApplicationHelp(app)
    help_view.render(io)
    expected = """\
Console Tool
USAGE
  console <command> [<arg1>] ... [<argN>]
ARGUMENTS
  <command> The command to execute
  <arg> The arguments of the command
"""
    assert expected == io.fetch_output()
def test_render_global_options_with_preferred_short_name(io):
    """An option preferring its short name shows -g first, long name in parens."""
    config = ApplicationConfig()
    config.add_option(
        "global-option", "g", Option.PREFER_SHORT_NAME, 'Description of "global-option"'
    )
    app = ConsoleApplication(config)
    # Fix: renamed from "help" to avoid shadowing the builtin help().
    help_view = ApplicationHelp(app)
    help_view.render(io)
    expected = """\
Console Tool
USAGE
  console [-g] <command> [<arg1>] ... [<argN>]
ARGUMENTS
  <command> The command to execute
  <arg> The arguments of the command
GLOBAL OPTIONS
  -g (--global-option) Description of "global-option"
"""
    assert expected == io.fetch_output()
def test_render_global_options_with_preferred_long_name(io):
    """An option preferring its long name shows --global-option first."""
    config = ApplicationConfig()
    config.add_option(
        "global-option", "g", Option.PREFER_LONG_NAME, 'Description of "global-option"'
    )
    app = ConsoleApplication(config)
    # Fix: renamed from "help" to avoid shadowing the builtin help().
    help_view = ApplicationHelp(app)
    help_view.render(io)
    expected = """\
Console Tool
USAGE
  console [--global-option] <command> [<arg1>] ... [<argN>]
ARGUMENTS
  <command> The command to execute
  <arg> The arguments of the command
GLOBAL OPTIONS
  --global-option (-g) Description of "global-option"
"""
    assert expected == io.fetch_output()
def test_render_description(io):
    """The configured help text is rendered in a DESCRIPTION section,
    with the {script_name} placeholder substituted."""
    app_config = ApplicationConfig()
    app_config.set_help("The help for {script_name}\n\nSecond paragraph")
    application = ConsoleApplication(app_config)
    app_help = ApplicationHelp(application)
    app_help.render(io)

    expected = """\
Console Tool
USAGE
console <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
DESCRIPTION
The help for console
Second paragraph
"""
    assert io.fetch_output() == expected
| [
"sebastien@eustace.io"
] | sebastien@eustace.io |
afa2880ef7c9ad5d7d0c8b552c93ec596a1567aa | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02712/s236119093.py | 71fea111b5f86f505bae1ae0688f57fdca6fed08 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | import sys
input = sys.stdin.readline

N = int(input())
# Sum every i in 1..N whose remainder mod 3 and mod 5 are both non-zero,
# i.e. skip multiples of 3 and multiples of 5.
ans = sum(i for i in range(1, N + 1) if i % 3 and i % 5)
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3fb518dd7c6ff02dc92ef2aab9f585d679d2e42e | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/HUAWEI-MIRROR-MIB.py | aa7dce312e3994d2b844631d194be929f967ddd7 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 26,682 | py | #
# PySNMP MIB module HUAWEI-MIRROR-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-MIRROR-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:35:05 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion")
huaweiMgmt, = mibBuilder.importSymbols("HUAWEI-MIB", "huaweiMgmt")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Counter64, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, TimeTicks, Gauge32, ObjectIdentity, MibIdentifier, Counter32, NotificationType, Unsigned32, IpAddress, ModuleIdentity, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "TimeTicks", "Gauge32", "ObjectIdentity", "MibIdentifier", "Counter32", "NotificationType", "Unsigned32", "IpAddress", "ModuleIdentity", "iso")
TruthValue, RowStatus, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "RowStatus", "TextualConvention", "DisplayString")
# Module identity for HUAWEI-MIRROR-MIB (OID 1.3.6.1.4.1.2011.5.162).
# NOTE: this file is pysmi-generated (see file header); the statements below
# register MIB objects in order, so their sequence must not be changed.
hwMirrorMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 162))
if mibBuilder.loadTexts: hwMirrorMIB.setLastUpdated('200801012030Z')
if mibBuilder.loadTexts: hwMirrorMIB.setOrganization('Huawei Technologies co.,Ltd.')
# Subtree roots: all managed objects (…162.1) and the local-mirror branch (…162.1.1).
hwMirrorMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1))
hwLocalMirror = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1))
# hwLocalObserveTable (…1.1.1), indexed by hwLocalObservePort.
hwLocalObserveTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 1), )
if mibBuilder.loadTexts: hwLocalObserveTable.setStatus('current')
hwLocalObserveEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 1, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwLocalObservePort"))
if mibBuilder.loadTexts: hwLocalObserveEntry.setStatus('current')
hwLocalObservePort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwLocalObservePort.setStatus('current')
hwLocalObserveIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalObserveIndex.setStatus('current')
hwLocalObserveWithLinkLayerHeader = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalObserveWithLinkLayerHeader.setStatus('current')
hwLocalObserveRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 1, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalObserveRowStatus.setStatus('current')
# hwLocalPortMirrorTable (…1.1.2), indexed by hwLocalMirrorPort.
hwLocalPortMirrorTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2), )
if mibBuilder.loadTexts: hwLocalPortMirrorTable.setStatus('current')
hwLocalPortMirrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwLocalMirrorPort"))
if mibBuilder.loadTexts: hwLocalPortMirrorEntry.setStatus('current')
hwLocalMirrorPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwLocalMirrorPort.setStatus('current')
hwLocalMirrorBearing = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inbound", 1), ("outbound", 2), ("inout", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalMirrorBearing.setStatus('current')
hwLocalCpuPacketFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalCpuPacketFlag.setStatus('current')
# CAR columns accept 0 or 100..2500000, in Kbps (see setUnits below).
hwLocalPortMirrorCar = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(100, 2500000), ))).setUnits('Kbps').setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalPortMirrorCar.setStatus('current')
hwLocalPortMirrorRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalPortMirrorRowStatus.setStatus('current')
# hwLocalFlowMirrorTable (…1.1.3), indexed by hwLocalBehaviorName.
hwLocalFlowMirrorTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 3), )
if mibBuilder.loadTexts: hwLocalFlowMirrorTable.setStatus('current')
hwLocalFlowMirrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 3, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwLocalBehaviorName"))
if mibBuilder.loadTexts: hwLocalFlowMirrorEntry.setStatus('current')
hwLocalBehaviorName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 3, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31)))
if mibBuilder.loadTexts: hwLocalBehaviorName.setStatus('current')
hwLocalFlowMirrorEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 3, 1, 2), EnabledStatus().clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalFlowMirrorEnable.setStatus('current')
hwLocalFlowMirrorCar = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(100, 2500000), ))).setUnits('Kbps').setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalFlowMirrorCar.setStatus('current')
hwLocalFlowMirrorRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 3, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalFlowMirrorRowStatus.setStatus('current')
# hwLocalSlotMirrorTable (…1.1.4), indexed by hwLocalSlotNo.
hwLocalSlotMirrorTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 4), )
if mibBuilder.loadTexts: hwLocalSlotMirrorTable.setStatus('current')
hwLocalSlotMirrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 4, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwLocalSlotNo"))
if mibBuilder.loadTexts: hwLocalSlotMirrorEntry.setStatus('current')
hwLocalSlotNo = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 128)))
if mibBuilder.loadTexts: hwLocalSlotNo.setStatus('current')
hwSlotObserveIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSlotObserveIndex.setStatus('current')
hwLocalSlotMirrorRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 4, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalSlotMirrorRowStatus.setStatus('current')
# hwPortMirrorInfoTable (…1.1.5), read-only columns, indexed by hwMirrorPortIndex.
hwPortMirrorInfoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5), )
if mibBuilder.loadTexts: hwPortMirrorInfoTable.setStatus('current')
hwPortMirrorInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwMirrorPortIndex"))
if mibBuilder.loadTexts: hwPortMirrorInfoEntry.setStatus('current')
hwMirrorPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwMirrorPortIndex.setStatus('current')
hwMirrorType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("local", 1), ("remote", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorType.setStatus('current')
hwMirrorCar = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(100, 2500000), ))).setUnits('Kbps').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorCar.setStatus('current')
hwMirrorClass = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("port", 1), ("policy", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorClass.setStatus('current')
hwMirrorBearing = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inbound", 1), ("outbound", 2), ("inout", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorBearing.setStatus('current')
hwMirrorCpuPacketFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 6), TruthValue().clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorCpuPacketFlag.setStatus('current')
hwMirrorWithLinkLayerHeader = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1)).clone(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorWithLinkLayerHeader.setStatus('current')
hwRemoteMirrorInstanceName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRemoteMirrorInstanceName.setStatus('current')
# Remote-mirror branch (…162.1.2) of the pysmi-generated HUAWEI-MIRROR-MIB.
hwRemoteMirror = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2))
# hwRemoteObserveTable (…1.2.1), indexed by hwRemoteObservePort.
hwRemoteObserveTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1), )
if mibBuilder.loadTexts: hwRemoteObserveTable.setStatus('current')
hwRemoteObserveEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwRemoteObservePort"))
if mibBuilder.loadTexts: hwRemoteObserveEntry.setStatus('current')
hwRemoteObservePort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwRemoteObservePort.setStatus('current')
hwRemoteIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteIdentifier.setStatus('current')
hwRemoteDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteDescription.setStatus('current')
hwRemoteObserveWithLinkLayerHeader = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteObserveWithLinkLayerHeader.setStatus('current')
hwRemoteObserveRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteObserveRowStatus.setStatus('current')
# hwRemotePortMirrorTable (…1.2.2), indexed by hwRemoteMirrorPort.
hwRemotePortMirrorTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2), )
if mibBuilder.loadTexts: hwRemotePortMirrorTable.setStatus('current')
hwRemotePortMirrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwRemoteMirrorPort"))
if mibBuilder.loadTexts: hwRemotePortMirrorEntry.setStatus('current')
hwRemoteMirrorPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwRemoteMirrorPort.setStatus('current')
hwRemoteMirrorBearing = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inbound", 1), ("outbound", 2), ("inout", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteMirrorBearing.setStatus('current')
hwRemoteCpuPacketFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteCpuPacketFlag.setStatus('current')
hwPortMirrorInstanceName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwPortMirrorInstanceName.setStatus('current')
# CAR column accepts 0 or 100..2500000, in Kbps (see setUnits).
hwRemotePortMirrorCar = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(100, 2500000), ))).setUnits('Kbps').setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemotePortMirrorCar.setStatus('current')
hwRemotePortMirrorRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemotePortMirrorRowStatus.setStatus('current')
# hwRemoteFlowMirrorTable (…1.2.3), indexed by hwRemoteBehaviorName.
hwRemoteFlowMirrorTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 3), )
if mibBuilder.loadTexts: hwRemoteFlowMirrorTable.setStatus('current')
hwRemoteFlowMirrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 3, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwRemoteBehaviorName"))
if mibBuilder.loadTexts: hwRemoteFlowMirrorEntry.setStatus('current')
hwRemoteBehaviorName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 3, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31)))
if mibBuilder.loadTexts: hwRemoteBehaviorName.setStatus('current')
hwFlowMirrorInstanceName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 3, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwFlowMirrorInstanceName.setStatus('current')
hwRemoteFlowMirrorCar = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(100, 2500000), ))).setUnits('Kbps').setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteFlowMirrorCar.setStatus('current')
hwRemoteFlowMirrorRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 3, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteFlowMirrorRowStatus.setStatus('current')
# hwRemoteMirrorInstanceTable (…1.2.4), indexed by hwMirrorInstanceName.
hwRemoteMirrorInstanceTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4), )
if mibBuilder.loadTexts: hwRemoteMirrorInstanceTable.setStatus('current')
hwRemoteMirrorInstanceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwMirrorInstanceName"))
if mibBuilder.loadTexts: hwRemoteMirrorInstanceEntry.setStatus('current')
hwMirrorInstanceName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31)))
if mibBuilder.loadTexts: hwMirrorInstanceName.setStatus('current')
hwRemoteObservePortIp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteObservePortIp.setStatus('current')
hwRemoteMirrorIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 64), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteMirrorIdentifier.setStatus('current')
hwRemoteMirrorWithLinkLayerHeader = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteMirrorWithLinkLayerHeader.setStatus('current')
hwMirrorFlowClass = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("be", 0), ("af1", 1), ("af2", 2), ("af3", 3), ("af4", 4), ("ef", 5), ("cs6", 6), ("cs7", 7)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwMirrorFlowClass.setStatus('current')
hwMirrorSliceSize = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(64, 9600), ))).setUnits('Byte').setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwMirrorSliceSize.setStatus('current')
hwMirrorTunnelIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorTunnelIndex.setStatus('current')
hwMirrorTunnelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("lspTunnel", 1), ("teTunnel", 2), ("greTunnel", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorTunnelType.setStatus('current')
hwMirrorTunnelStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorTunnelStatus.setStatus('current')
hwMirrorTunnelPolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 19))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwMirrorTunnelPolicy.setStatus('current')
hwMirrorInstanceRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwMirrorInstanceRowStatus.setStatus('current')
# Conformance section (…162.11): compliance statement plus object groups.
# The `getattr(mibBuilder, 'version', …) > (4, 4, 0)` guards keep setStatus
# calls compatible with older pysnmp releases that lack it.
hwMirrorConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11))
hwMirrorCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 1))
hwMirrorCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 1, 1)).setObjects(("HUAWEI-MIRROR-MIB", "hwLocalObserveGroup"), ("HUAWEI-MIRROR-MIB", "hwLocalPortMirrorGroup"), ("HUAWEI-MIRROR-MIB", "hwLocalFlowMirrorGroup"), ("HUAWEI-MIRROR-MIB", "hwLocalSlotMirrorGroup"), ("HUAWEI-MIRROR-MIB", "hwLocalPortMirrorInfoGroup"), ("HUAWEI-MIRROR-MIB", "hwRemoteObserveGroup"), ("HUAWEI-MIRROR-MIB", "hwRemotePortMirrorGroup"), ("HUAWEI-MIRROR-MIB", "hwRemoteFlowMirrorGroup"), ("HUAWEI-MIRROR-MIB", "hwRemoteMirrorInstanceGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwMirrorCompliance = hwMirrorCompliance.setStatus('current')
hwBaseMirrorGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2))
hwLocalObserveGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 1)).setObjects(("HUAWEI-MIRROR-MIB", "hwLocalObserveIndex"), ("HUAWEI-MIRROR-MIB", "hwLocalObserveWithLinkLayerHeader"), ("HUAWEI-MIRROR-MIB", "hwLocalObserveRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwLocalObserveGroup = hwLocalObserveGroup.setStatus('current')
hwLocalPortMirrorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 2)).setObjects(("HUAWEI-MIRROR-MIB", "hwLocalMirrorBearing"), ("HUAWEI-MIRROR-MIB", "hwLocalCpuPacketFlag"), ("HUAWEI-MIRROR-MIB", "hwLocalPortMirrorCar"), ("HUAWEI-MIRROR-MIB", "hwLocalPortMirrorRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwLocalPortMirrorGroup = hwLocalPortMirrorGroup.setStatus('current')
hwLocalFlowMirrorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 3)).setObjects(("HUAWEI-MIRROR-MIB", "hwLocalFlowMirrorEnable"), ("HUAWEI-MIRROR-MIB", "hwLocalFlowMirrorCar"), ("HUAWEI-MIRROR-MIB", "hwLocalFlowMirrorRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwLocalFlowMirrorGroup = hwLocalFlowMirrorGroup.setStatus('current')
hwLocalSlotMirrorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 4)).setObjects(("HUAWEI-MIRROR-MIB", "hwSlotObserveIndex"), ("HUAWEI-MIRROR-MIB", "hwLocalSlotMirrorRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwLocalSlotMirrorGroup = hwLocalSlotMirrorGroup.setStatus('current')
hwLocalPortMirrorInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 5)).setObjects(("HUAWEI-MIRROR-MIB", "hwMirrorType"), ("HUAWEI-MIRROR-MIB", "hwMirrorCar"), ("HUAWEI-MIRROR-MIB", "hwMirrorClass"), ("HUAWEI-MIRROR-MIB", "hwMirrorBearing"), ("HUAWEI-MIRROR-MIB", "hwMirrorCpuPacketFlag"), ("HUAWEI-MIRROR-MIB", "hwMirrorWithLinkLayerHeader"), ("HUAWEI-MIRROR-MIB", "hwRemoteMirrorInstanceName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwLocalPortMirrorInfoGroup = hwLocalPortMirrorInfoGroup.setStatus('current')
hwRemoteObserveGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 6)).setObjects(("HUAWEI-MIRROR-MIB", "hwRemoteIdentifier"), ("HUAWEI-MIRROR-MIB", "hwRemoteDescription"), ("HUAWEI-MIRROR-MIB", "hwRemoteObserveWithLinkLayerHeader"), ("HUAWEI-MIRROR-MIB", "hwRemoteObserveRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRemoteObserveGroup = hwRemoteObserveGroup.setStatus('current')
hwRemotePortMirrorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 7)).setObjects(("HUAWEI-MIRROR-MIB", "hwRemoteMirrorBearing"), ("HUAWEI-MIRROR-MIB", "hwRemoteCpuPacketFlag"), ("HUAWEI-MIRROR-MIB", "hwPortMirrorInstanceName"), ("HUAWEI-MIRROR-MIB", "hwRemotePortMirrorCar"), ("HUAWEI-MIRROR-MIB", "hwRemotePortMirrorRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRemotePortMirrorGroup = hwRemotePortMirrorGroup.setStatus('current')
hwRemoteFlowMirrorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 8)).setObjects(("HUAWEI-MIRROR-MIB", "hwFlowMirrorInstanceName"), ("HUAWEI-MIRROR-MIB", "hwRemoteFlowMirrorCar"), ("HUAWEI-MIRROR-MIB", "hwRemoteFlowMirrorRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRemoteFlowMirrorGroup = hwRemoteFlowMirrorGroup.setStatus('current')
hwRemoteMirrorInstanceGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 9)).setObjects(("HUAWEI-MIRROR-MIB", "hwRemoteObservePortIp"), ("HUAWEI-MIRROR-MIB", "hwRemoteMirrorIdentifier"), ("HUAWEI-MIRROR-MIB", "hwRemoteMirrorWithLinkLayerHeader"), ("HUAWEI-MIRROR-MIB", "hwMirrorFlowClass"), ("HUAWEI-MIRROR-MIB", "hwMirrorSliceSize"), ("HUAWEI-MIRROR-MIB", "hwMirrorTunnelIndex"), ("HUAWEI-MIRROR-MIB", "hwMirrorTunnelType"), ("HUAWEI-MIRROR-MIB", "hwMirrorTunnelStatus"), ("HUAWEI-MIRROR-MIB", "hwMirrorInstanceRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    hwRemoteMirrorInstanceGroup = hwRemoteMirrorInstanceGroup.setStatus('current')
# Export every defined symbol (plus PYSNMP_MODULE_ID) so other MIB modules can
# import them via mibBuilder; the call spans two physical lines.
mibBuilder.exportSymbols("HUAWEI-MIRROR-MIB", hwLocalFlowMirrorCar=hwLocalFlowMirrorCar, hwLocalObserveTable=hwLocalObserveTable, hwRemoteFlowMirrorRowStatus=hwRemoteFlowMirrorRowStatus, hwLocalPortMirrorTable=hwLocalPortMirrorTable, hwLocalPortMirrorCar=hwLocalPortMirrorCar, hwPortMirrorInstanceName=hwPortMirrorInstanceName, hwMirrorFlowClass=hwMirrorFlowClass, hwMirrorBearing=hwMirrorBearing, hwRemoteObserveGroup=hwRemoteObserveGroup, hwRemoteFlowMirrorTable=hwRemoteFlowMirrorTable, hwRemotePortMirrorCar=hwRemotePortMirrorCar, hwMirrorPortIndex=hwMirrorPortIndex, hwMirrorCar=hwMirrorCar, hwRemoteMirrorInstanceName=hwRemoteMirrorInstanceName, hwRemoteMirrorInstanceTable=hwRemoteMirrorInstanceTable, hwRemoteMirrorBearing=hwRemoteMirrorBearing, hwLocalObserveRowStatus=hwLocalObserveRowStatus, hwRemoteObserveRowStatus=hwRemoteObserveRowStatus, hwLocalMirror=hwLocalMirror, hwMirrorCompliance=hwMirrorCompliance, hwLocalFlowMirrorEntry=hwLocalFlowMirrorEntry, hwMirrorTunnelStatus=hwMirrorTunnelStatus, hwLocalMirrorPort=hwLocalMirrorPort, hwMirrorType=hwMirrorType, hwLocalSlotNo=hwLocalSlotNo, hwRemoteFlowMirrorEntry=hwRemoteFlowMirrorEntry, hwMirrorWithLinkLayerHeader=hwMirrorWithLinkLayerHeader, hwLocalBehaviorName=hwLocalBehaviorName, hwRemoteObserveWithLinkLayerHeader=hwRemoteObserveWithLinkLayerHeader, hwMirrorMIB=hwMirrorMIB, hwRemotePortMirrorTable=hwRemotePortMirrorTable, hwMirrorConformance=hwMirrorConformance, hwLocalObserveGroup=hwLocalObserveGroup, hwRemoteDescription=hwRemoteDescription, hwLocalPortMirrorGroup=hwLocalPortMirrorGroup, hwLocalCpuPacketFlag=hwLocalCpuPacketFlag, hwRemoteMirrorWithLinkLayerHeader=hwRemoteMirrorWithLinkLayerHeader, hwSlotObserveIndex=hwSlotObserveIndex, hwLocalFlowMirrorTable=hwLocalFlowMirrorTable, hwLocalObservePort=hwLocalObservePort, hwLocalFlowMirrorEnable=hwLocalFlowMirrorEnable, hwMirrorTunnelPolicy=hwMirrorTunnelPolicy, hwLocalPortMirrorInfoGroup=hwLocalPortMirrorInfoGroup, 
hwLocalFlowMirrorRowStatus=hwLocalFlowMirrorRowStatus, hwMirrorCompliances=hwMirrorCompliances, hwRemoteFlowMirrorGroup=hwRemoteFlowMirrorGroup, hwLocalObserveWithLinkLayerHeader=hwLocalObserveWithLinkLayerHeader, hwRemotePortMirrorGroup=hwRemotePortMirrorGroup, hwRemoteMirrorPort=hwRemoteMirrorPort, hwLocalMirrorBearing=hwLocalMirrorBearing, hwPortMirrorInfoEntry=hwPortMirrorInfoEntry, hwMirrorTunnelIndex=hwMirrorTunnelIndex, hwMirrorCpuPacketFlag=hwMirrorCpuPacketFlag, hwMirrorClass=hwMirrorClass, hwLocalPortMirrorRowStatus=hwLocalPortMirrorRowStatus, hwLocalFlowMirrorGroup=hwLocalFlowMirrorGroup, hwLocalPortMirrorEntry=hwLocalPortMirrorEntry, hwFlowMirrorInstanceName=hwFlowMirrorInstanceName, hwLocalObserveEntry=hwLocalObserveEntry, hwRemoteFlowMirrorCar=hwRemoteFlowMirrorCar, hwPortMirrorInfoTable=hwPortMirrorInfoTable, hwMirrorSliceSize=hwMirrorSliceSize, hwRemoteBehaviorName=hwRemoteBehaviorName, hwLocalSlotMirrorEntry=hwLocalSlotMirrorEntry, hwRemoteMirrorIdentifier=hwRemoteMirrorIdentifier, hwLocalSlotMirrorTable=hwLocalSlotMirrorTable, hwMirrorTunnelType=hwMirrorTunnelType, hwRemoteMirror=hwRemoteMirror, hwMirrorInstanceName=hwMirrorInstanceName, hwMirrorMIBObjects=hwMirrorMIBObjects, hwRemoteObserveEntry=hwRemoteObserveEntry, hwRemoteObserveTable=hwRemoteObserveTable, hwLocalSlotMirrorRowStatus=hwLocalSlotMirrorRowStatus, hwBaseMirrorGroup=hwBaseMirrorGroup, hwRemoteIdentifier=hwRemoteIdentifier, hwRemoteObservePortIp=hwRemoteObservePortIp, hwRemotePortMirrorEntry=hwRemotePortMirrorEntry, hwLocalSlotMirrorGroup=hwLocalSlotMirrorGroup, hwRemoteCpuPacketFlag=hwRemoteCpuPacketFlag, hwRemoteMirrorInstanceGroup=hwRemoteMirrorInstanceGroup, hwLocalObserveIndex=hwLocalObserveIndex, hwRemotePortMirrorRowStatus=hwRemotePortMirrorRowStatus, PYSNMP_MODULE_ID=hwMirrorMIB, hwMirrorInstanceRowStatus=hwMirrorInstanceRowStatus, hwRemoteObservePort=hwRemoteObservePort, hwRemoteMirrorInstanceEntry=hwRemoteMirrorInstanceEntry)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
86b85b4c0923589add18c135d861f660562dc9ff | 5c2a504d630d918ef183a0fc0ad0e8c22052bf75 | /manage.py | 0e889816b0eecc814cfae92267f50913e71b2b97 | [] | no_license | irfanmk08/demopgroject | 15f9ef5205e19d976be0225bc399ed65562b7682 | 0802b5b465aee83491dd17cee98be45a13d0d4bf | refs/heads/master | 2023-02-05T12:26:46.223915 | 2020-12-01T04:23:57 | 2020-12-01T04:23:57 | 316,126,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administrative utility."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testpgct.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        # Re-raise with a hint about the most common causes.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)


if __name__ == '__main__':
    main()
| [
"irfanmk08@gmail.com"
] | irfanmk08@gmail.com |
fd423a0b542b090db684de0a6a6f97329d80eeda | 129ea5d4b576639da63cf94dd3d1adb27422aa03 | /ceshi.py | 77dff77147c176733aea9a6bd577b26228b2cbda | [] | no_license | lianzhang132/bookroom | 55392db40bdf4bfd4d49c33d4dfb60947f954061 | 2bebdbd90be3fc356efdb6514688d1b6c7cb3c48 | refs/heads/master | 2020-07-11T01:29:03.630781 | 2019-08-26T07:21:45 | 2019-08-26T07:21:45 | 204,419,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py | import unittest
from run import app
import json
# # 可以借助于 urllib,requeset 发送请求
# import urllib
# import requests
# Login endpoint tests.
class LoginTest(unittest.TestCase):
    """Tests for the /login endpoint of the Flask app imported from run."""

    def setUp(self):
        """Runs before every test: enable test mode and build a test client."""
        app.testing = True
        self.client = app.test_client()

    def test_empty_username_password(self):
        """Posting an empty form should yield errcode 1."""
        response = self.client.post('/login', data={})
        json_dict = json.loads(response.data)
        # Assertion messages are kept in the project's original wording.
        self.assertIn('errcode', json_dict, '数据格式返回错误')
        self.assertEqual(1, json_dict['errcode'], '状态码返回错误')

    def test_username_password(self):
        """Posting wrong credentials should yield errcode 2."""
        # Bug fix: the client is an object, not a factory -- use
        # self.client.post, not self.client().post (which raised TypeError).
        response = self.client.post('/login', data={'uname': 'xiaoming', 'upass': 'abc'})
        json_dict = json.loads(response.data)
        self.assertIn('errcode', json_dict, '数据格式返回错误')
        self.assertEqual(2, json_dict['errcode'], '用户名或者密码不正确')


# TODO: registration tests (RegisterTest) and order/member/cart module tests.
if __name__ == '__main__':
    unittest.main()
"2327431669@qq.com"
] | 2327431669@qq.com |
28764e6985747a21ef890844cef47baaea2ac122 | 9d77c2434b0e804af2ecaf4a4ea6d99f6ac6d96d | /preprocessing/tf_image.py | b84b4284cb4af7ec6da652d92872d7f69efdf48a | [
"MIT"
] | permissive | UpCoder/ISBI_LiverLesionDetection | cce7c118601548eef8bfeeedfe4f50d88411a7df | e21ce360e55092a16acc4d147a1418a53545d562 | refs/heads/master | 2022-12-12T12:57:50.299718 | 2019-04-03T03:49:43 | 2019-04-03T03:49:43 | 179,206,267 | 8 | 1 | MIT | 2022-12-08T16:02:31 | 2019-04-03T03:49:00 | Python | UTF-8 | Python | false | false | 17,125 | py | # Copyright 2015 The TensorFlow Authors and Paul Balanca. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom image operations.
Most of the following methods extend TensorFlow image library, and part of
the code is shameless copy-paste of the former!
"""
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import util
# =========================================================================== #
# Modification of TensorFlow image routines.
# =========================================================================== #
def _assert(cond, ex_type, msg):
    """Polymorphic assert that works on tensors and plain booleans.

    For a tensor condition the check is deferred to graph execution time and
    a one-element list holding a TF Assert op is returned. For an ordinary
    value, `ex_type(msg)` is raised immediately when the value is falsy;
    otherwise an empty list is returned.

    Args:
      cond: Something evaluating to a boolean; may be a tensor.
      ex_type: Exception class to raise for a falsy non-tensor condition.
      msg: The error message.

    Returns:
      A list containing at most one assert op.
    """
    if _is_tensor(cond):
        # Runtime check inside the graph.
        return [control_flow_ops.Assert(cond, [msg])]
    if not cond:
        raise ex_type(msg)
    return []
def _is_tensor(x):
    """Returns True when `x` is a symbolic tensor-like object.

    Args:
      x: Any Python object.

    Returns:
      True for `tf.Tensor` / `tf.Variable`, False otherwise.
    """
    tensor_like_types = (ops.Tensor, variables.Variable)
    return isinstance(x, tensor_like_types)
def _ImageDimensions(image):
    """Returns [height, width, channels] of a 3-D image tensor.

    Dimensions that are statically known come back as Python ints; unknown
    ones are replaced by scalar int tensors evaluated at run time.

    Args:
      image: A 3-D Tensor of shape `[height, width, channels]`.

    Returns:
      A list of three ints and/or scalar tensors.
    """
    shape = image.get_shape()
    if shape.is_fully_defined():
        return shape.as_list()
    static_dims = shape.with_rank(3).as_list()
    dynamic_dims = array_ops.unstack(array_ops.shape(image), 3)
    dims = []
    for static_dim, dynamic_dim in zip(static_dims, dynamic_dims):
        # Prefer the compile-time value whenever it is known.
        dims.append(dynamic_dim if static_dim is None else static_dim)
    return dims
def _Check3DImage(image, require_static=True):
    """Assert that we are working with properly shaped image.
    Args:
      image: 3-D Tensor of shape [height, width, channels]
      require_static: If `True`, requires that all dimensions of `image` are
        known and non-zero.
    Raises:
      ValueError: if `image.shape` is not a 3-vector.
    Returns:
      An empty list, if `image` has fully defined dimensions. Otherwise, a list
      containing an assert op is returned.
    """
    # with_rank(3) raises ValueError when the static rank is known and != 3;
    # we re-raise with a clearer message.
    try:
        image_shape = image.get_shape().with_rank(3)
    except ValueError:
        raise ValueError("'image' must be three-dimensional.")
    if require_static and not image_shape.is_fully_defined():
        raise ValueError("'image' must be fully defined.")
    # Static check: any dimension that is known to be exactly 0 is an error.
    if any(x == 0 for x in image_shape):
        raise ValueError("all dims of 'image.shape' must be > 0: %s" %
                         image_shape)
    if not image_shape.is_fully_defined():
        # Shape only known at run time: emit a graph-mode assert op instead.
        return [check_ops.assert_positive(array_ops.shape(image),
                                          ["all dims of 'image.shape' "
                                           "must be > 0."])]
    else:
        return []
def fix_image_flip_shape(image, result):
    """Propagates the original image's static shape onto `result`.

    When nothing is known about the original image, the flipped/transformed
    result is at least marked as rank-3 (`[None, None, None]`).

    Args:
      image: Original image tensor.
      result: Flipped or transformed image tensor.

    Returns:
      `result`, with its static shape set.
    """
    known_shape = image.get_shape()
    if known_shape == tensor_shape.unknown_shape():
        result.set_shape([None, None, None])
    else:
        result.set_shape(known_shape)
    return result
# =========================================================================== #
# Image + BBoxes methods: cropping, resizing, flipping, ...
# =========================================================================== #
def bboxes_crop_or_pad(bboxes, xs, ys,
                       height, width,
                       offset_y, offset_x,
                       target_height, target_width):
    """Adapt bounding boxes to crop or pad operations.
    Coordinates are always supposed to be relative to the image.
    Arguments:
      bboxes: Tensor Nx4 with bboxes coordinates [y_min, x_min, y_max, x_max];
      xs, ys: Point coordinate tensors, relative to the image;
      height, width: Original image dimension;
      offset_y, offset_x: Offset to apply,
        negative if cropping, positive if padding;
      target_height, target_width: Target dimension after cropping / padding.
    Returns:
      (bboxes, xs, ys) rescaled to be relative to the target dimensions.
    """
    with tf.name_scope('bboxes_crop_or_pad'):
        # Rescale bounding boxes in pixels.
        scale = tf.cast(tf.stack([height, width, height, width]), bboxes.dtype)
        bboxes = bboxes * scale
        xs *= tf.cast(width, bboxes.dtype)
        ys *= tf.cast(height, bboxes.dtype)
        # Add offset.
        offset = tf.cast(tf.stack([offset_y, offset_x, offset_y, offset_x]), bboxes.dtype)
        bboxes = bboxes + offset
        xs += tf.cast(offset_x, bboxes.dtype)
        ys += tf.cast(offset_y, bboxes.dtype)
        # Rescale to target dimension, back to relative coordinates.
        scale = tf.cast(tf.stack([target_height, target_width,
                                  target_height, target_width]), bboxes.dtype)
        bboxes = bboxes / scale
        xs = xs / tf.cast(target_width, xs.dtype)
        ys = ys / tf.cast(target_height, ys.dtype)
        return bboxes, xs, ys
def resize_image_bboxes_with_crop_or_pad(image, bboxes, xs, ys,
                                         target_height, target_width, mask_image=None):
    """Crops and/or pads an image to a target width and height.
    Resizes an image to a target width and height by either centrally
    cropping the image or padding it evenly with zeros.
    If `width` or `height` is greater than the specified `target_width` or
    `target_height` respectively, this op centrally crops along that dimension.
    If `width` or `height` is smaller than the specified `target_width` or
    `target_height` respectively, this op centrally pads with 0 along that
    dimension.
    Bounding boxes and point coordinates (`bboxes`, `xs`, `ys`) are adjusted
    accordingly; `mask_image`, when given, is cropped/padded the same way.
    Args:
      image: 3-D tensor of shape `[height, width, channels]`
      target_height: Target height.
      target_width: Target width.
    Raises:
      ValueError: if `target_height` or `target_width` are zero or negative.
    Returns:
      A 5-tuple (resized_image, resized_mask_or_None, bboxes, xs, ys), where
      the image has shape `[target_height, target_width, channels]`.
    """
    with tf.name_scope('resize_with_crop_or_pad'):
        image = ops.convert_to_tensor(image, name='image')
        if mask_image is not None:
            # NOTE(review): these prints look like leftover debugging output;
            # consider removing or switching to logging.
            print('Image: ', image)
            print('MaskImage: ', mask_image)
            mask_image = ops.convert_to_tensor(mask_image, name='image')
        assert_ops = []
        assert_ops += _Check3DImage(image, require_static=False)
        assert_ops += _assert(target_width > 0, ValueError,
                              'target_width must be > 0.')
        assert_ops += _assert(target_height > 0, ValueError,
                              'target_height must be > 0.')
        image = control_flow_ops.with_dependencies(assert_ops, image)
        # `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks.
        # Make sure our checks come first, so that error messages are clearer.
        if _is_tensor(target_height):
            target_height = control_flow_ops.with_dependencies(
                assert_ops, target_height)
        if _is_tensor(target_width):
            target_width = control_flow_ops.with_dependencies(assert_ops, target_width)
        # Helpers that pick the graph op or the Python builtin depending on
        # whether either operand is a tensor.
        def max_(x, y):
            if _is_tensor(x) or _is_tensor(y):
                return math_ops.maximum(x, y)
            else:
                return max(x, y)
        def min_(x, y):
            if _is_tensor(x) or _is_tensor(y):
                return math_ops.minimum(x, y)
            else:
                return min(x, y)
        def equal_(x, y):
            if _is_tensor(x) or _is_tensor(y):
                return math_ops.equal(x, y)
            else:
                return x == y
        height, width, _ = _ImageDimensions(image)
        width_diff = target_width - width
        offset_crop_width = max_(-width_diff // 2, 0)
        offset_pad_width = max_(width_diff // 2, 0)
        height_diff = target_height - height
        offset_crop_height = max_(-height_diff // 2, 0)
        offset_pad_height = max_(height_diff // 2, 0)
        # Maybe crop if needed.
        height_crop = min_(target_height, height)
        width_crop = min_(target_width, width)
        cropped = tf.image.crop_to_bounding_box(image, offset_crop_height, offset_crop_width,
                                                height_crop, width_crop)
        if mask_image is not None:
            cropped_mask_image = tf.image.crop_to_bounding_box(mask_image, offset_crop_height, offset_crop_width,
                                                               height_crop, width_crop)
        # Negative offsets: the boxes follow the crop.
        bboxes, xs, ys = bboxes_crop_or_pad(bboxes, xs, ys,
                                            height, width,
                                            -offset_crop_height, -offset_crop_width,
                                            height_crop, width_crop)
        # Maybe pad if needed.
        resized = tf.image.pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width,
                                               target_height, target_width)
        if mask_image is not None:
            resized_mask_image = tf.image.pad_to_bounding_box(cropped_mask_image, offset_pad_height, offset_pad_width,
                                                              target_height, target_width)
        # Positive offsets: the boxes follow the padding.
        bboxes, xs, ys = bboxes_crop_or_pad(bboxes, xs, ys,
                                            height_crop, width_crop,
                                            offset_pad_height, offset_pad_width,
                                            target_height, target_width)
        # In theory all the checks below are redundant.
        if resized.get_shape().ndims is None:
            raise ValueError('resized contains no shape.')
        resized_height, resized_width, _ = _ImageDimensions(resized)
        assert_ops = []
        assert_ops += _assert(equal_(resized_height, target_height), ValueError,
                              'resized height is not correct.')
        assert_ops += _assert(equal_(resized_width, target_width), ValueError,
                              'resized width is not correct.')
        resized = control_flow_ops.with_dependencies(assert_ops, resized)
        if mask_image is None:
            return resized, None, bboxes, xs, ys
        else:
            return resized, resized_mask_image, bboxes, xs, ys
def resize_image(image, size,
                 method=tf.image.ResizeMethod.BILINEAR,
                 align_corners=False):
    """Resizes a single 3-D image to `size` ([height, width])."""
    with tf.name_scope('resize_image'):
        height, width, channels = _ImageDimensions(image)
        # resize_images expects a batch; add and later remove the batch dim.
        batched = tf.expand_dims(image, 0)
        resized = tf.image.resize_images(batched, size,
                                         method, align_corners)
        return tf.reshape(resized, tf.stack([size[0], size[1], channels]))
def random_flip_left_right(image, bboxes, seed=None):
    """Randomly mirrors an image horizontally together with its bboxes.

    With probability 0.5 both the image and the (relative-coordinate) boxes
    are flipped left<->right; otherwise both are returned unchanged.
    """
    def _mirror_boxes(boxes):
        # [ymin, xmin, ymax, xmax] -> [ymin, 1 - xmax, ymax, 1 - xmin]
        return tf.stack([boxes[:, 0], 1 - boxes[:, 3],
                         boxes[:, 2], 1 - boxes[:, 1]], axis=-1)
    with tf.name_scope('random_flip_left_right'):
        image = ops.convert_to_tensor(image, name='image')
        _Check3DImage(image, require_static=False)
        coin = random_ops.random_uniform([], 0, 1.0, seed=seed)
        do_flip = math_ops.less(coin, .5)
        # Flip the image and the boxes under the same random condition.
        flipped = control_flow_ops.cond(do_flip,
                                        lambda: array_ops.reverse_v2(image, [1]),
                                        lambda: image)
        bboxes = control_flow_ops.cond(do_flip,
                                       lambda: _mirror_boxes(bboxes),
                                       lambda: bboxes)
        return fix_image_flip_shape(image, flipped), bboxes
def random_rotate90(image, bboxes, xs, ys, mask_image=None):
    """Rotates the image (and optional mask) by a random multiple of 90 deg.

    The same random `k` is applied to the image, the mask and the box/point
    coordinates so they stay consistent.

    Args:
      image: 3-D image tensor.
      bboxes: Nx4 tensor of [ymin, xmin, ymax, xmax] boxes.
      xs, ys: Point coordinate tensors matching the boxes.
      mask_image: Optional mask tensor rotated with the image.

    Returns:
      (image, bboxes, xs, ys), or (image, mask_image, bboxes, xs, ys) when a
      mask is given.
    """
    with tf.name_scope('random_rotate90'):
        k = random_ops.random_uniform([], 0, 10000)
        k = tf.cast(k, tf.int32)
        # Fix: dropped the unused image_shape/h/w computation present in the
        # original code; it only created dead graph nodes.
        image = tf.image.rot90(image, k=k)
        if mask_image is not None:
            mask_image = tf.image.rot90(mask_image, k=k)
        bboxes, xs, ys = rotate90(bboxes, xs, ys, k)
        if mask_image is None:
            return image, bboxes, xs, ys
        return image, mask_image, bboxes, xs, ys
def tf_rotate_point_by_90(x, y, k):
    """Wraps util.img.rotate_point_by_90 as a TF py_func (float32 outputs)."""
    return tf.py_func(util.img.rotate_point_by_90,
                      [x, y, k],
                      [tf.float32, tf.float32])
def rotate90(bboxes, xs, ys, k):
    """Rotates bbox corners and point coordinates by k * 90 degrees.

    After rotating both corners, min/max are re-taken so the returned boxes
    remain axis-aligned with ymin <= ymax and xmin <= xmax.
    """
    ymin, xmin, ymax, xmax = [bboxes[:, i] for i in range(4)]
    xmin, ymin = tf_rotate_point_by_90(xmin, ymin, k)
    xmax, ymax = tf_rotate_point_by_90(xmax, ymax, k)
    rotated = tf.stack([tf.minimum(ymin, ymax),
                        tf.minimum(xmin, xmax),
                        tf.maximum(ymin, ymax),
                        tf.maximum(xmin, xmax)])
    bboxes = tf.transpose(rotated)
    xs, ys = tf_rotate_point_by_90(xs, ys, k)
    return bboxes, xs, ys
if __name__ == "__main__":
    # Visual smoke test: load an image, double its size via central pad, and
    # display the original and resized images with their boxes drawn.
    import util
    image_path = '~/Pictures/img_1.jpg'
    image_data = util.img.imread(image_path, rgb = True)
    # Two pixel-coordinate boxes as [xmin, ymin, xmax, ymax].
    bbox_data = [[100, 100, 300, 300], [400, 400, 500, 500]]
    def draw_bbox(img, bbox):
        # Draws a red rectangle for one [xmin, ymin, xmax, ymax] box.
        xmin, ymin, xmax, ymax = bbox
        util.img.rectangle(img, left_up = (xmin, ymin),
                           right_bottom = (xmax, ymax),
                           color = util.img.COLOR_RGB_RED,
                           border_width = 10)
    image = tf.placeholder(dtype = tf.uint8)
    bboxes = tf.placeholder(dtype = tf.int32)
    bboxes_float32 = tf.cast(bboxes, dtype = tf.float32)
    image_shape = tf.cast(tf.shape(image), dtype = tf.float32)
    image_h, image_w = image_shape[0], image_shape[1]
    # Normalize pixel boxes to relative [0, 1] coordinates.
    xmin, ymin, xmax, ymax = [bboxes_float32[:, i] for i in range(4)]
    bboxes_normed = tf.stack([xmin / image_w, ymin / image_h,
                              xmax / image_w, ymax / image_h])
    bboxes_normed = tf.transpose(bboxes_normed)
    target_height = image_h * 2
    target_width = image_w * 2
    target_height = tf.cast(target_height, tf.int32)
    target_width = tf.cast(target_width, tf.int32)
    # NOTE(review): resize_image_bboxes_with_crop_or_pad takes
    # (image, bboxes, xs, ys, target_height, target_width, mask_image=None)
    # and returns a 5-tuple; this call passes 4 positional args and unpacks
    # 2 results, so the demo appears out of date with the function above —
    # confirm and update before relying on it.
    processed_image, processed_bboxes = resize_image_bboxes_with_crop_or_pad(image, bboxes_normed,
                                                                             target_height, target_width)
    with tf.Session() as sess:
        resized_image, resized_bboxes = sess.run(
            [processed_image, processed_bboxes],
            feed_dict = {image: image_data, bboxes: bbox_data})
    for _bbox in bbox_data:
        draw_bbox(image_data, _bbox)
    util.plt.imshow('image_data', image_data)
    h, w = resized_image.shape[0:2]
    for _bbox in resized_bboxes:
        # De-normalize the returned relative boxes back to pixels.
        _bbox *= [w, h, w, h]
        draw_bbox(resized_image, _bbox)
    util.plt.imshow('resized_image', resized_image)
| [
"546043882@qq.com"
] | 546043882@qq.com |
ffb47844aa469d4c90867e905b871c95d71323e5 | bf7871f516bd9dd5a39bef41d58a3e26072d79bd | /app/reservation/__init__.py | 141b71dc564db5374253b0bdbc10f52bcba5c503 | [] | no_license | patromi/SensoPark.v2 | 11fe193c5d0e7b3b0781c3ee02050abbff6d6904 | ba00b1e22fc1a3efa18302507fca1313c2dea282 | refs/heads/main | 2023-08-05T07:44:11.648847 | 2021-09-26T15:19:36 | 2021-09-26T15:19:36 | 355,197,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from flask import Blueprint
reservation = Blueprint('reservation', __name__)
from . import views
| [
"noreply@github.com"
] | noreply@github.com |
5ab2fec2b8f90755f0c2c41cd1c55b6a58f2d869 | ea02eb8c52ef66fe8399516dc0103b95ea1dd7c4 | /leo/lilac.py | 62a481a1484a09dabefd18f739923e60614fee7a | [] | no_license | y010204025/repo | 6c9d9601a14b8d003789bfe8266b1e10e9d41a49 | 074fef70cdccf3c62092a848e88bb27fbabea8d3 | refs/heads/master | 2020-03-23T03:38:57.191796 | 2018-07-15T14:51:38 | 2018-07-15T14:51:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | #!/usr/bin/env python3
#
# This is a complex version of lilac.py for building
# a package from AUR.
#
# You can do something before/after building a package,
# including modify the 'pkgver' and 'md5sum' in PKBUILD.
#
# This is especially useful when a AUR package is
# out-of-date and you want to build a new one, or you
# want to build a package directly from sourceforge but
# using PKGBUILD from AUR.
#
# See also:
# [1] ruby-sass/lilac.py
# [2] aufs3-util-lily-git/lilac.py
# [3] octave-general/lilac.py
#
from lilaclib import *
# Build chroot to use (Arch Linux 'extra' repository, x86_64).
build_prefix = 'extra-x86_64'
def pre_build():
    """Fetches the AUR package, then rewrites PKGBUILD line by line.

    Adds 'python-setuptools' to the depends= array, and bumps pkgrel from 1
    to 2 once a pkgver=5.1 line has been seen.
    """
    aur_pre_build()
    bump_pkgrel = False
    for raw_line in edit_file('PKGBUILD'):
        stripped = raw_line.strip()
        if stripped.startswith("depends="):
            # Insert the extra dependency just before the closing token.
            parts = raw_line.split(" ")
            parts.insert(-1, "'python-setuptools'")
            raw_line = " ".join(parts)
        if stripped.startswith("pkgver=5.1"):
            bump_pkgrel = True
        if bump_pkgrel and stripped.startswith("pkgrel=1"):
            raw_line = "pkgrel=2"
        print(raw_line)
# Reuse the stock AUR post-build step provided by lilaclib unchanged.
post_build = aur_post_build
# do some cleanup here after building the package, regardless of result
# def post_build_always(success):
#     pass
if __name__ == '__main__':
    # Build only this package when the script is run directly.
    single_main(build_prefix)
| [
"farseerfc@gmail.com"
] | farseerfc@gmail.com |
cb83aa5000ddae05ad0d2cabf7459748dda5da7f | f48e82679edf65792ab06d4f6e332ff3fa232c86 | /apps/characters/migrations/0001_initial.py | 80a01685469bea8dec44b6932e27ea441c663267 | [] | no_license | rofklaw/dnd | ac7d824922cf40ac961ce1042a989b574bb3a033 | 2c861e03bddc1af42cd5ebe4944e23a040a2076c | refs/heads/master | 2021-01-23T06:44:53.064510 | 2017-09-08T22:23:19 | 2017-09-08T22:23:19 | 86,395,166 | 2 | 0 | null | 2017-03-30T23:01:03 | 2017-03-27T23:51:56 | Python | UTF-8 | Python | false | false | 3,110 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-31 19:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.6 (see file header). Do not hand-edit the
    # operations once this migration has been applied to any database.
    # First migration of the `characters` app.
    initial = True
    # Requires the user table from the logreg app (Character.user FK below).
    dependencies = [
        ('logreg', '0001_initial'),
    ]
    operations = [
        # Per-character ability scores and their derived modifiers.
        migrations.CreateModel(
            name='Attribute',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('strength', models.IntegerField()),
                ('strModifier', models.IntegerField()),
                ('dexterity', models.IntegerField()),
                ('dexModifier', models.IntegerField()),
                ('constitution', models.IntegerField()),
                ('conModifier', models.IntegerField()),
                ('intelligence', models.IntegerField()),
                ('intModifier', models.IntegerField()),
                ('wisdom', models.IntegerField()),
                ('wisModifier', models.IntegerField()),
                ('charisma', models.IntegerField()),
                ('chaModifier', models.IntegerField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        # The character sheet itself, owned by a logreg.User.
        migrations.CreateModel(
            name='Character',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('level', models.IntegerField()),
                ('experience', models.IntegerField()),
                ('my_class', models.CharField(max_length=50)),
                ('race', models.CharField(max_length=50)),
                ('background', models.CharField(max_length=50)),
                ('alignment', models.CharField(max_length=45)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_characters', to='logreg.User')),
            ],
        ),
        # Items carried by a character (many equipment rows per character).
        migrations.CreateModel(
            name='Equipment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('equipment', models.CharField(max_length=100)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('character', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_equipment', to='characters.Character')),
            ],
        ),
        # Added after both models exist to break the creation-order cycle.
        migrations.AddField(
            model_name='attribute',
            name='character',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attributes', to='characters.Character'),
        ),
    ]
| [
"briggs.mcknight@gmail.com"
] | briggs.mcknight@gmail.com |
dd54c49f97d052f3d01d460d92b0d7b59506280b | 8dc84558f0058d90dfc4955e905dab1b22d12c08 | /tools/cygprofile/patch_orderfile.py | 071861e53b11cf764d6c45f7196291be77777555 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LGPL-2.1-only",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown"
] | permissive | meniossin/src | 42a95cc6c4a9c71d43d62bc4311224ca1fd61e03 | 44f73f7e76119e5ab415d4593ac66485e65d700a | refs/heads/master | 2022-12-16T20:17:03.747113 | 2020-09-03T10:43:12 | 2020-09-03T10:43:12 | 263,710,168 | 1 | 0 | BSD-3-Clause | 2020-05-13T18:20:09 | 2020-05-13T18:20:08 | null | UTF-8 | Python | false | false | 16,496 | py | #!/usr/bin/env vpython
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Patch an orderfile.
Starting with a list of symbols in a binary and an orderfile (ordered list of
sections), matches the symbols in the orderfile and augments each symbol with
the symbols residing at the same address (due to having identical code). The
output is a list of section or symbols matching rules appropriate for the linker
option -section-ordering-file for gold and --symbol-ordering-file for lld. Both
linkers are fine with extra directives that aren't matched in the binary, so we
construct a file suitable for both, concatenating sections and symbols. We
assume that the unpatched orderfile is built for gold, that is, it only contains
sections.
Note: It is possible to have.
- Several symbols mapping to the same offset in the binary.
- Several offsets for a given symbol (because we strip the ".clone." and other
suffixes)
The general pipeline is:
1. Get the symbol infos (name, offset, size, section) from the binary
2. Get the symbol names from the orderfile
3. Find the orderfile symbol names in the symbols coming from the binary
4. For each symbol found, get all the symbols at the same address
5. Output them to an updated orderfile suitable for gold and lld
6. Output catch-all section matching rules for unprofiled methods. This is
ineffective for lld, as it doesn't handle wildcards, but puts unordered
symbols after the ordered ones.
"""
import argparse
import collections
import functools
import logging
import sys

import cyglog_to_orderfile
import cygprofile_utils
import symbol_extractor
# Prefixes for the symbols. We strip them from the incoming symbols, and add
# them back in the output file.
# Output sections are constructed as prefix + symbol_name, hence the empty
# prefix is used to generate the symbol entry for lld.
_PREFIXES = ('.text.hot.', '.text.unlikely.', '.text.', '')
# Suffixes for the symbols. These are due to method splitting for inlining and
# method cloning for various reasons including constant propagation and
# inter-procedural optimization.
_SUFFIXES = ('.clone.', '.part.', '.isra.', '.constprop.')


def RemoveSuffixes(name):
  """Returns `name` truncated before the first cloning/splitting suffix.

  .clone. comes from cloning in -O3.
  .part. comes from partial method splitting for inlining.
  .isra. comes from inter-procedural optimizations.
  .constprop. is cloning for constant propagation.
  """
  for marker in _SUFFIXES:
    cut = name.find(marker)
    if cut != -1:
      name = name[:cut]
  return name
def _UniqueGenerator(generator):
  """Decorator: makes a generator skip elements it has already yielded.

  Example:
    @_UniqueGenerator
    def Foo():
      yield 1
      yield 2
      yield 1
      yield 3

    Foo() yields 1,2,3.

  Args:
    generator: The generator function to wrap.

  Returns:
    A generator function yielding each distinct value once, in first-seen
    order.
  """
  # functools.wraps preserves the wrapped generator's __name__/__doc__ for
  # debugging and introspection (the original wrapper hid them).
  @functools.wraps(generator)
  def _FilteringFunction(*args, **kwargs):
    returned = set()
    for item in generator(*args, **kwargs):
      if item in returned:
        continue
      returned.add(item)
      yield item
  return _FilteringFunction
def _GroupSymbolInfosFromBinary(binary_filename):
  """Groups all the symbols from a binary by name and by offset.

  Symbol names are normalized with RemoveSuffixes() before grouping.

  Args:
    binary_filename: path to the binary.

  Returns:
    A tuple of dicts (offset_to_symbol_infos, name_to_symbol_infos):
      - offset_to_symbol_infos: {offset: [symbol_info1, ...]}
      - name_to_symbol_infos: {name: [symbol_info1, ...]}
  """
  raw_infos = symbol_extractor.SymbolInfosFromBinary(binary_filename)
  stripped_infos = []
  for info in raw_infos:
    stripped_infos.append(info._replace(name=RemoveSuffixes(info.name)))
  by_offset = symbol_extractor.GroupSymbolInfosByOffset(stripped_infos)
  by_name = symbol_extractor.GroupSymbolInfosByName(stripped_infos)
  return (by_offset, by_name)
def _StripPrefix(line):
  """Strips the linker section name prefix from a symbol line.

  Args:
    line: a line from an orderfile, usually in the form:
          .text.SymbolName

  Returns:
    The symbol, SymbolName in the example above.
  """
  # Went away with GCC, make sure it doesn't come back, as the orderfile
  # no longer contains it.
  assert not line.startswith('.text.startup.')
  matches = [p for p in _PREFIXES if p and line.startswith(p)]
  if matches:
    # _PREFIXES is ordered longest-first, so the first match is the one
    # to strip.
    return line[len(matches[0]):]
  return line  # Unprefixed case
def _SectionNameToSymbols(section_name, section_to_symbols_map):
  """Yields all symbols which could be referred to by section_name.

  If the section name is present in the map, the mapped names are yielded.
  Otherwise the prefix is stripped from the section name and the remainder
  is yielded (when non-empty).
  """
  # Catch-all patterns and the bare .text section name no specific symbol.
  is_catch_all = (not section_name or
                  section_name == '.text' or
                  section_name.endswith('*'))
  if is_catch_all:
    return
  if section_name in section_to_symbols_map:
    for known_symbol in section_to_symbols_map[section_name]:
      yield known_symbol
  else:
    stripped = _StripPrefix(section_name)
    if stripped:
      yield stripped
def GetSectionsFromOrderfile(filename):
  """Yields the sections from an orderfile.

  Empty lines are skipped; trailing newlines are stripped.

  Args:
    filename: The name of the orderfile.

  Yields:
    A list of symbol names.
  """
  with open(filename, 'r') as f:
    # Iterate the file object directly: file.xreadlines() is deprecated and
    # was removed in Python 3; plain iteration behaves identically.
    for line in f:
      line = line.rstrip('\n')
      if line:
        yield line
@_UniqueGenerator
def GetSymbolsFromOrderfile(filename, section_to_symbols_map):
  """Yields the symbols from an orderfile. Output elements do not repeat.

  Args:
    filename: The name of the orderfile.
    section_to_symbols_map: The mapping from section to symbol names. If a
        section name is missing from the mapping, the symbol name is assumed
        to be the section name with prefixes and suffixes stripped.

  Yields:
    A list of symbol names.
  """
  # TODO(lizeb,pasko): Move this method to symbol_extractor.py
  for section_name in GetSectionsFromOrderfile(filename):
    normalized = RemoveSuffixes(section_name)
    for symbol in _SectionNameToSymbols(normalized, section_to_symbols_map):
      yield symbol
def _SymbolsWithSameOffset(profiled_symbol, name_to_symbol_info,
                           offset_to_symbol_info):
  """Expands a symbol to include all symbols with the same offset.

  Args:
    profiled_symbol: the string symbol name to be expanded.
    name_to_symbol_info: {name: [symbol_info1], ...}, as returned by
        GetSymbolInfosFromBinary
    offset_to_symbol_info: {offset: [symbol_info1, ...], ...}

  Returns:
    A list of symbol names, or an empty list if profiled_symbol was not in
    name_to_symbol_info.
  """
  if profiled_symbol not in name_to_symbol_info:
    return []
  # Every symbol that shares an offset with any instance of profiled_symbol.
  return [alias.name
          for info in name_to_symbol_info[profiled_symbol]
          for alias in offset_to_symbol_info[info.offset]]
@_UniqueGenerator
def _SectionMatchingRules(section_name, name_to_symbol_infos,
                          offset_to_symbol_infos, section_to_symbols_map,
                          symbol_to_sections_map, suffixed_sections):
  """Yields the set of section matching rules for section_name.

  These rules include section_name itself, plus any sections which may
  contain the same code due to cloning, splitting, or identical code
  folding. Duplicates are filtered out by the decorator.

  Args:
    section_name: The section to expand.
    name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
        GetSymbolInfosFromBinary.
    offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
    section_to_symbols_map: The mapping from section to symbol name; missing
        entries are treated as per _SectionNameToSymbols.
    symbol_to_sections_map: The mapping from symbol name to names of linker
        sections containing the symbol; missing entries are generated from
        _PREFIXES plus the symbol name.
    suffixed_sections: A set of sections which can have suffixes.

  Yields:
    Section names including at least section_name.
  """
  for rule in _ExpandSection(section_name, name_to_symbol_infos,
                             offset_to_symbol_infos, section_to_symbols_map,
                             symbol_to_sections_map):
    yield rule
    if rule not in suffixed_sections:
      continue
    # Only a subset of methods (mostly those compiled with O2) ever get
    # suffixes, so the wildcard is only emitted where it can help; emitting
    # one per suffix in _SUFFIXES would make linking unbearably slow.
    # This gets almost all the benefit at a much lower link-time cost, but
    # could match unexpected suffixes.
    yield rule + '.*'
def _ExpandSection(section_name, name_to_symbol_infos, offset_to_symbol_infos,
                   section_to_symbols_map, symbol_to_sections_map):
  """Yields the set of section names for section_name.

  The output includes section_name, plus any sections which may contain the
  same code due to identical code folding.

  Args:
    section_name: The section to expand.
    name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
        GetSymbolInfosFromBinary.
    offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
    section_to_symbols_map: The mapping from section to symbol name; missing
        entries are treated as per _SectionNameToSymbols.
    symbol_to_sections_map: The mapping from symbol name to names of linker
        sections containing the symbol; missing entries are generated from
        _PREFIXES plus the symbol name.

  Yields:
    Section names including at least section_name.
  """
  yield section_name
  for seed_symbol in _SectionNameToSymbols(section_name,
                                           section_to_symbols_map):
    aliases = _SymbolsWithSameOffset(seed_symbol, name_to_symbol_infos,
                                     offset_to_symbol_infos)
    for alias in aliases:
      if alias in symbol_to_sections_map:
        for known_section in symbol_to_sections_map[alias]:
          yield known_section
      # Also emit the synthesized prefix+symbol section names.
      for prefix in _PREFIXES:
        yield prefix + alias
@_UniqueGenerator
def _ExpandSections(section_names, name_to_symbol_infos,
                    offset_to_symbol_infos, section_to_symbols_map,
                    symbol_to_sections_map, suffixed_sections):
  """Yields an ordered, duplicate-free stream of section matching rules.

  Args:
    section_names: The sections to expand.
    name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
        _GroupSymbolInfosFromBinary.
    offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
    section_to_symbols_map: The mapping from section to symbol names.
    symbol_to_sections_map: The mapping from symbol name to names of linker
        sections containing the symbol.
    suffixed_sections: A set of sections which can have suffixes.

  Yields:
    Section matching rules including at least section_names.
  """
  for seed_section in section_names:
    rules = _SectionMatchingRules(
        seed_section, name_to_symbol_infos, offset_to_symbol_infos,
        section_to_symbols_map, symbol_to_sections_map, suffixed_sections)
    for rule in rules:
      yield rule
def _CombineSectionListsByPrimaryName(symbol_to_sections_map):
  """Combines values of the symbol_to_sections_map by stripping suffixes.

  Example:
    {foo: [.text.foo, .text.bar.part.1],
     foo.constprop.4: [.text.baz.constprop.3]} ->
        {foo: [.text.foo, .text.bar, .text.baz]}

  Args:
    symbol_to_sections_map: Mapping from symbol name to list of section names

  Returns:
    The same mapping, but with symbol and section names suffix-stripped.
  """
  simplified = {}
  # items() instead of the Python 2-only iteritems(): identical iteration,
  # and keeps the file forward-compatible with Python 3.
  for suffixed_symbol, suffixed_sections in symbol_to_sections_map.items():
    symbol = RemoveSuffixes(suffixed_symbol)
    sections = [RemoveSuffixes(section) for section in suffixed_sections]
    simplified.setdefault(symbol, []).extend(sections)
  return simplified
def _SectionsWithSuffixes(symbol_to_sections_map):
  """Finds sections which have suffixes applied.

  Args:
    symbol_to_sections_map: a map where the values are lists of section names.

  Returns:
    A set containing all section names which were seen with suffixes applied.
  """
  sections_with_suffixes = set()
  # values() instead of the Python 2-only itervalues(): identical iteration,
  # and keeps the file forward-compatible with Python 3.
  for suffixed_sections in symbol_to_sections_map.values():
    for suffixed_section in suffixed_sections:
      section = RemoveSuffixes(suffixed_section)
      # A name that changed when stripped must have carried a suffix.
      if section != suffixed_section:
        sections_with_suffixes.add(section)
  return sections_with_suffixes
def _StripSuffixes(section_list):
  """Returns section_list with clone/split suffixes removed from each item."""
  return list(map(RemoveSuffixes, section_list))
def GeneratePatchedOrderfile(unpatched_orderfile, native_lib_filename,
                             output_filename):
  """Writes a patched orderfile.

  Pipeline: group the binary's symbols by name/offset, build the
  section<->symbol mappings from the object files, expand the profiled
  sections into matching rules, then write the anchored, ordered output.

  Args:
    unpatched_orderfile: (str) Path to the unpatched orderfile.
    native_lib_filename: (str) Path to the native library.
    output_filename: (str) Path to the patched orderfile.
  """
  (offset_to_symbol_infos, name_to_symbol_infos) = _GroupSymbolInfosFromBinary(
      native_lib_filename)
  obj_dir = cygprofile_utils.GetObjDir(native_lib_filename)
  raw_symbol_map = cyglog_to_orderfile.ObjectFileProcessor(
      obj_dir).GetSymbolToSectionsMap()
  # Record which sections were ever seen with suffixes before stripping them.
  suffixed = _SectionsWithSuffixes(raw_symbol_map)
  symbol_to_sections_map = _CombineSectionListsByPrimaryName(raw_symbol_map)
  section_to_symbols_map = cygprofile_utils.InvertMapping(
      symbol_to_sections_map)
  profiled_sections = _StripSuffixes(
      GetSectionsFromOrderfile(unpatched_orderfile))
  # Lazy generator; consumed below while writing the output file.
  expanded_sections = _ExpandSections(
      profiled_sections, name_to_symbol_infos, offset_to_symbol_infos,
      section_to_symbols_map, symbol_to_sections_map, suffixed)
  with open(output_filename, 'w') as f:
    # Make sure the anchor functions are located in the right place, here and
    # after everything else.
    # See the comment in //base/android/library_loader/anchor_functions.cc.
    #
    # __cxx_global_var_init is one of the largest symbols (~38kB as of May
    # 2018), called extremely early, and not instrumented.
    first_sections = ('dummy_function_start_of_ordered_text',
                      '__cxx_global_var_init')
    for section in first_sections:
      for prefix in _PREFIXES:
        f.write(prefix + section + '\n')
    for section in expanded_sections:
      f.write(section + '\n')
    for prefix in _PREFIXES:
      f.write(prefix + 'dummy_function_end_of_ordered_text\n')
    # The following is needed otherwise Gold only applies a partial sort.
    f.write('.text\n')  # gets methods not in a section, such as assembly
    f.write('.text.*\n')  # gets everything else
    # Since wildcards are not supported by lld, the "end of text" anchor symbol
    # is not emitted, a different mechanism is used instead. See comments in the
    # file above.
    for prefix in _PREFIXES:
      if prefix:
        f.write(prefix + 'dummy_function_at_the_end_of_text\n')
def _CreateArgumentParser():
  """Builds and returns the command-line parser for this script."""
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument('--target-arch', action='store',
                          choices=['arm', 'arm64', 'x86', 'x86_64', 'x64',
                                   'mips'],
                          help='The target architecture for the library.')
  arg_parser.add_argument('--unpatched-orderfile', required=True,
                          help='Path to the unpatched orderfile')
  arg_parser.add_argument('--native-library', required=True,
                          help='Path to the native library')
  arg_parser.add_argument('--output-file', required=True,
                          help='Output filename')
  return arg_parser
def main():
  """Entry point: parses arguments and writes the patched orderfile."""
  parser = _CreateArgumentParser()
  options = parser.parse_args()
  if not options.target_arch:
    # Bug fix: the detected architecture used to be stored in the unused
    # attribute options.arch, so SetArchitecture() below received None when
    # --target-arch was not given on the command line.
    options.target_arch = cygprofile_utils.DetectArchitecture()
  symbol_extractor.SetArchitecture(options.target_arch)
  GeneratePatchedOrderfile(options.unpatched_orderfile, options.native_library,
                           options.output_file)
  return 0
if __name__ == '__main__':
  # Configure INFO-level logging before delegating to main().
  logging.basicConfig(level=logging.INFO)
  sys.exit(main())
| [
"arnaud@geometry.ee"
] | arnaud@geometry.ee |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.