repo_name
stringlengths
6
101
path
stringlengths
4
300
text
stringlengths
7
1.31M
nabiirah/advent-of-code
2018/day_22.py
"""Advent of Code Day 22 - Mode Maze""" def map_cave(target, depth): """Calculate and map the cave coordinates with padding around target.""" target_x, target_y = target cave = {} for y in range(target_y + 50): for x in range(target_x + 50): erosion_level = calculate_erosion((x, y), depth, target, cave) cave[(x, y)] = erosion_level # Calculated erosion to make calculation easy then convert to type for coords, erosion_level in cave.items(): cave[coords] %= 3 return cave def calculate_erosion(coords, depth, target, cave): """Return the erosion level of a coordinate.""" x, y = coords if (x, y) == (0, 0): return (0 + depth) % 20183 elif (x, y) == target: return (0 + depth) % 20183 elif y == 0: return (x * 16807 + depth) % 20183 elif x == 0: return (y * 48271 + depth) % 20183 else: return (cave[(x-1, y)] * cave[(x, y-1)] + depth) % 20183 def find_path(target, cave): """Return quickest path to target- Dijkstra with (coords, tool) nodes.""" tools = {0: ('c', 't'), 1: ('c', 'n'), 2:('t', 'n')} distances = {((0, 0), 't'): 0} initial_node = ((0, 0), 't') visited = set((initial_node)) nodes = [] node = initial_node while True: coords, tool = node visited.add((coords, tool)) x, y = coords adjacent = [(x-1, y), (x+1, y), (x, y-1), (x, y+1)] for neighbour in adjacent: if neighbour not in cave or neighbour in visited: continue node_distance = distances[node] distance = distances.get((neighbour, tool), 1000000) neighbour_tools = tools[cave[neighbour]] if tool in neighbour_tools: if node_distance + 1 < distance: distances[(neighbour, tool)] = node_distance + 1 nodes.append((neighbour, tool)) continue current_tools = tools[cave[coords]] # Change tool, update node and add it to nodes if quicker if current_tools[0] in neighbour_tools: updated_node = (coords, current_tools[0]) distance = distances.get(updated_node, 1000000) if node_distance + 7 < distance: distances[updated_node] = node_distance + 7 nodes.append(updated_node) elif current_tools[1] in neighbour_tools: updated_node = 
(coords, current_tools[1]) distance = distances.get(updated_node, 1000000) if node_distance + 7 < distance: distances[updated_node] = node_distance + 7 nodes.append(updated_node) # Add to visited visited.add((node)) # Choose next node nodes = sorted(nodes, key=lambda x: distances[x], reverse=True) node = nodes.pop() # Check if target reached add tool switch time if required if node[0] == target: if node[1] == 't': return distances[node] else: return distances[node] + 7 def main(): """Map cave, evaluate risk level and find quickest path to target.""" target = (13, 726) depth = 3066 cave = map_cave(target, depth) # Answer One risk_level = 0 for coords, terrain in cave.items(): if coords[0] <= target[0] and coords[1] <= target[1]: risk_level += terrain print("Risk level:", risk_level) # Answer Two quickest_path = find_path(target, cave) print("Quickest Path:", quickest_path) if __name__ == '__main__': main()
thecodecafe/sterlin
__tests__/utils/Encryption.test.js
require('../../configs/dotenv');
const {encrypto, decrypto} = require('../../utils/Encryption.util');

describe('<Encryption.encrypto>', () => {
  // Encryption round: every supported type serializes to an opaque string.
  describe('Encrypto', () => {
    it('should encrypt a given data', () => {
      expect(encrypto('string')).toBeDefined();
    });
    it('should encrypt string', () => {
      const data = 'password';
      const encrypted = encrypto(data);
      expect(encrypted).toBeDefined();
      expect(encrypted).not.toBe(data);
      expect(typeof(encrypted)).toBe('string');
    });
    it('should encrypt number', () => {
      const data = 348734;
      const encrypted = encrypto(data);
      expect(encrypted).toBeDefined();
      expect(encrypted).not.toBe(data);
      expect(typeof(encrypted)).toBe('string');
    });
    it('should encrypt array', () => {
      const data = [4, 5];
      const encrypted = encrypto(data);
      expect(encrypted).toBeDefined();
      expect(encrypted).not.toBe(data);
      expect(typeof(encrypted)).toBe('string');
    });
    it('should encrypt object', () => {
      const data = {name: 'Jon'};
      const encrypted = encrypto(data);
      expect(encrypted).toBeDefined();
      expect(encrypted).not.toBe(data);
      expect(typeof(encrypted)).toBe('string');
    });
    it('should encrypt boolean', () => {
      const data = true;
      const encrypted = encrypto(data);
      expect(encrypted).toBeDefined();
      expect(encrypted).not.toBe(data);
      expect(typeof(encrypted)).toBe('string');
    });
    it('should not encrypt null or undefined', () => {
      expect(encrypto(null)).toBe(null);
      expect(encrypto()).toBe(null);
    });
  });

  // Decryption round: decrypto must restore the original value.
  // (This block was previously mislabeled 'Encrypto' with "encrypt" test names.)
  describe('Decrypto', () => {
    let encrypted;
    const defaultData = 'something';
    beforeEach(() => {
      encrypted = encrypto(defaultData);
    });
    it('should decrypt a given data', () => {
      expect(decrypto(encrypted)).toBeDefined();
    });
    it('should decrypt string', () => {
      expect(decrypto(encrypted)).toStrictEqual(defaultData);
    });
    it('should decrypt number', () => {
      const data = 348734;
      encrypted = encrypto(data);
      expect(decrypto(encrypted)).toStrictEqual(data);
    });
    it('should decrypt array', () => {
      const data = [4, 5];
      encrypted = encrypto(data);
      expect(decrypto(encrypted)).toStrictEqual(data);
    });
    it('should decrypt object', () => {
      const data = {name: 'Jon'};
      encrypted = encrypto(data);
      expect(decrypto(encrypted)).toStrictEqual(data);
    });
    it('should decrypt boolean', () => {
      const data = true;
      encrypted = encrypto(data);
      expect(decrypto(encrypted)).toStrictEqual(data);
    });
    it('should not decrypt invalid input', () => {
      expect(decrypto(null)).toBe(null);
      expect(decrypto()).toBe(null);
      expect(decrypto(2)).toBe(null);
      expect(decrypto(true)).toBe(null);
      expect(decrypto([])).toBe(null);
      expect(decrypto({})).toBe(null);
      expect(decrypto('')).toBe(null);
      expect(decrypto(String(' '))).toBe(null);
    });
  });
});
wayfinder/Wayfinder-Server
Server/Servers/src/ServerRegionIDs.cpp
/* Copyright (c) 1999 - 2010, Vodafone Group Services Ltd All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Vodafone Group Services Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include "config.h"
#include "ServerRegionIDs.h"
#include "TopRegionRequest.h"
#include "STLStringUtility.h"

// Collects into regionIDs every region of the group regionGroupID that the
// TopRegionRequest actually knows about; returns how many were added.
uint32
ServerRegionIDs::addRegionIDsForReq( vector<uint32>& regionIDs,
                                     uint32 regionGroupID,
                                     const TopRegionRequest* topReg ) const
{
   regionGroupMap::const_iterator findIt =
      m_regionGroupMap.find( regionGroupID );
   uint32 nbr = 0;
   if ( findIt != m_regionGroupMap.end() ) {
      for ( uint32 i = 0 ; i < (*findIt).second.first.size() ; ++i ) {
         // Only add regions present in the top region answer
         if ( topReg->getTopRegionWithID( (*findIt).second.first[ i ] ) ) {
            regionIDs.push_back( (*findIt).second.first[ i ] );
            ++nbr;
         }
      }
   }
   return nbr;
}

#ifdef USE_XML
//#include "XMLParserHelper.h"
#include <dom/DOM.hpp>
#include <parsers/XercesDOMParser.hpp>
//#include <util/PlatformUtils.hpp>
//#include <util/XMLUniDefs.hpp>
#include <framework/LocalFileInputSource.hpp>
#include <sax/SAXParseException.hpp>
#include "XMLUtility.h"

// Parses a <region_group> (or, via the name parameter, a <region_group_id>)
// element: reads its ident/id/mc2stringcodes attributes, fills the
// NameCollection c with one name per language, and returns the parsed id
// (MAX_UINT32 on failure).
static uint32 parseRegionGroup( const DOMNode* node, NameCollection& c,
                                const char* name = "region_group" )
{
   uint32 id = MAX_UINT32;

   if ( XMLString::equals( node->getNodeName(), name ) ) {
      // Get attributes
      DOMNamedNodeMap* attributes = node->getAttributes();
      DOMNode* attribute;
      MC2String identName;

      for ( uint32 i = 0 ; i < attributes->getLength() ; i++ ) {
         attribute = attributes->item( i );
         char* tmpStr = XMLUtility::transcodefromucs(
            attribute->getNodeValue() );
         char* tmpPtr = NULL;
         if ( XMLString::equals( attribute->getNodeName(), "ident" ) ) {
            identName = tmpStr;
         } else if ( XMLString::equals( attribute->getNodeName(), "id" ) ) {
            id = strtoul( tmpStr, &tmpPtr, 10 );
            if ( tmpPtr == NULL || tmpPtr[ 0 ] != '\0' ) {
               mc2log << warn << "parseRegionGroup Problem parsing "
                      << "id not a valid number."
                      << endl;
            }
         } else if ( XMLString::equals( attribute->getNodeName(),
                                        "mc2stringcodes" ) )
         {
            // Comma-separated list of string-table codes making up the name
            vector<MC2String> sc( STLStringUtility::explode( ",", tmpStr ) );
            vector<StringTableUTF8::stringCode> strc;
            for ( uint32 i = 0 ; i < sc.size() ; ++i ) {
               uint32 scid = strtoul( sc[ i ].c_str(), &tmpPtr, 0 );
               if ( tmpPtr == NULL || tmpPtr[ 0 ] != '\0' ) {
                  mc2log << warn << "parseRegionGroup Problem parsing "
                         << "mc2stringcodes entry not a valid number."
                         << endl;
               }
               strc.push_back( StringTableUTF8::stringCode( scid ) );
            }
            // Build the group's display name in every language
            for ( uint32 l = StringTableUTF8::ENGLISH ;
                  l < StringTableUTF8::SMSISH_ENG ; ++l )
            {
               MC2String s;
               for ( uint32 i = 0 ; i < strc.size() ; ++i ) {
                  // No space before the first word or before a comma
                  if ( i != 0 && strc[ i ] != StringTableUTF8::COMMA ) {
                     s += " ";
                  }
                  s += StringTable::getString(
                     strc[ i ], StringTableUTF8::languageCode( l ) );
               }
               mc2dbg4 << s << " (" << id << ") in "
                       << LangTypes::getLanguageAsString(
                          ItemTypes::getLanguageCodeAsLanguageType(
                             StringTableUTF8::languageCode( l ) ), true )
                       << endl;
               c.addName( new Name(
                  s.c_str(),
                  ItemTypes::getLanguageCodeAsLanguageType(
                     StringTableUTF8::languageCode( l ) ) ) );
            } // ident =
         } else {
            mc2log << warn << "parseRegionGroup "
                   << "unknown attribute for " << name << " element "
                   << " Name " << attribute->getNodeName()
                   << " Type " << attribute->getNodeType() << endl;
         }
         delete [] tmpStr;
      }

      // If no names use the ident name
      if ( c.getSize() == 0 ) {
         c.addName( new Name( identName.c_str(), LangTypes::english ) );
      }
      // No children
   } else {
      mc2log << error << "parseRegionGroup not " << MC2CITE( name )
             << " " << MC2CITE( node->getNodeName() ) << endl;
   }

   return id;
}

// Parses a <region> element: its id/mcc/iso_name attributes go into the
// given maps, its <region_group_id> children go into groups. Returns the
// region id, or MAX_UINT32 on failure.
static uint32 parseRegion( const DOMNode* node, vector<uint32>& groups,
                           RegionIDs::mccMap& mccIdMap,
                           RegionIDs::isoNameMap& isoNameIdMap)
{
   uint32 id = MAX_UINT32;

   if ( XMLString::equals( node->getNodeName(), "region" ) ) {
      // Get attributes
      DOMNamedNodeMap* attributes = node->getAttributes();
      DOMNode* attribute;
      vector< MC2String > mccVec;
      MC2String isoName;

      for ( uint32 i = 0 ; i < attributes->getLength() ; i++ ) {
         attribute = attributes->item( i );
         char* tmpStr = XMLUtility::transcodefromucs(
            attribute->getNodeValue() );
         char* tmpPtr = NULL;
         if ( XMLString::equals( attribute->getNodeName(), "ident" ) ) {
            // Not used
         } else if ( XMLString::equals( attribute->getNodeName(), "id" ) ) {
            id = strtoul( tmpStr, &tmpPtr, 10 );
            if ( tmpPtr == NULL || tmpPtr[ 0 ] != '\0' ) {
               mc2log << warn << "parseRegion Problem parsing "
                      << "id not a valid number." << endl;
               id = MAX_UINT32;
            }
         } else if ( XMLString::equals( attribute->getNodeName(), "mcc" ) ) {
            // Comma-separated mobile country codes
            mccVec = STLStringUtility::explode( ",", tmpStr ) ;
         } else if ( XMLString::equals( attribute->getNodeName(),
                                        "iso_name" ) ) {
            if( strlen( tmpStr ) > 0) {
               isoName = tmpStr;
            }
         } else {
            mc2log << warn << "parseRegion "
                   << "unknown attribute for region element "
                   << " Name " << attribute->getNodeName()
                   << " Type " << attribute->getNodeType() << endl;
         }
         delete [] tmpStr;
      }

      for(uint32 i = 0; i < mccVec.size(); i++) {
         // Add the MCC to the map
         uint32 mcc = STLStringUtility::strtoul( mccVec[ i ] );
         mccIdMap.insert( make_pair( mcc, id ) );
      }

      if ( isoName.length() > 0) {
         // Add iso name to map
         isoNameIdMap.insert( make_pair( isoName, id ) );
      }

      // Children (groups)
      DOMNode* child = node->getFirstChild();
      bool ok = true;
      while ( child != NULL && ok ) {
         switch ( child->getNodeType() ) {
            case DOMNode::ELEMENT_NODE :
               // See if the element is a known type
               if ( XMLString::equals( child->getNodeName(),
                                       "region_group_id" ) )
               {
                  // Get id and add to groups
                  NameCollection c;
                  uint32 gid = parseRegionGroup( child, c,
                                                 "region_group_id" );
                  if ( gid != MAX_UINT32 ) {
                     groups.push_back( gid );
                  } else {
                     mc2log << warn << "parseRegion failed parsing "
                            << "region_group_id."
                            << endl;
                     ok = false;
                     id = MAX_UINT32;
                  }
               } else {
                  mc2log << warn << "parseRegion "
                         << "odd with Element in region, "
                         << "element: " << child->getNodeName() << endl;
               }
               break;
            case DOMNode::COMMENT_NODE :
               // Ignore comments
               break;
            case DOMNode::TEXT_NODE :
               // Ignore stray texts
               break;
            default:
               mc2log << warn << "parseRegion odd "
                  "node type in region element: "
                      << child->getNodeName()
                      << " type " << child->getNodeType() << endl;
               break;
         }
         child = child->getNextSibling();
      } // End while child != NULL
   } else {
      mc2log << error << "parseRegion not region "
             << MC2CITE( node->getNodeName() ) << endl;
   }

   return id;
}

// Parses a <region_group_list>: inserts every child <region_group> into
// groupmap keyed by id, paired with an (initially empty) region-id vector
// and the parsed NameCollection. Returns false on any parse error.
static bool parseRegionGroupList( const DOMNode* node,
                                  RegionIDs::regionGroupMap& groupmap )
{
   bool ok = true;
   DOMNode* child = node->getFirstChild();
   vector<uint32> uint32Array;

   while ( child != NULL && ok ) {
      switch ( child->getNodeType() ) {
         case DOMNode::ELEMENT_NODE :
            // See if the element is a known type
            if ( XMLString::equals( child->getNodeName(),
                                    "region_group" ) )
            {
               MC2String ident;
               NameCollection c;
               uint32 id = parseRegionGroup( child, c );
               if ( id != MAX_UINT32 ) {
                  groupmap.insert(
                     make_pair( id, make_pair( uint32Array, c ) ) );
               } else {
                  mc2log << fatal << "parseRegionGroupList failed "
                         << "parsing region_group."
                         << endl;
                  ok = false;
               }
            } else {
               mc2log << warn << "parseRegionGroupList "
                      << "odd Element in region_group_list element: "
                      << child->getNodeName() << endl;
            }
            break;
         case DOMNode::COMMENT_NODE :
            // Ignore comments
            break;
         case DOMNode::TEXT_NODE :
            // Ignore stray texts
            break;
         default:
            ok = false;
            mc2log << warn << "parseRegionGroupList odd "
               "node type in region_group_list element: "
                   << child->getNodeName()
                   << " type " << child->getNodeType()<< endl;
            break;
      }
      child = child->getNextSibling();
   }

   return ok;
}

// Parses <region_ids>: each <region> child is inserted into regionmap and
// back-linked into every group it belongs to in groupmap (which must
// already be filled). Returns false on any parse error.
static bool parseRegionIDs( const DOMNode* node,
                            RegionIDs::regionIDMap& regionmap,
                            RegionIDs::regionGroupMap& groupmap,
                            RegionIDs::mccMap& mccIdMap,
                            RegionIDs::isoNameMap& isoNameIdMap )
{
   bool ok = true;
   DOMNode* child = node->getFirstChild();
   vector<uint32> uint32Array;

   while ( child != NULL && ok ) {
      switch ( child->getNodeType() ) {
         case DOMNode::ELEMENT_NODE :
            // See if the element is a known type
            if ( XMLString::equals( child->getNodeName(), "region" ) ) {
               uint32Array.clear();
               uint32 id = parseRegion( child, uint32Array,
                                        mccIdMap, isoNameIdMap );
               if ( id != MAX_UINT32 ) {
                  regionmap.insert( make_pair( id, uint32Array ) );
                  // Add region to the groups
                  for ( uint32 i = 0 ; i < uint32Array.size() ; ++i ) {
                     RegionIDs::regionGroupMap::iterator findIt =
                        groupmap.find( uint32Array[ i ] );
                     if ( findIt != groupmap.end() ) {
                        (*findIt).second.first.push_back( id );
                     } else {
                        mc2log << fatal << "RegionIDs unknown "
                               << "region_group id "
                               << uint32Array[ i ] << endl;
                        ok = false;
                     }
                  }
               } else {
                  mc2log << fatal << "parseRegionIDs failed "
                         << "parsing region."
                         << endl;
                  ok = false;
               }
            } else {
               mc2log << warn << "parseRegionIDs "
                      << "odd Element in region_ids element: "
                      << child->getNodeName() << endl;
            }
            break;
         case DOMNode::COMMENT_NODE :
            // Ignore comments
            break;
         case DOMNode::TEXT_NODE :
            // Ignore stray texts
            break;
         default:
            ok = false;
            mc2log << warn << "parseRegionIDs odd "
               "node type in region_ids element: "
                   << child->getNodeName()
                   << " type " << child->getNodeType()<< endl;
            break;
      }
      child = child->getNextSibling();
   }

   return ok;
}

// Parses a <region_id> element's id attribute; MAX_UINT32 on failure.
static uint32 parseRegionID( const DOMNode* node ) {
   uint32 id = MAX_UINT32;

   if ( XMLString::equals( node->getNodeName(), "region_id" ) ) {
      // Get attributes
      DOMNamedNodeMap* attributes = node->getAttributes();
      DOMNode* attribute;

      for ( uint32 i = 0 ; i < attributes->getLength() ; i++ ) {
         attribute = attributes->item( i );
         char* tmpStr = XMLUtility::transcodefromucs(
            attribute->getNodeValue() );
         char* tmpPtr = NULL;
         if ( XMLString::equals( attribute->getNodeName(), "id" ) ) {
            id = strtoul( tmpStr, &tmpPtr, 10 );
            if ( tmpPtr == NULL || tmpPtr[ 0 ] != '\0' ) {
               mc2log << warn << "parseRegionID Problem parsing "
                      << "id not a valid number."
                      << endl;
               id = MAX_UINT32;
            }
         } else {
            mc2log << warn << "parseRegionID "
                   << "unknown attribute for region_id element "
                   << " Name " << attribute->getNodeName()
                   << " Type " << attribute->getNodeType() << endl;
         }
         delete [] tmpStr;
      }
   } else {
      mc2log << error << "parseRegionID not region_id "
             << MC2CITE( node->getNodeName() ) << endl;
   }

   return id;
}

// Parses a <region_list>: collects its <region_id>/<region_group_id>
// children into a RegionList stored in listmap under the list's ident.
// Returns false on any parse error.
static bool parseRegionList( const DOMNode* node,
                             RegionIDs::regionIDMap& regionmap,
                             RegionIDs::regionGroupMap& groupmap,
                             RegionIDs::regionListMap& listmap )
{
   bool ok = true;
   DOMNode* child = node->getFirstChild();
   RegionList list;
   MC2String name;

   DOMNamedNodeMap* attributes = node->getAttributes();
   DOMNode* attribute;
   for ( uint32 i = 0 ; i < attributes->getLength() ; i++ ) {
      attribute = attributes->item( i );
      char* tmpStr = XMLUtility::transcodefromucs(
         attribute->getNodeValue() );
      if ( XMLString::equals( attribute->getNodeName(), "ident" ) ) {
         name = tmpStr;
      } else {
         mc2log << warn << "parseRegionList "
                << "unknown attribute for region list element "
                << " Name " << attribute->getNodeName()
                << " Type " << attribute->getNodeType() << endl;
      }
      delete [] tmpStr;
   }

   while ( child != NULL && ok ) {
      switch ( child->getNodeType() ) {
         case DOMNode::ELEMENT_NODE :
            // See if the element is a known type
            if ( XMLString::equals( child->getNodeName(), "region_id" ) ) {
               // Get the id-attribute of the region_id
               uint32 id = parseRegionID( child );
               if ( id != MAX_UINT32 ) {
                  list.addRegion( id );
               } else {
                  mc2log << warn << "parseRegionList failed parsing "
                         << "region_id." << endl;
                  ok = false;
               }
            } else if ( XMLString::equals( child->getNodeName(),
                                           "region_group_id" ) )
            {
               // Get id and add to list
               NameCollection c;
               uint32 gid = parseRegionGroup( child, c,
                                              "region_group_id" );
               if ( gid != MAX_UINT32 ) {
                  list.addGroup( gid );
               } else {
                  mc2log << warn << "parseRegionList failed parsing "
                         << "region_group_id."
                         << endl;
                  ok = false;
               }
            } else {
               mc2log << warn << "parseRegionList "
                      << "odd Element in region_list element: "
                      << child->getNodeName() << endl;
            }
            break;
         case DOMNode::COMMENT_NODE :
            // Ignore comments
            break;
         case DOMNode::TEXT_NODE :
            // Ignore stray texts
            break;
         default:
            ok = false;
            mc2log << warn << "parseRegionList odd "
               "node type in region_lists element: "
                   << child->getNodeName()
                   << " type " << child->getNodeType()<< endl;
            break;
      }
      child = child->getNextSibling();
   }

   if ( ok ) {
      listmap.insert( make_pair( name, list ) );
   }

   return ok;
}

// Parses <region_lists>: forwards every <region_list> child to
// parseRegionList. Returns false on any parse error.
static bool parseRegionLists( const DOMNode* node,
                              RegionIDs::regionIDMap& regionmap,
                              RegionIDs::regionGroupMap& groupmap,
                              RegionIDs::regionListMap& listmap )
{
   bool ok = true;
   DOMNode* child = node->getFirstChild();

   while ( child != NULL && ok ) {
      switch ( child->getNodeType() ) {
         case DOMNode::ELEMENT_NODE :
            // See if the element is a known type
            if ( XMLString::equals( child->getNodeName(),
                                    "region_list" ) ) {
               ok = parseRegionList( child, regionmap, groupmap, listmap );
            } else {
               mc2log << warn << "parseRegionLists "
                      << "odd Element in region_list element: "
                      << child->getNodeName() << endl;
            }
            break;
         case DOMNode::COMMENT_NODE :
            // Ignore comments
            break;
         case DOMNode::TEXT_NODE :
            // Ignore stray texts
            break;
         default:
            ok = false;
            mc2log << warn << "parseRegionLists odd "
                   << "node type in region_lists element: "
                   << child->getNodeName()
                   << " type " << child->getNodeType()<< endl;
            break;
      }
      child = child->getNextSibling();
   }

   return ok;
}
#endif

// Constructor: loads and parses ./region_ids.xml into the member maps.
// NOTE(review): any parse failure calls exit(1) — loading is considered
// mandatory for server startup.
ServerRegionIDs::ServerRegionIDs() {
#ifdef USE_XML
   try {
      const char* inputFile = "region_ids.xml";
      LocalFileInputSource s( X( "./" ), X( inputFile ) );
      //X( "./region_ids.xml" ) );
      //X( "./" ), X( "region_ids.xml" ) );
      //X( "./region_ids.xml" ) );
      XercesDOMParser parser;
      parser.setIncludeIgnorableWhitespace( false );
      bool ok = true;
      try {
         parser.parse( s );
      } catch ( const XMLException& e ) {
         mc2log << error << "RegionIDs an XMLerror occured "
                << "during parsing of region file: "
                << e.getMessage() << " line "
                << e.getSrcLine() << endl;
         ok = false;
      } catch( const SAXParseException& e) {
         mc2log << error << "RegionIDs an SAXerror occured "
                << "during parsing of region file: "
                << e.getMessage() << ", "
                << "line " << e.getLineNumber() << ", column "
                << e.getColumnNumber() << endl;
         ok = false;
      }
      if ( !ok ) {
         mc2log << fatal << "RegionIDs failed parsing region file."
                << endl;
         exit( 1 );
      }

      DOMDocument* doc = parser.getDocument();
      DOMElement* rootEl = doc ? doc->getDocumentElement() : NULL;
      if ( rootEl != NULL &&
           XMLString::equals( rootEl->getNodeName(),
                              "map_generation-mc2" ) )
      {
         // Go through the elements and handle them.
         DOMNode* child = rootEl->getFirstChild();
         vector<uint32> uint32Array;
         while ( child != NULL && ok ) {
            switch ( child->getNodeType() ) {
               case DOMNode::ELEMENT_NODE :
                  // See if the element is a known type
                  if ( XMLString::equals( child->getNodeName(),
                                          "region_group_list" ) ) {
                     ok = parseRegionGroupList( child, m_regionGroupMap );
                  } else if ( XMLString::equals( child->getNodeName(),
                                                 "region_ids" ) ) {
                     ok = parseRegionIDs( child, m_regionIDMap,
                                          m_regionGroupMap, m_mccMap,
                                          m_isoNameMap );
                  } else if ( XMLString::equals( child->getNodeName(),
                                                 "region_lists" ) ) {
                     ok = parseRegionLists( child, m_regionIDMap,
                                            m_regionGroupMap,
                                            m_regionListMap);
                  } else {
                     mc2log << warn << "RegionIDs "
                            << "odd Element in map_generation-mc2 "
                            << "element: "
                            << child->getNodeName() << endl;
                  }
                  break;
               case DOMNode::COMMENT_NODE :
                  // Ignore comments
                  break;
               case DOMNode::TEXT_NODE :
                  // Ignore stray texts
                  break;
               default:
                  ok = false;
                  mc2log << warn
                         << "XMLParserThread::xmlParseIsabmc2 odd "
                         << "node type in isab-mc2 element: "
                         << child->getNodeName()
                         << " type " << child->getNodeType()<< endl;
                  break;
            }
            child = child->getNextSibling();
         }
         if ( !ok ) {
            exit( 1 );
         }
      } else {
         if ( rootEl == NULL ) {
            mc2log << fatal
                   << "RegionIDs no document in region file. File: "
                   << inputFile << endl;
         } else {
            mc2log << fatal << "RegionIDs not map_generation-mc2 root "
                   << "element! " << MC2CITE( rootEl->getNodeName() )
                   << endl;
         }
         exit( 1 );
      }
   } catch ( const XMLException& e ) {
      mc2log << fatal << "RegionIDs an XMLerror occured "
             << "during opening of region file: "
             << e.getMessage() << endl;
      exit( 1 );
   }
#else
   mc2log << error << "RegionIDs not compiled with XML support no region "
          << "data loaded." << endl;
#endif

   // Print all region groups and their regions
   vector<pair<uint32, NameCollection> > regionGroupIDs;
   addAllRegionGroups( regionGroupIDs );
   for ( uint32 i = 0 ; i < regionGroupIDs.size() ; ++i ) {
      mc2dbg4 << "RegionGroup: " << hex << regionGroupIDs[ i ].second
              << " (" << regionGroupIDs[ i ].first << ") : ";
      vector<uint32> regionIDs;
      addRegionIDsFor( regionIDs, regionGroupIDs[ i ].first );
      for ( uint32 j = 0 ; j < regionIDs.size() ; ++j ) {
         if ( j != 0 ) {
            mc2dbg4 << ", ";
         }
         mc2dbg4 << regionIDs[ j ];
      }
      mc2dbg4 << dec << endl;
   }
}
JRPerezJr/golang-course-notes
demo/pointers/pointers.go
package main

import "fmt"

// Counter records how many times a value has been replaced.
type Counter struct {
	hits int
}

// increment bumps the hit count and logs the counter's state.
func increment(counter *Counter) {
	counter.hits++
	fmt.Println("Counter", counter)
}

// replace overwrites the string that old points at with new and
// records the mutation on counter.
func replace(old *string, new string, counter *Counter) {
	*old = new
	increment(counter)
}

func main() {
	counter := Counter{}

	greeting := "Hello"
	subject := "World!"
	fmt.Println(greeting, subject)

	// Mutating through a pointer to a local variable.
	replace(&greeting, "Hi", &counter)
	fmt.Println(greeting, subject)

	// Mutating through a pointer to a slice element.
	words := []string{greeting, subject}
	fmt.Println(words)
	replace(&words[1], "Go!", &counter)
	fmt.Println(words)
}
xsteadfastx/hcloud-pricing-exporter
fetcher/server_backups.go
<reponame>xsteadfastx/hcloud-pricing-exporter package fetcher import ( "strconv" "github.com/hetznercloud/hcloud-go/hcloud" ) var _ Fetcher = &server{} // NewServerBackup creates a new fetcher that will collect pricing information on server backups. func NewServerBackup(pricing *PriceProvider) Fetcher { return &serverBackup{newBase(pricing, "server_backup", "location", "type")} } type serverBackup struct { *baseFetcher } func (serverBackup serverBackup) Run(client *hcloud.Client) error { servers, _, err := client.Server.List(ctx, hcloud.ServerListOpts{}) if err != nil { return err } for _, s := range servers { location := s.Datacenter.Location if s.BackupWindow != "" { serverPrice, err := findServerPricing(location, s.ServerType.Pricings) if err != nil { return err } hourlyPrice := serverBackup.toBackupPrice(serverPrice.Hourly.Gross) monthlyPrice := serverBackup.toBackupPrice(serverPrice.Monthly.Gross) serverBackup.hourly.WithLabelValues(s.Name, location.Name, s.ServerType.Name).Set(hourlyPrice) serverBackup.monthly.WithLabelValues(s.Name, location.Name, s.ServerType.Name).Set(monthlyPrice) } else { serverBackup.hourly.WithLabelValues(s.Name, location.Name, s.ServerType.Name).Set(0) serverBackup.monthly.WithLabelValues(s.Name, location.Name, s.ServerType.Name).Set(0) } } return nil } func (serverBackup serverBackup) toBackupPrice(rawServerPrice string) float64 { serverPrice, err := strconv.ParseFloat(rawServerPrice, 32) if err != nil { return 0 } return serverPrice * (serverBackup.pricing.ServerBackup() / 100) }
PendaRed/sackfixsessions
sf-session-common/src/main/scala/org/sackfix/session/sfSessionEvents.scala
package org.sackfix.session

import org.sackfix.common.message.SfMessage
import org.sackfix.common.validated.fields.SfFixMessageBody
import org.sackfix.field.MsgTypeField

/**
  * Created by Jonathan during November 2016.
  * Using Early Initialisation and a trait rather than inheritance...just to see how it works
  */
// Base type for every event fed into the session; `name` is set via an
// early initialiser in each concrete object/class below.
sealed trait SfSessionEvent {
  val name:String
  def description = name
}

// Events describing the state of the underlying socket.
sealed trait SfSessionSocketEvent extends SfSessionEvent

case object SfSessionServerSocketOpenEvent extends {val name="Socket Waiting"} with SfSessionSocketEvent
case object SfSessionNetworkConnectionEstablishedEvent extends {val name="Socket Connected"} with SfSessionSocketEvent
case object SfSessionSocketCloseEvent extends {val name="Socket Closed"} with SfSessionSocketEvent
case object SfSessionServerSocketCloseEvent extends {val name="Acceptor Socket Closed"} with SfSessionSocketEvent

// A FIX message arrived; description includes msg type and sequence number.
case class SfSessionFixMessageEvent(val msg:SfMessage) extends {val name="Fix Message Arrived"} with SfSessionEvent {
  override def description: String = {
    s"$name [${MsgTypeField.fixDescriptionByValue.getOrElse(msg.header.msgTypeField.value,"")}, msgSeqNum=${msg.header.msgSeqNumField.value}]"
  }
}

// Control events (timers, forced logout) injected into the session.
sealed trait SfSessionControlEvent extends SfSessionEvent

case class SfControlTimeoutFired(id:String,durationMs:Long) extends {val name="TimeoutFired"} with SfSessionControlEvent
case class SfControlNoSentHeartbeatTimeout(val noBeatsMissed:Int) extends {val name="Sent Nothing for Heartbeat Interval"} with SfSessionControlEvent
case class SfControlNoReceivedHeartbeatTimeout(val noBeatsMissedPlus20Percent:Int) extends {val name="Received Nothing for Heartbeat Interval+20%"} with SfSessionControlEvent
case class SfControlForceLogoutAndClose(val reason:String, val pausePriorToSocketCloseMs:Option[Long]=None) extends {val name=s"Force Logout [$reason]"} with SfSessionControlEvent

// Actions the session asks its surroundings to perform in response to events.
sealed trait SfAction

case class SfActionStartTimeout(id:String,durationMs:Long) extends SfAction
case class SfActionSendMessageToFix(msg: SfFixMessageBody) extends SfAction
case class SfActionResendMessages(beginSeqNo:Int, endSeqNo:Int) extends SfAction
case class SfActionCloseSocket() extends SfAction
case class SfActionCounterpartyHeartbeat(heartbeatSecs:Int) extends SfAction
case object SfActionBusinessSessionOpenForSending extends SfAction
case object SfActionBusinessSessionClosedForSending extends SfAction
case class SfActionBusinessMessage(msg: SfMessage) extends SfAction
lipilian/PlenopticImageProcessing
PIPInterOpCUDA/CUDADataArray.hh
/** * Copyright 2019 <NAME>, Kiel University * * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and * associated documentation files (the "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject * to the following conditions: * * The above copyright notice and this permission notice shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN * NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #pragma once #include "CUDA/CudaHelper.hh" namespace PIP { /////////////////////////////////////////////////////////////////////////////////////// /// GENERIC DATA INPUT/OUTPUT WRAPPER /////////////////////////////////////////////////////////////////////////////////////// template<typename DATATYPE> class CCUDADataArray { public: /// /// \brief CCUDADataArray tries to allocate CUDA device memory and uploads data (if requested). /// \param pData pointer to host memory to use /// \param sizeElemCount number of elements in \ref pData /// \param eTransferType data transfer type /// /// Needs given array to be allocated if eTransferType != NONE /// /// NOTE : throws in case of errors! 
/// CCUDADataArray(DATATYPE* pData, const size_t sizeElemCount, const ECUDAMemTransferType eTransferType = ECUDAMemTransferType::OUTPUT); /// /// \brief ~CUDAByteImage automatically frees CUDA memory allocated in CTor and downloads device /// mem if requested in CTor /// ~CCUDADataArray(); /// /// \brief SkipDeviceCopy frees CUDA resources without copying active device memory to host. /// /// NOTE : Object cannot be reused and is left in invalid state /// inline void SkipDeviceCopy() { // call de-allocation with do-not-copy flag __FreeCUDA(true); } /// /// \brief GetDevicePointer returns pointer to allocated CUDA device memory /// \return pointer to device mem /// /// NOTE : NEVER delete/free pointer /// inline DATATYPE* GetDevicePointer() const { return m_dpData; } inline int GetElementCount() const { return (int) m_sizeElemCount; } inline size_t GetElemByteSize() const { return sizeof(DATATYPE); } inline size_t GetTotalByteCount() const { return m_sizeElemCount * sizeof(DATATYPE); } protected: /// No other than initialization CTor allowed! CCUDADataArray() {} /// No other than initialization CTor allowed! CCUDADataArray(const CCUDADataArray&) {} /// /// \brief __AllocateCUDA allocates cuda device memory and uploads data if requested /// \param pData pointer to input data /// \param sizeElemCount number of DATATYPE elements in \ref pData /// \return 0 if successfull, else CUDA error code /// /// NOTE : Validity of input data is not checked! 
/// inline void __AllocateCUDA(DATATYPE* pData, const size_t sizeElemCount, const ECUDAMemTransferType eTransferType) { if ((pData == nullptr) && (eTransferType != ECUDAMemTransferType::NONE)) { throw CRuntimeException(std::string("PIP::CCUDADataArray::AllocateCUDA : data array as nullptr not allowed for eTransferType != ECUDAMemTransferType::NONE")); } // Store pointer for input/output m_pData = pData; m_sizeElemCount = sizeElemCount; m_eMemTransferType = eTransferType; // Allocate device memory cudaMalloc(&m_dpData, this->GetTotalByteCount()); if (m_dpData == nullptr) { throw CRuntimeException(std::string("PIP::CCUDADataArray: CUDA image malloc returned nullptr.")); } cudaError_t e; if ((e = cudaGetLastError()) != 0) { m_dpData = nullptr; throw CRuntimeException(std::string("PIP::CCUDADataArray : CUDA malloc error : \"") + std::string(cudaGetErrorString(e)) + std::string("\"")); } // Upload data if requested if ((m_eMemTransferType == ECUDAMemTransferType::INPUT) || (m_eMemTransferType == ECUDAMemTransferType::INOUT)) { // Copy data to cuda device cudaMemcpy(m_dpData, (void *)pData, m_sizeElemCount * sizeof(DATATYPE), cudaMemcpyHostToDevice); if ((e = cudaGetLastError()) != 0) { cudaFree(m_dpData); m_dpData = nullptr; throw CRuntimeException(std::string("PIP::CCUDADataArray : CUDA copy error : \"") + std::string(cudaGetErrorString(e)) + std::string("\"")); } } } /// /// \brief __FreeCUDA frees CUDA mem and invalidates this. Downloads data if requested. 
/// inline void __FreeCUDA(const bool flagSkipCopyToHost = false) { if (m_dpData == nullptr) { return; } // Download data if requested (and no skip is forced) if (flagSkipCopyToHost == false) { if ((m_eMemTransferType == ECUDAMemTransferType::OUTPUT) || (m_eMemTransferType == ECUDAMemTransferType::INOUT)) { cudaError_t e; if ((e = cudaGetLastError()) != 0) { cudaFree(m_dpData); throw CRuntimeException(std::string("PIP::CCUDADataArray : CUDA pre-copy error : \"") + std::string(cudaGetErrorString(e)) + std::string("\"")); } // Copy device data to host array cudaMemcpy(m_pData, (void *)m_dpData, m_sizeElemCount * sizeof(DATATYPE), cudaMemcpyDeviceToHost); if ((e = cudaGetLastError()) != 0) { cudaFree(m_dpData); throw CRuntimeException(std::string("PIP::CCUDADataArray : CUDA copy error : \"") + std::string(cudaGetErrorString(e)) + std::string("\"")); } } } cudaFree(m_dpData); m_dpData = nullptr; } /// Pointer to device memory containing image DATATYPE* m_dpData = nullptr; /// Stored data pointer for datas output copy (as handed from caller to CTor) DATATYPE* m_pData = nullptr; /// Number of elements in array size_t m_sizeElemCount = 0; /// Up-/Down-load data? Default download in DTor only ECUDAMemTransferType m_eMemTransferType = ECUDAMemTransferType::OUTPUT; }; } // namespace PIP
DenitsaRP/Java-Playground
JavaBasics/strings/Anagram.java
package strings;

import java.util.Arrays;
import java.util.Scanner;

//Write java program to check if two words are anagrams:
public class Anagram {

    /**
     * Returns true when the two words contain exactly the same characters
     * with the same multiplicities (case-sensitive).
     *
     * <p>BUG FIX: the previous implementation counted, for every character of
     * the first word, ALL matching positions in the second word. With repeated
     * characters this over-counts: e.g. "aa" vs "aa" produced a count of 4,
     * which differs from the length 2, so true anagrams with duplicate letters
     * were reported as "Not an anagram". Sorting both character arrays and
     * comparing them is correct for all inputs and runs in O(n log n).
     *
     * @param s1 first word
     * @param s2 second word
     * @return true iff s1 is an anagram of s2
     */
    static boolean isAnagram(String s1, String s2) {
        // Different lengths can never be anagrams.
        if (s1.length() != s2.length()) {
            return false;
        }
        char[] ch1 = s1.toCharArray();
        char[] ch2 = s2.toCharArray();
        Arrays.sort(ch1);
        Arrays.sort(ch2);
        return Arrays.equals(ch1, ch2);
    }

    /**
     * Reads two words from stdin and prints whether they are anagrams.
     */
    public static void main(String[] args) {
        Scanner sc = new Scanner(System.in);
        System.out.println("Enter first word:");
        String s1 = sc.nextLine();
        System.out.println("Enter second word: ");
        String s2 = sc.nextLine();

        boolean anagrams = isAnagram(s1, s2);

        System.out.println(anagrams ? "Anagrams" : "Not an anagram");
    }
}
qykjsz/eta
lib_ios_dialog/src/main/java/com/allens/lib_ios_dialog/IosDialog.java
<gh_stars>1-10
package com.allens.lib_ios_dialog;

import android.app.Dialog;
import android.content.Context;
import android.graphics.Color;
import android.support.annotation.NonNull;
import android.view.Display;
import android.view.LayoutInflater;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.WindowManager;
import android.widget.Button;
import android.widget.EditText;
import android.widget.FrameLayout;
import android.widget.ImageView;
import android.widget.LinearLayout;
import android.widget.LinearLayout.LayoutParams;
import android.widget.TextView;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * iOS-style alert dialog builder: chain setters (title, message, buttons,
 * optional EditText fields) and call {@link #show()}. Call {@link #builder()}
 * first — it inflates the layout and binds all views.
 *
 * @author allens
 */
public class IosDialog {
    private Context context;
    private Dialog dialog;
    // Root container of the dialog; its width is a fraction of the screen width.
    private LinearLayout lLayout_bg;
    // Container that custom views (e.g. EditTexts added via addEdit) go into.
    private LinearLayout lLayout_alert_ll;
    private TextView txt_title;
    private TextView txt_msg;
    private Button btn_neg;
    private Button btn_pos;
    // Vertical divider between the two buttons.
    private ImageView img_line;
    private Display display;
    /***
     * Used to hold views added by the caller, keyed by tag.
     */
    private Map<String, View> viewHap;
    /**
     * Whether to show the title
     */
    private boolean showTitle = false;
    /***
     * Whether to show the message
     */
    private boolean showMsg = false;
    /***
     * Whether to show the positive (confirm) button
     */
    private boolean showPosBtn = false;
    /**
     * Whether to show the negative (cancel) button
     */
    private boolean showNegBtn = false;
    /**
     * Dialog width as a fraction of the screen width
     */
    private float dialogWidth = 0.7f;

    public IosDialog(Context context) {
        this.context = context;
        WindowManager windowManager = (WindowManager) context.getSystemService(Context.WINDOW_SERVICE);
        display = windowManager.getDefaultDisplay();
    }

    /**
     * Inflates the dialog layout, binds and hides all views, and creates the
     * underlying {@link Dialog}. Must be called before any other setter.
     */
    public IosDialog builder() {
        View view = LayoutInflater.from(context).inflate(R.layout.view_dialog, null);
        lLayout_bg = (LinearLayout) view.findViewById(R.id.lLayout_bg);
        lLayout_alert_ll = (LinearLayout) view.findViewById(R.id.lLayout_alert_ll);
        // NOTE(review): setVerticalGravity(View.GONE) looks like a typo for
        // setVisibility(View.GONE) — as written it sets the layout's vertical
        // gravity to the int value of View.GONE (8), and the container stays
        // visible. Confirm intent before changing: views added via addEdit()
        // are never explicitly made visible, so "fixing" this would hide them.
        lLayout_alert_ll.setVerticalGravity(View.GONE);
        txt_title = (TextView) view.findViewById(R.id.txt_title);
        txt_title.setVisibility(View.GONE);
        txt_msg = (TextView) view.findViewById(R.id.txt_msg);
        txt_msg.setVisibility(View.GONE);
        btn_neg = (Button) view.findViewById(R.id.btn_neg);
        btn_neg.setVisibility(View.GONE);
        btn_pos = (Button) view.findViewById(R.id.btn_pos);
        btn_pos.setVisibility(View.GONE);
        img_line = (ImageView) view.findViewById(R.id.img_line);
        img_line.setVisibility(View.GONE);
        dialog = new Dialog(context, R.style.AlertDialogStyle);
        dialog.setContentView(view);
        lLayout_bg.setLayoutParams(new FrameLayout.LayoutParams((int) (display.getWidth()
                * dialogWidth), LayoutParams.WRAP_CONTENT));
        return this;
    }

    public IosDialog setTitle(@NonNull String title) {
        showTitle = true;
        txt_title.setText(title);
        return this;
    }

    public IosDialog setMsg(@NonNull String msg) {
        showMsg = true;
        txt_msg.setText(msg);
        return this;
    }

    /**
     * Adds an EditText (created by {@link #getEt()}) to the dialog, stored
     * under the given tag for later lookup.
     */
    public IosDialog addEdit(String tag) {
        if (viewHap == null) {
            viewHap = new HashMap<>();
        }
        viewHap.put(tag, getEt());
        return this;
    }

    /***
     * Set the hint of the EditText registered under the given tag
     * @param tag  tag the EditText was added with
     * @param hint hint text
     */
    public IosDialog setEditHint(String tag, String hint) {
        EditText editText = getEditText(tag);
        if (editText != null) {
            editText.setHint(hint);
        }
        return this;
    }

    /**
     * Looks up the EditText registered under the given tag; returns null (and
     * prints a diagnostic stack trace) when the tag is unknown or the stored
     * view is not an EditText.
     */
    private EditText getEditText(String tag) {
        if (viewHap != null) {
            View view = viewHap.get(tag);
            if (view != null) {
                try {
                    return ((EditText) view);
                } catch (Exception e) {
                    // The view under this tag is not an EditText.
                    Throwable throwable = new Throwable("当前tag 对应的View 不是 EditText");
                    throwable.printStackTrace();
                }
            } else {
                // No EditText registered under this tag.
                Throwable throwable = new Throwable("当前tag 未找到对应 的 EditText");
                throwable.printStackTrace();
            }
        } else {
            // No EditText has been added at all.
            Throwable throwable = new Throwable("没有可以set 的 EditText");
            throwable.printStackTrace();
        }
        return null;
    }

    /***
     * Set the text color of the EditText registered under the given tag
     * @param tag
     * @param color
     * @return
     */
    public IosDialog setEditTextColor(String tag, int color) {
        EditText editText = getEditText(tag);
        if (editText != null) {
            editText.setTextColor(color);
        }
        return this;
    }

    /***
     * Set padding (in dp) of the EditText registered under the given tag
     * @param tag
     * @param left
     * @param top
     * @param right
     * @param bottom
     * @return
     */
    public IosDialog setEditTextPadding(String tag, float left, float top, float right, float bottom) {
        EditText editText = getEditText(tag);
        if (editText != null) {
            editText.setPadding(dp2px(left), dp2px(top), dp2px(right), dp2px(bottom));
        }
        return this;
    }

    /***
     * Set margins (in dp) of the EditText registered under the given tag
     * @param tag
     * @param left
     * @param top
     * @param right
     * @param bottom
     * @return
     */
    public IosDialog setEditTextMargin(String tag, float left, float top, float right, float bottom) {
        EditText editText = getEditText(tag);
        if (editText != null) {
            LinearLayout.LayoutParams lp = new LinearLayout.LayoutParams(LinearLayout.LayoutParams.MATCH_PARENT,
                    LinearLayout.LayoutParams.WRAP_CONTENT);
            lp.setMargins(dp2px(left), dp2px(top), dp2px(right), dp2px(bottom));
            editText.setLayoutParams(lp);
        }
        return this;
    }

    /***
     * Whether to show the text cursor in the EditText under the given tag
     * @param tag
     * @param isShowCursor
     * @return
     */
    public IosDialog setCursorVisible(String tag, Boolean isShowCursor) {
        EditText editText = getEditText(tag);
        if (editText != null) {
            editText.setCursorVisible(isShowCursor);
        }
        return this;
    }

    /**
     * Creates a default-styled EditText, adds it to the custom-view container
     * and returns it. (Default hint text is "请输入名称".)
     */
    private EditText getEt() {
        LinearLayout linearLayout = getlLayout_alert_ll();
        EditText editText = new EditText(context);
        editText.setHint("请输入名称");
        editText.setBackgroundResource(R.drawable.et_bg);
        editText.setTextColor(Color.BLACK);
        editText.setTextSize(14);
        editText.setCursorVisible(false);
        editText.setPadding(dp2px(4), dp2px(4), dp2px(4), dp2px(4));
        LinearLayout.LayoutParams lp = new LinearLayout.LayoutParams(LinearLayout.LayoutParams.MATCH_PARENT,
                LinearLayout.LayoutParams.WRAP_CONTENT);
        lp.setMargins(dp2px(15), dp2px(15), dp2px(15), 0);
        editText.setLayoutParams(lp);
        linearLayout.addView(editText);
        return editText;
    }

    /***
     * Whether pressing the back key dismisses the dialog
     * @param cancel
     * @return
     */
    public IosDialog setCancelable(boolean cancel) {
        dialog.setCancelable(cancel);
        return this;
    }

    /**
     * Whether touching outside the dialog dismisses it
     *
     * @param isCancelOutside
     * @return
     */
    public IosDialog setCancelOutside(boolean isCancelOutside) {
        dialog.setCanceledOnTouchOutside(isCancelOutside);
        return this;
    }

    /**
     * Configure the positive (confirm) button; the dialog is dismissed after
     * the listener runs.
     *
     * @param text
     * @param listener
     * @return
     */
    public IosDialog setPositiveButton(String text, final OnClickListener listener) {
        showPosBtn = true;
        btn_pos.setText(text);
        btn_pos.setOnClickListener(new OnClickListener() {
            @Override
            public void onClick(View v) {
                listener.onClick(v);
                dialog.dismiss();
            }
        });
        return this;
    }

    // Overload with default button text "确定".
    public IosDialog setPositiveButton(final OnClickListener listener) {
        setPositiveButton("确定", listener);
        return this;
    }

    // Overload with default button text "确定" that also collects EditText contents.
    public IosDialog setPositiveButton(final OnEdPositiveListener listener) {
        setPositiveButton("确定", listener);
        return this;
    }

    /**
     * Configure the positive button so that, on click, the contents of every
     * registered EditText are collected into a tag->text map and handed to
     * the listener; the dialog is dismissed afterwards.
     */
    public IosDialog setPositiveButton(String text, final OnEdPositiveListener listener) {
        showPosBtn = true;
        btn_pos.setText(text);
        btn_pos.setOnClickListener(new OnClickListener() {
            @Override
            public void onClick(View v) {
                if (viewHap == null) {
                    // No EditTexts registered — this listener variant is unusable.
                    dialog.dismiss();
                    Throwable throwable = new Throwable("当前没有可用的EditText 请使用 OnClickListener 的接口");
                    throwable.printStackTrace();
                    return;
                }
                HashMap<String, String> strings = new HashMap<>();
                for (Map.Entry<String, View> entry : viewHap.entrySet()) {
                    String key = entry.getKey();
                    EditText editText = (EditText) entry.getValue();
                    String msg = editText.getText().toString();
                    strings.put(key, msg);
                }
                listener.onClick(v, strings);
                dialog.dismiss();
            }
        });
        return this;
    }

    /***
     * Listener for the confirm button that also receives EditText contents
     */
    public interface OnEdPositiveListener {
        /***
         * Confirm callback
         * @param view   the clicked button
         * @param msgMap tag -> entered text for every registered EditText
         */
        void onClick(View view, HashMap<String, String> msgMap);
    }

    /***
     * Configure the negative (cancel) button; the dialog is dismissed after
     * the listener runs.
     * @param text
     * @param listener
     * @return
     */
    public IosDialog setNegativeButton(String text, final OnClickListener listener) {
        showNegBtn = true;
        btn_neg.setText(text);
        btn_neg.setOnClickListener(new OnClickListener() {
            @Override
            public void onClick(View v) {
                listener.onClick(v);
                dialog.dismiss();
            }
        });
        return this;
    }

    // Overload with default button text "取消".
    public IosDialog setNegativeButton(final OnClickListener listener) {
        setNegativeButton("取消", listener);
        return this;
    }

    /**
     * Applies visibility and background selectors to the title, message and
     * buttons based on which setters were called. Invoked by {@link #show()}.
     */
    private void setLayout() {
        if (!showTitle && !showMsg) {
            txt_msg.setVisibility(View.GONE);
            txt_title.setVisibility(View.GONE);
        }

        if (showTitle) {
            txt_title.setVisibility(View.VISIBLE);
        }

        if (showMsg) {
            txt_msg.setVisibility(View.VISIBLE);
        }

        if (!showPosBtn && !showNegBtn) {
            // NOTE(review): this branch hides btn_pos yet still configures its
            // background and a dismiss click-listener — a GONE button can never
            // be clicked. Upstream variants of this pattern set the button
            // VISIBLE with default text here; confirm intent before changing.
            btn_pos.setVisibility(View.GONE);
            btn_pos.setBackgroundResource(R.drawable.alertdialog_single_selector);
            btn_pos.setOnClickListener(new OnClickListener() {
                @Override
                public void onClick(View v) {
                    dialog.dismiss();
                }
            });
        }

        if (showPosBtn && showNegBtn) {
            btn_pos.setVisibility(View.VISIBLE);
            btn_pos.setBackgroundResource(R.drawable.alertdialog_right_selector);
            btn_neg.setVisibility(View.VISIBLE);
            btn_neg.setBackgroundResource(R.drawable.alertdialog_left_selector);
            img_line.setVisibility(View.VISIBLE);
        }

        if (showPosBtn && !showNegBtn) {
            btn_pos.setVisibility(View.VISIBLE);
            btn_pos.setBackgroundResource(R.drawable.alertdialog_single_selector);
        }

        if (!showPosBtn && showNegBtn) {
            btn_neg.setVisibility(View.VISIBLE);
            btn_neg.setBackgroundResource(R.drawable.alertdialog_single_selector);
        }
    }

    public void show() {
        setLayout();
        dialog.show();
    }

    public void dismiss() {
        dialog.dismiss();
    }

    /**
     * Set the dialog width as a fraction of the screen width
     *
     * @param dialogWidth
     * @return
     */
    public IosDialog setDialogWidth(float dialogWidth) {
        if (lLayout_bg != null) {
            lLayout_bg.setLayoutParams(new FrameLayout.LayoutParams((int) (display.getWidth()
                    * dialogWidth), LayoutParams.WRAP_CONTENT));
        }
        this.dialogWidth = dialogWidth;
        return this;
    }

    /***
     * Get the title view
     * @return
     */
    public TextView getTxt_title() {
        return txt_title;
    }

    /***
     * Get the message view
     * @return
     */
    public TextView getTxt_msg() {
        return txt_msg;
    }

    /**
     * Get the negative (cancel) button.
     * (Original comment mislabelled this as the confirm button.)
     *
     * @return
     */
    public Button getBtn_neg() {
        return btn_neg;
    }

    /***
     * Get the container used for adding custom views
     * @return
     */
    public LinearLayout getlLayout_alert_ll() {
        return lLayout_alert_ll;
    }

    /***
     * Convert dp to px using the device density
     * @param dpValue
     * @return
     */
    public int dp2px(float dpValue) {
        final float scale = context.getResources().getDisplayMetrics().density;
        return (int) (dpValue * scale + 0.5f);
    }

    /**
     * Convert px to dp using the device density
     */
    public int px2dip(float pxValue) {
        final float scale = context.getResources().getDisplayMetrics().density;
        return (int) (pxValue / scale + 0.5f);
    }

    /**
     * Convert px to sp, keeping text size constant
     */
    public int px2sp(float pxValue) {
        final float fontScale = context.getResources().getDisplayMetrics().scaledDensity;
        return (int) (pxValue / fontScale + 0.5f);
    }

    /**
     * Convert sp to px, keeping text size constant
     */
    public int sp2px(float spValue) {
        final float fontScale = context.getResources().getDisplayMetrics().scaledDensity;
        return (int) (spValue * fontScale + 0.5f);
    }

    /**
     * Get the positive (confirm) button.
     * (Original comment mislabelled this as the cancel button.)
     *
     * @return
     */
    public Button getBtn_pos() {
        return btn_pos;
    }
}
SimplyVC/panic_polkadot
src/monitors/node.py
import logging
from datetime import datetime, timedelta
from typing import Optional, List

from src.alerters.reactive.node import Node
from src.alerts.alerts import FoundLiveArchiveNodeAgainAlert
from src.channels.channel import ChannelSet
from src.monitors.monitor import Monitor
from src.store.redis.redis_api import RedisApi
from src.store.store_keys import Keys
from src.utils.config_parsers.internal import InternalConfig
from src.utils.config_parsers.internal_parsed import InternalConf
from src.utils.data_wrapper.polkadot_api import PolkadotApiWrapper
from src.utils.exceptions import \
    NoLiveNodeConnectedWithAnApiServerException, \
    NoLiveArchiveNodeConnectedWithAnApiServerException
from src.utils.parsing import parse_int_from_string
from src.utils.scaling import scale_to_pico
from src.utils.types import NONE


class NodeMonitor(Monitor):
    """Monitors a single Polkadot node, both directly (querying the node
    itself for health/sync/peers/finalized height) and indirectly (querying
    other nodes of the same chain for validator state such as session/era
    indices, bonded balance, elections, and slashing events).

    State (session index, era index, last slashing height checked) is
    persisted to Redis when Redis is enabled.
    """

    def __init__(self, monitor_name: str, channels: ChannelSet,
                 logger: logging.Logger,
                 node_monitor_max_catch_up_blocks: int,
                 redis: Optional[RedisApi], node: Node,
                 archive_alerts_disabled: bool, data_sources: List[Node],
                 polkadot_api_endpoint: str,
                 internal_conf: InternalConfig = InternalConf):
        super().__init__(monitor_name, channels, logger, redis, internal_conf)
        self._node = node
        self._data_wrapper = PolkadotApiWrapper(logger, polkadot_api_endpoint)
        self._node_monitor_max_catch_up_blocks = \
            node_monitor_max_catch_up_blocks
        self._redis_alive_key_timeout = \
            self._internal_conf.redis_node_monitor_alive_key_timeout
        self._redis_last_height_key_timeout = \
            self._internal_conf.redis_node_monitor_last_height_key_timeout

        # The data sources for indirect monitoring are all nodes from the same
        # chain which have been set as a data source in the config.
        self._indirect_monitoring_data_sources = data_sources

        # The data sources for archive monitoring are all archive nodes from
        # the same chain that have been set as data source in the config.
        self._archive_monitoring_data_sources = [node for node in data_sources
                                                 if node.is_archive_node]
        self.last_data_source_used = None
        # NONE is the project's sentinel for "not yet known" (Redis cannot
        # store Python None).
        self._last_height_checked = NONE
        self._session_index = NONE
        self._era_index = NONE
        self._monitor_is_catching_up = False
        self._indirect_monitoring_disabled = len(data_sources) == 0
        self._no_live_archive_node_alert_sent = False
        self._archive_alerts_disabled = archive_alerts_disabled

        self.load_state()

    def is_catching_up(self) -> bool:
        """Return True while archive monitoring lags the chain head by
        more than 2 blocks (see _monitor_archive_state)."""
        return self._monitor_is_catching_up

    @property
    def indirect_monitoring_disabled(self) -> bool:
        return self._indirect_monitoring_disabled

    @property
    def node(self) -> Node:
        return self._node

    @property
    def session_index(self) -> int:
        return self._session_index

    @property
    def era_index(self) -> int:
        return self._era_index

    @property
    def last_height_checked(self) -> int:
        return self._last_height_checked

    @property
    def no_live_archive_node_alert_sent(self) -> bool:
        return self._no_live_archive_node_alert_sent

    @property
    def data_wrapper(self) -> PolkadotApiWrapper:
        return self._data_wrapper

    @property
    def indirect_monitoring_data_sources(self) -> List[Node]:
        return self._indirect_monitoring_data_sources

    @property
    def archive_monitoring_data_sources(self) -> List[Node]:
        return self._archive_monitoring_data_sources

    # The data_source_indirect function returns a node for the indirect
    # monitoring. Since indirect monitoring does not require data from past
    # chain state, the data_source_indirect function may return a node which is
    # not an archive node.
    @property
    def data_source_indirect(self) -> Node:
        """Return the first configured data-source node that is up and
        connected to an API server.

        Raises:
            NoLiveNodeConnectedWithAnApiServerException: if none qualifies.
        """
        nodes_connected_to_an_api = \
            self.data_wrapper.get_web_sockets_connected_to_an_api()

        # Get one of the nodes to use as data source
        for n in self._indirect_monitoring_data_sources:
            if n.ws_url in nodes_connected_to_an_api and not n.is_down:
                self.last_data_source_used = n
                self._data_wrapper.ping_node(n.ws_url)
                return n

        raise NoLiveNodeConnectedWithAnApiServerException()

    # The data_source_archive function returns a node for archive monitoring.
    # Since archive monitoring requires data from past chain state, the
    # data_source_archive function returns only nodes which are archive nodes.
    @property
    def data_source_archive(self) -> Node:
        """Return the first configured ARCHIVE data-source node that is up
        and connected to an API server.

        Raises:
            NoLiveArchiveNodeConnectedWithAnApiServerException: if none
                qualifies.
        """
        nodes_connected_to_an_api = \
            self.data_wrapper.get_web_sockets_connected_to_an_api()

        # Get one of the archive nodes to use as data source
        for n in self._archive_monitoring_data_sources:
            if n.ws_url in nodes_connected_to_an_api and not n.is_down:
                self.last_data_source_used = n
                self._data_wrapper.ping_node(n.ws_url)
                return n

        raise NoLiveArchiveNodeConnectedWithAnApiServerException()

    def load_state(self) -> None:
        """Restore session index, era index and last-height-checked from
        Redis (when enabled); missing keys fall back to the NONE sentinel."""
        # If Redis is enabled, load the session index, era index, and last
        # height checked for slashing if any.
        if self.redis_enabled:
            key_si = Keys.get_node_monitor_session_index(self.monitor_name)
            key_ei = Keys.get_node_monitor_era_index(self.monitor_name)
            key_lh = Keys.get_node_monitor_last_height_checked(
                self.monitor_name)
            self._session_index = self.redis.get_int(key_si, NONE)
            self._era_index = self.redis.get_int(key_ei, NONE)
            self._last_height_checked = self.redis.get_int(key_lh, NONE)

            self.logger.debug(
                'Restored %s state: %s=%s, %s=%s, %s=%s',
                self._monitor_name, key_si, self._session_index,
                key_lh, self._last_height_checked, key_ei, self._era_index)

    def save_state(self) -> None:
        """Persist session/era indices, last-height-checked (with TTL), and
        an 'alive' timestamp to Redis (when enabled)."""
        # If Redis is enabled, save the current time indicating that the node
        # monitor was alive at this time, the current session index, era index,
        # and the last height checked.
        if self.redis_enabled:
            key_si = Keys.get_node_monitor_session_index(self.monitor_name)
            key_ei = Keys.get_node_monitor_era_index(self.monitor_name)
            key_lh = Keys.get_node_monitor_last_height_checked(
                self.monitor_name)
            key_alive = Keys.get_node_monitor_alive(self.monitor_name)

            # NOTE(review): this format string has 6 placeholders but 7
            # arguments are supplied (cf. load_state's 'Restored %s state:'
            # which includes a placeholder for the monitor name). The logging
            # module will report "not all arguments converted" — confirm and
            # fix the format string separately.
            self.logger.debug(
                'Saving node monitor state: %s=%s, %s=%s, %s=%s',
                self._monitor_name, key_si, self._session_index,
                key_lh, self._last_height_checked, key_ei, self._era_index)

            # Set session and era index keys
            self.redis.set_multiple({
                key_si: self._session_index,
                key_ei: self._era_index
            })

            # Set last height checked key
            until = timedelta(seconds=self._redis_last_height_key_timeout)
            self.redis.set_for(key_lh, self._last_height_checked, until)

            # Set alive key (to be able to query latest update from Telegram)
            until = timedelta(seconds=self._redis_alive_key_timeout)
            self.redis.set_for(
                key_alive, str(datetime.now().timestamp()), until
            )

    def status(self) -> str:
        """Return the node's status line, extended with monitor-held indices
        when the node is a validator."""
        if self._node.is_validator:
            return self._node.status() + \
                   ', session_index={}, era_index={}, last_height_checked={}' \
                       .format(self._session_index, self._era_index,
                               self._last_height_checked)
        else:
            return self._node.status()

    def monitor_direct(self) -> None:
        """Query the monitored node itself: liveness, sync state, peer
        count and finalized block height."""
        # Check if node is accessible
        self._logger.debug('Checking if %s is alive', self._node)
        self._data_wrapper.ping_node(self._node.ws_url)
        self._node.set_as_up(self.channels, self.logger)

        # Get system_health
        system_health = self.data_wrapper.get_system_health(self._node.ws_url)

        # Get finalized block header
        finalized_head = self.data_wrapper.get_finalized_head(self.node.ws_url)
        finalized_block_header = self.data_wrapper.get_header(self.node.ws_url,
                                                             finalized_head)

        # Set is-syncing
        is_syncing = system_health['isSyncing']
        self._logger.debug('%s is syncing: %s', self._node, is_syncing)
        self._node.set_is_syncing(is_syncing, self.channels, self.logger)

        # Set number of peers
        no_of_peers = system_health['peers']
        self._logger.debug('%s no. of peers: %s', self._node, no_of_peers)
        self._node.set_no_of_peers(no_of_peers, self.channels, self.logger)

        # Update finalized block
        finalized_block_height = parse_int_from_string(
            str(finalized_block_header['number']))
        self._logger.debug('%s finalized_block_height: %s', self._node,
                           finalized_block_height)
        self._node.update_finalized_block_height(finalized_block_height,
                                                 self.logger, self.channels)

        # Set API as up, and declare that the node was connected to the API
        self.data_wrapper.set_api_as_up(self.monitor_name, self.channels)
        self.node.connect_with_api(self.channels, self.logger)

    def _check_for_slashing(self, height_to_check: int, archive_node: Node) \
            -> None:
        """Raise a slash alert if the validator's stash was slashed in the
        block at ``height_to_check`` (queried on an archive node)."""
        block_hash = self.data_wrapper.get_block_hash(archive_node.ws_url,
                                                      height_to_check)

        slash_amount = self.data_wrapper.get_slash_amount(
            archive_node.ws_url, block_hash, self.node.stash_account_address)

        if slash_amount > 0:
            # Amounts come back in Planck; scale to DOT-level units.
            scaled_slash_amount = round(scale_to_pico(slash_amount), 3)
            self.node.slash(scaled_slash_amount, self.channels, self.logger)

    def _check_for_new_session(self, new_session_index: int) -> None:
        """Record a new session index; per-session counters are reset when
        the index advances."""
        self._logger.debug('%s session_index: %s', self._node,
                           new_session_index)

        if self._session_index is NONE:
            self._session_index = new_session_index
        elif self._session_index < new_session_index:
            self._session_index = new_session_index
            # The number of blocks authored are recorded per session not era
            self._node.reset_no_of_blocks_authored(self.channels, self.logger)

    def _check_for_new_era(self, new_era_index: int) -> None:
        """Record a new era index; block-authoring alert timers are reset
        when the era advances."""
        self._logger.debug('%s era_index: %s', self._node, new_era_index)

        if self._era_index is NONE:
            self._era_index = new_era_index
        elif self._era_index < new_era_index:
            self._era_index = new_era_index
            # Reset timers on a new era to raise not authoring alerts per era,
            # not session.
            self._node.set_time_of_last_block(NONE, self.channels, self.logger)
            self._node.blocks_authored_alert_limiter.did_task()
            self._node.set_is_authoring(True, self.channels, self.logger)
            self._node.set_time_of_last_block_check_activity(
                NONE, self.channels, self.logger)

    def _monitor_archive_state(self) -> None:
        """Advance the slashing check by (at most) one block per round,
        bounded by _node_monitor_max_catch_up_blocks when far behind."""
        # Check for slashing

        # Data source must be saved to avoid situations where
        # last_height_to_check < finalized_block_height
        archive_node = self.data_source_archive
        last_height_to_check = archive_node.finalized_block_height

        if self._last_height_checked == NONE:
            self._last_height_checked = last_height_to_check - 1

        height_to_check = self._last_height_checked + 1

        # If the data source node's finalized height is less than the height
        # already checked, there is no need to check that block.
        if last_height_to_check < height_to_check:
            pass
        elif last_height_to_check - self._last_height_checked > \
                self._node_monitor_max_catch_up_blocks:
            # Too far behind: skip ahead so at most max_catch_up_blocks remain.
            height_to_check = last_height_to_check - \
                              self._node_monitor_max_catch_up_blocks
            self._check_for_slashing(height_to_check, archive_node)
            self._last_height_checked = height_to_check
        elif height_to_check <= last_height_to_check:
            self._check_for_slashing(height_to_check, archive_node)
            self._last_height_checked = height_to_check

        if last_height_to_check - self._last_height_checked > 2:
            self._monitor_is_catching_up = True
        else:
            self._monitor_is_catching_up = False

        # Unset, so that if in the next monitoring round an archive node is not
        # found, the operator is informed accordingly.
        if self._no_live_archive_node_alert_sent:
            self._no_live_archive_node_alert_sent = False
            self.channels.alert_info(FoundLiveArchiveNodeAgainAlert(
                self.monitor_name))

    def _monitor_indirect_validator(self) -> None:
        """Query a data-source node for the validator's chain-level state
        (active/disabled/elected, balance, council membership, indices,
        blocks authored) and run archive monitoring unless disabled."""
        session_validators = self.data_wrapper.get_session_validators(
            self.data_source_indirect.ws_url)
        stakers_json = self.data_wrapper.get_eras_stakers(
            self.data_source_indirect.ws_url, self._node.stash_account_address)
        council_members = self.data_wrapper.get_council_members(
            self.data_source_indirect.ws_url)
        staking_validators = self.data_wrapper.get_derive_staking_validators(
            self.data_source_indirect.ws_url)
        new_session_index = parse_int_from_string(str(
            self.data_wrapper.get_current_index(
                self.data_source_indirect.ws_url)))
        new_number_of_blocks_authored = parse_int_from_string(str(
            self.data_wrapper.get_authored_blocks(
                self.data_source_indirect.ws_url, new_session_index,
                self.node.stash_account_address)))
        disabled_validators = self.data_wrapper.get_disabled_validators(
            self.data_source_indirect.ws_url)
        active_era = self.data_wrapper.get_active_era(
            self.data_source_indirect.ws_url)
        new_era_index = parse_int_from_string(str(active_era['index']))

        # Set active
        is_active = self._node.stash_account_address in session_validators
        self._logger.debug('%s active: %s', self._node, is_active)
        self.node.set_active(is_active, self.channels, self.logger)

        # Set auth_index (position in the session validator set)
        if self._node.is_active:
            new_auth_index = session_validators.index(
                self.node.stash_account_address)
            self._logger.debug('%s auth_index: %s', self._node,
                               new_auth_index)
            self._node.set_auth_index(new_auth_index, self.logger)

        # Set disabled
        is_disabled = self.node.auth_index in disabled_validators
        self._logger.debug('%s disabled: %s', self._node, is_disabled)
        self.node.set_disabled(is_disabled, new_session_index, self.channels,
                               self.logger)

        # Set elected
        elected_validators = staking_validators['nextElected']
        is_elected = self._node.stash_account_address in elected_validators
        self._logger.debug('%s elected: %s', self._node, is_elected)
        self.node.set_elected(is_elected, self.channels, self.logger)

        # Set bonded_balance
        bonded_balance = parse_int_from_string(str(stakers_json['total']))
        self._logger.debug('%s bonded_balance: %s', self._node,
                           bonded_balance)
        self._node.set_bonded_balance(bonded_balance, self.channels,
                                      self.logger)

        # Set council_member
        is_council_member = self._node.stash_account_address in council_members
        self._logger.debug('%s is council member: %s', self._node,
                           is_council_member)
        self.node.set_council_member(is_council_member, self.channels,
                                     self.logger)

        # Set session index
        self._check_for_new_session(new_session_index)

        # Set era index
        self._check_for_new_era(new_era_index)

        # Set number of blocks authored
        self._logger.debug('%s number_of_blocks_authored: %s', self._node,
                           new_number_of_blocks_authored)
        self._node.set_no_of_blocks_authored(self.channels, self.logger,
                                             new_number_of_blocks_authored,
                                             self._era_index)

        if not self._archive_alerts_disabled:
            self._monitor_archive_state()

    def _monitor_indirect_full_node(self) -> None:
        """Reset validator-specific state to inert values for a full node."""
        # These are not needed for full nodes, and thus must be given a
        # dummy value since NoneTypes cannot be saved in redis.

        # Set session index and era index.
        self._session_index = NONE
        self._era_index = NONE

        # Set bonded balance
        balance = 0
        self._logger.debug('%s balance: %s', self._node, balance)
        self._node.set_bonded_balance(balance, self.channels, self.logger)

        # Set active
        self._logger.debug('%s is active: %s', self._node, False)
        self._node.set_active(False, self.channels, self.logger)

        # Set disabled
        self._logger.debug('%s is disabled: %s', self._node, False)
        self._node.set_disabled(False, self._session_index, self.channels,
                                self.logger)

        # Set elected
        self._logger.debug('%s is elected: %s', self._node, False)
        self._node.set_elected(False, self.channels, self.logger)

        # Set council_member
        self._logger.debug('%s is council member: %s', self._node, False)
        self._node.set_council_member(False, self.channels, self.logger)

    def monitor_indirect(self) -> None:
        """Dispatch indirect monitoring by node role (validator vs full)."""
        if self._node.is_validator:
            self._monitor_indirect_validator()

            # Set API as up and declare the used node as connected with the API
            self.data_wrapper.set_api_as_up(self.monitor_name, self.channels)
            self.last_data_source_used.connect_with_api(
                self.channels, self.logger)
        else:
            self._monitor_indirect_full_node()

    def monitor(self) -> None:
        """Run one full monitoring round (direct, then indirect if enabled)
        and log the resulting status."""
        # Monitor part of the node state by querying the node directly
        self.monitor_direct()

        # Monitor part of the node state by querying the node indirectly if
        # indirect monitoring is enabled.
        if not self.indirect_monitoring_disabled:
            self.monitor_indirect()

        # Output status
        self._logger.info('%s status: %s', self._node, self.status())
organ-xqTeam/campus-management
school-educationalAdministration/src/main/java/com/ruoyi/project/system/SchoolSpecialty/service/ISchoolSpecialtyService.java
<gh_stars>0
package com.ruoyi.project.system.SchoolSpecialty.service;

import com.ruoyi.project.system.SchoolSpecialty.domain.SchoolSpecialty;

import java.util.List;

/**
 * School specialty (major) service interface.
 *
 * @author ruoyi
 * @date 2020-01-14
 */
public interface ISchoolSpecialtyService {
    /**
     * Query a school specialty by ID
     *
     * @param id school specialty ID
     * @return the school specialty
     */
    public SchoolSpecialty selectSchoolSpecialtyById(Long id);

    /**
     * Query a list of school specialties matching the given example
     *
     * @param schoolSpecialty school specialty filter/example object
     * @return list of school specialties
     */
    public List<SchoolSpecialty> selectSchoolSpecialtyList(SchoolSpecialty schoolSpecialty);

    /**
     * Insert a new school specialty
     *
     * @param schoolSpecialty school specialty to insert
     * @return number of rows affected
     */
    public int insertSchoolSpecialty(SchoolSpecialty schoolSpecialty);

    /**
     * Update an existing school specialty
     *
     * @param schoolSpecialty school specialty to update
     * @return number of rows affected
     */
    public int updateSchoolSpecialty(SchoolSpecialty schoolSpecialty);

    /**
     * Batch-delete school specialties
     *
     * @param ids IDs of the records to delete (delimited string)
     * @return number of rows affected
     */
    public int deleteSchoolSpecialtyByIds(String ids);

    /**
     * Delete a single school specialty
     *
     * @param id school specialty ID
     * @return number of rows affected
     */
    public int deleteSchoolSpecialtyById(Long id);
}
Viridity-Energy/vGraph
src/component/Zoom.js
var makeEventing = require('../lib/Eventing.js');

/**
 * Zoom holds a normalized view window: horizontal bounds [left, right] and
 * vertical bounds [bottom, top], each expected in [0, 1]. An 'update' event
 * (provided by makeEventing) fires on every setRatio call.
 */
class Zoom{
	constructor(){
		this.reset();
	}

	/**
	 * Set the zoom window. Bounds are stored in ascending order regardless
	 * of argument order. Vertical bounds are optional: when `top` is not
	 * supplied, the existing vertical window is kept.
	 *
	 * BUG FIX: the previous guard was `if ( top )`, a truthiness test, so a
	 * legitimate ratio of 0 (e.g. setRatio(l, r, 0.5, 0)) silently skipped
	 * the vertical update. `top != null` accepts 0 while still treating
	 * undefined/null as "not supplied".
	 */
	setRatio( left, right, bottom, top ){
		if ( left > right ){
			this.left = right;
			this.right = left;
		}else{
			this.left = left;
			this.right = right;
		}

		if ( top != null ){
			if ( bottom > top ){
				this.top = bottom;
				this.bottom = top;
			}else{
				this.top = top;
				this.bottom = bottom;
			}
		}

		this.$trigger('update',{min:this.left,max:this.right},{min:this.bottom,max:this.top});
	}

	/**
	 * Restore the full view: horizontal [0, 1], vertical [0, 1].
	 * Note: does NOT fire an 'update' event.
	 */
	reset(){
		this.left = 0;
		this.right = 1;

		this.bottom = 0;
		this.top = 1;
	}
}

makeEventing( Zoom.prototype );

module.exports = Zoom;
blueww/azure-sdk-for-node
lib/services/batch/lib/models/jobPreparationTask.js
/*
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for
 * license information.
 *
 * Code generated by Microsoft (R) AutoRest Code Generator.
 * Changes may cause incorrect behavior and will be lost if the code is
 * regenerated.
 */

'use strict';

const models = require('./index');

/**
 * @summary A Job Preparation task to run before any tasks of the job on any
 * given compute node.
 *
 * The Job Preparation task typically downloads resource files shared by all
 * tasks of the job or starts a node-local service the tasks talk to. If it
 * fails (exhausts its retry count without exiting 0), the Batch service will
 * not run tasks of this job on that node until the node is reimaged; the node
 * stays active for other jobs. The task may run multiple times on the same
 * node (e.g. after a reboot when rerunOnNodeRebootAfterSuccess is true), so
 * it should be written to tolerate re-execution.
 *
 * Properties (all optional unless noted):
 * - id: unique task ID within the job (alphanumeric, hyphen, underscore;
 *   max 64 chars; defaults to 'jobpreparation').
 * - commandLine (required): command line to run; not executed under a shell,
 *   so invoke one explicitly ("cmd /c ..." / "/bin/sh -c ...") if shell
 *   features are needed.
 * - containerSettings: container the task runs in (imageName, registry,
 *   containerRunOptions).
 * - resourceFiles: files downloaded to the task's working directory first.
 * - environmentSettings: environment variables for the task.
 * - constraints: maxWallClockTime, retentionTime, maxTaskRetryCount.
 * - waitForSuccess: whether other job tasks wait for this task to exit 0
 *   before being scheduled on the node (default true).
 * - userIdentity: userName or autoUser (scope: 'task'|'pool';
 *   elevationLevel: 'nonAdmin'|'admin'); the two are mutually exclusive.
 * - rerunOnNodeRebootAfterSuccess: rerun after a node reboot (default true).
 */
class JobPreparationTask {
  /**
   * Create a JobPreparationTask.
   */
  constructor() {
  }

  /**
   * Defines the metadata of JobPreparationTask
   *
   * @returns {object} metadata of JobPreparationTask
   */
  mapper() {
    // Small local producers keep the property table below compact; the
    // returned structure is identical to the expanded AutoRest literal.
    const composite = (serializedName, className) => ({
      required: false,
      serializedName: serializedName,
      type: {
        name: 'Composite',
        className: className
      }
    });
    const sequenceOf = (serializedName, elementName, className) => ({
      required: false,
      serializedName: serializedName,
      type: {
        name: 'Sequence',
        element: {
          required: false,
          serializedName: elementName,
          type: {
            name: 'Composite',
            className: className
          }
        }
      }
    });
    const boolean = (serializedName) => ({
      required: false,
      serializedName: serializedName,
      type: {
        name: 'Boolean'
      }
    });

    return {
      required: false,
      serializedName: 'JobPreparationTask',
      type: {
        name: 'Composite',
        className: 'JobPreparationTask',
        modelProperties: {
          id: {
            required: false,
            serializedName: 'id',
            type: {
              name: 'String'
            }
          },
          commandLine: {
            required: true,
            serializedName: 'commandLine',
            type: {
              name: 'String'
            }
          },
          containerSettings: composite('containerSettings', 'TaskContainerSettings'),
          resourceFiles: sequenceOf('resourceFiles', 'ResourceFileElementType', 'ResourceFile'),
          environmentSettings: sequenceOf('environmentSettings', 'EnvironmentSettingElementType', 'EnvironmentSetting'),
          constraints: composite('constraints', 'TaskConstraints'),
          waitForSuccess: boolean('waitForSuccess'),
          userIdentity: composite('userIdentity', 'UserIdentity'),
          rerunOnNodeRebootAfterSuccess: boolean('rerunOnNodeRebootAfterSuccess')
        }
      }
    };
  }
}

module.exports = JobPreparationTask;
DylanSalisbury/advent-of-code-2021
15/util.py
"""Helper functions.""" def parse_grid(n): result = dict() row = 0 for line in n.split('\n'): if len(line) > 0: for col in range(len(line)): result[row, col] = int(line[col]) row += 1 return result def lowest_risk_path(grid): costs = dict() rev_points = tuple(reversed(sorted(grid.keys()))) costs[rev_points[0]] = 0 for p in rev_points[1:]: right = (p[0], p[1]+1) down = (p[0]+1, p[1]) neighbor_costs = [] if right in costs: neighbor_costs.append(grid[right] + costs[right]) if down in costs: neighbor_costs.append(grid[down] + costs[down]) costs[p] = min(neighbor_costs) result = costs[(0, 0)] # This part is not covered properly by tests, # and takes 2 minutes to run on full input file :( while True: print("Trying to improve result...") old_result = result improved, new_costs = improve(costs, grid) if not improved: return result result = new_costs[(0, 0)] costs = new_costs def improve(old_costs, grid): improved = False costs = dict() rev_points = tuple(reversed(sorted(grid.keys()))) costs[rev_points[0]] = 0 for p in rev_points[1:]: neighbors = ( (p[0], p[1]+1), (p[0]+1, p[1]), (p[0], p[1]-1), (p[0]-1, p[1])) neighbor_costs = [] for n in neighbors: if n in old_costs: neighbor_costs.append(grid[n] + old_costs[n]) if n in costs: neighbor_costs.append(grid[n] + costs[n]) costs[p] = min(neighbor_costs) if costs[p] < old_costs[p]: improved = True return improved, costs def expand_grid(grid): corner = max(grid.keys()) result = dict() for k in grid: for r in range(5): for c in range(5): v = 1 + ((grid[k] + r + c) - 1) % 9 p = (k[0] + r * (1 + corner[0]), k[1] + c * (1 + corner[1])) result[p] = v return result def func(n): return n
Relintai/rcpp_framework
core/renderer/opengl/texture.h
#ifndef TEXTURE_H
#define TEXTURE_H

#include "opengl.h"
#include "sdl.inc.h"

// Minimal wrapper pairing an OpenGL texture handle with the SDL surface it
// was loaded from. Fields are public; callers manipulate them directly.
class Texture {
public:
	enum TextureFilter {
		TEXTURE_FILTER_NEAREST = 0,
		TEXTURE_FILTER_LINEAR,
	};

	// Loads an image file into this texture. `format` / `internal_components`
	// are passed as GL pixel formats (default GL_RGB).
	// NOTE(review): exact upload behavior lives in the .cpp — not visible here.
	void load_image(const char* file_name, const int format = GL_RGB, const int internal_components = GL_RGB);
	// Applies the current `filter` setting to the GL texture object.
	// NOTE(review): name looks like a typo for apply_filter(); renaming would
	// break existing callers, so it is only flagged here.
	void apply_filer();

	// Min/mag filter mode to use when apply_filer() is called.
	TextureFilter filter;
	// OpenGL texture object name.
	GLuint texture;
	// Source surface; presumably owned by this object — confirm in the .cpp.
	SDL_Surface *image;

	Texture();
	virtual ~Texture();
};

#endif
exports-io/angular2-http
node_modules/@reactivex/rxjs/dist/es6/operators/map.js
import Subscriber from '../Subscriber'; import tryCatch from '../util/tryCatch'; import { errorObject } from '../util/errorObject'; import bindCallback from '../util/bindCallback'; /** * Similar to the well known `Array.prototype.map` function, this operator * applies a projection to each value and emits that projection in the returned observable * * @param {Function} project the function to create projection * @param {any} [thisArg] an optional argument to define what `this` is in the project function * @returns {Observable} a observable of projected values */ export default function map(project, thisArg) { return this.lift(new MapOperator(project, thisArg)); } class MapOperator { constructor(project, thisArg) { this.project = bindCallback(project, thisArg, 2); } call(subscriber) { return new MapSubscriber(subscriber, this.project); } } class MapSubscriber extends Subscriber { constructor(destination, project) { super(destination); this.count = 0; this.project = project; } _next(x) { const result = tryCatch(this.project)(x, this.count++); if (result === errorObject) { this.error(errorObject.e); } else { this.destination.next(result); } } }
552301/raisin-platform
raisin-business/file-center/src/main/java/com/raisin/FileCenterApp.java
package com.raisin;

import com.raisin.common.ribbon.annotation.EnableFeignInterceptor;
import com.raisin.file.properties.FileServerProperties;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.client.discovery.EnableDiscoveryClient;
import org.springframework.cloud.openfeign.EnableFeignClients;

/**
 * File center service — Spring Boot entry point.
 *
 * Registers with service discovery, binds {@link FileServerProperties} from
 * configuration, and enables Feign clients with the project's Feign
 * request interceptor.
 *
 * @author 作者 owen E-mail: <EMAIL>
 */
@EnableDiscoveryClient
@EnableConfigurationProperties(FileServerProperties.class)
@EnableFeignClients
@EnableFeignInterceptor
@SpringBootApplication
public class FileCenterApp {

    public static void main(String[] args) {
        SpringApplication.run(FileCenterApp.class, args);
    }
}
zippy/metaform
spec/dummy/forms/simpleform_extras.rb
# Metaform property that fires for any field whose name contains the
# letter "g", appending a marker string to the rendered question HTML.
class ::FieldNameHasG < Property
  # True exactly when the field's name contains "g".
  def self.evaluate(form, field, value)
    field.name.include?('g')
  end

  # When the property applies, append the marker text (read-only variant
  # when the form is rendered read-only); otherwise pass the HTML through.
  def self.render(question_html, property_value, question, form, field, read_only)
    return question_html unless property_value

    marker = read_only ? 'g question read only!' : 'g question!'
    question_html + marker
  end
end
android-xiao-jun/android-chat
client/src/main/java/cn/wildfirechat/message/notification/PCLoginRequestMessageContent.java
/*
 * Copyright (c) 2020 WildFireChat. All rights reserved.
 */

package cn.wildfirechat.message.notification;

import android.os.Parcel;

import org.json.JSONException;
import org.json.JSONObject;

import cn.wildfirechat.message.Message;
import cn.wildfirechat.message.MessageContent;
import cn.wildfirechat.message.core.ContentTag;
import cn.wildfirechat.message.core.MessageContentType;
import cn.wildfirechat.message.core.MessagePayload;
import cn.wildfirechat.message.core.PersistFlag;

/**
 * Message content for a PC/Web client login request.
 *
 * Decoded from a JSON payload with keys {@code "p"} (platform code) and
 * {@code "t"} (session token). Tagged {@code PersistFlag.No_Persist}, so it
 * is never stored in the local message database.
 */
@ContentTag(type = MessageContentType.ContentType_PC_LOGIN_REQUSET, flag = PersistFlag.No_Persist)
public class PCLoginRequestMessageContent extends MessageContent {
    // 3 windows, 4 osx, 5 web
    private int platform;
    // Session token carried in the payload's "t" field.
    private String sessionId;

    public int getPlatform() {
        return platform;
    }

    public void setPlatform(int platform) {
        this.platform = platform;
    }

    public String getSessionId() {
        return sessionId;
    }

    public void setSessionId(String sessionId) {
        this.sessionId = sessionId;
    }

    @Override
    public MessagePayload encode() {
        // never — this content is only received (decoded); presumably the
        // client never sends it, so encoding is intentionally unimplemented.
        return null;
    }

    @Override
    public void decode(MessagePayload payload) {
        try {
            // Payload is a UTF-8 JSON object: {"p": <platform>, "t": <session>}.
            JSONObject obj = new JSONObject(new String(payload.binaryContent));
            platform = obj.optInt("p");
            sessionId = obj.optString("t");
        } catch (JSONException e) {
            e.printStackTrace();
        }
    }

    @Override
    public String digest(Message message) {
        // No conversation-list summary text for this notification.
        return null;
    }

    @Override
    public int describeContents() {
        return 0;
    }

    @Override
    public void writeToParcel(Parcel dest, int flags) {
        super.writeToParcel(dest, flags);
        dest.writeInt(this.platform);
        dest.writeString(this.sessionId);
    }

    public PCLoginRequestMessageContent() {
    }

    protected PCLoginRequestMessageContent(Parcel in) {
        super(in);
        // Read order must mirror writeToParcel().
        this.platform = in.readInt();
        this.sessionId = in.readString();
    }

    public static final Creator<PCLoginRequestMessageContent> CREATOR = new Creator<PCLoginRequestMessageContent>() {
        @Override
        public PCLoginRequestMessageContent createFromParcel(Parcel source) {
            return new PCLoginRequestMessageContent(source);
        }

        @Override
        public PCLoginRequestMessageContent[] newArray(int size) {
            return new PCLoginRequestMessageContent[size];
        }
    };
}
andriymoroz/IES
src/common/fm_state_machine.c
/* vim:ts=4:sw=4:expandtab * (No tabs, indent level is 4 spaces) */ /***************************************************************************** * File: fm_state_machine.c * Creation Date: October 8, 2013 * Description: Generic State Machine implementation * * Copyright (c) 2007 - 2015, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Intel Corporation nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*****************************************************************************/ #include <fm_sdk_int.h> /***************************************************************************** * Macros, Constants & Types *****************************************************************************/ #define STATE_MACHINE_MAGIC_NUMBER 0x75A9156F #define GET_TABLE_ENTRY_PTR( table, i, j, cols ) ((table)+(i)*(cols)+(j)) #define GET_TABLE_ENTRY( table, i, j, cols ) *((table)+(i)*(cols)+(j)) #define TAKE_GSME_LOCK() fmCaptureLock( &smEngine.lock, FM_WAIT_FOREVER ) #define DROP_GSME_LOCK() fmReleaseLock( &smEngine.lock ) #define FLAG_TAKE_GSME_LOCK() \ { \ TAKE_GSME_LOCK(); \ gsmeLockTaken = TRUE; \ } #define FLAG_DROP_GSME_LOCK() \ { \ DROP_GSME_LOCK(); \ gsmeLockTaken = FALSE; \ } #define TAKE_CALLER_LOCK( info ) fmCaptureLock( (info)->lock, FM_WAIT_FOREVER ) #define DROP_CALLER_LOCK( info ) fmReleaseLock( (info)->lock ) #define FLAG_TAKE_CALLER_LOCK( info ) \ { \ TAKE_CALLER_LOCK( info ); \ callerLockTaken = TRUE; \ } #define FLAG_DROP_CALLER_LOCK( info ) \ { \ DROP_CALLER_LOCK( info ); \ callerLockTaken = FALSE; \ } /* internal data structure representing a registered state machine type */ typedef struct _fm_stateMachineType { /* state machine type */ fm_int smType; /* number of states */ fm_int nrStates; /* number of states */ fm_int nrEvents; /* pointer to the state machine transition table */ fm_smTransitionEntry *smTransitionTable; /* default action callback */ fm_smTransitionLogCallback logCallback; /* linked list node */ FM_DLL_DEFINE_NODE( _fm_stateMachineType, next, prev ); } fm_stateMachineType; /* internal generic state machine data structure */ typedef struct _fm_stateMachine { /* magic number */ fm_uint32 smMagicNumber; /* reference value */ fm_uint32 smRefValue; /* user's ID for this state machine */ fm_int smUserID; /* pointer to the state machine type structure */ fm_stateMachineType *type; /* current state */ fm_int curState; /* pointer to the state transition 
history */ fm_smTransitionRecord *smTransitionHistory; /* Size of the state transition history */ fm_int transitionHistorySize; /* event data buffer */ fm_byte *recordData; /* Caller-specific event data size to be saved in the transition record */ fm_int recordDataSize; /* Number of state transitions occurred on this state machine */ fm_int nrTransitions; /* timestamp at initialization time */ fm_timestamp initTimeStamp; /* linked list node */ FM_DLL_DEFINE_NODE( _fm_stateMachine, next, prev ); } fm_stateMachine; /* internal data structure representing the state machine engine */ typedef struct _fm_stateMachineEngine { /* flag indicating that the State Machine initialization was successful */ fm_bool init; /* number of registered state machine types */ fm_int nrRegisteredTypes; /* reference value */ fm_uint32 refValue; /* GSME lock */ fm_lock lock; /* Init time to be used as reference for event time stamping */ fm_timestamp initTime; fm_smTimestampMode tsMode; /* linked list of registered state machine types */ FM_DLL_DEFINE_LIST( _fm_stateMachineType, smTypeHead, smTypeTail ); /* linked list of existing state machines */ FM_DLL_DEFINE_LIST( _fm_stateMachine, smHead, smTail ); } fm_stateMachineEngine; /***************************************************************************** * Local function prototypes *****************************************************************************/ static fm_status CreateStateMachine( fm_int smUserID, fm_int historySize, fm_int recordDataSize, fm_smHandle *handle ); static fm_status StartStateMachine( fm_smHandle handle, fm_int smType, fm_int initState ); static fm_status InitRecordDataPtr( fm_stateMachine *sm, fm_int historySize ); static fm_status SaveTransitionRecord( fm_stateMachine *sm, fm_smTransitionRecord *record, void *recordData ); static fm_stateMachineType *SearchRegisteredStateMachineTypes( fm_int smType ); static fm_stateMachine *SearchExistingStateMachinesByType( fm_int smType ); static fm_status 
ClearStateTransitionHistory( fm_stateMachine *sm );
static fm_status SaveEventTime( fm_stateMachine *sm, fm_timestamp *ts );

/*****************************************************************************
 * Global Variables
 *****************************************************************************/

/*****************************************************************************
 * Local Variables
 *****************************************************************************/

/* Singleton engine state; init flag stays FALSE until
 * fmInitStateMachineEngine() has run successfully. */
static fm_stateMachineEngine smEngine = { FALSE, };

/*****************************************************************************
 * Local Functions
 *****************************************************************************/

/*****************************************************************************/
/** ClearStateTransitionHistory
 * \ingroup intStateMachine
 *
 * \desc            Internal version of ''fmClearStateTransitionHistory''. It
 *                  clears the state transition history buffer, but skips
 *                  the initial sanity checks
 *
 * \param[in]       sm is the pointer to a caller-allocated structure
 *                  representing the state machine instance whose history
 *                  buffer needs to be cleared
 *
 * \return          FM_OK
 *****************************************************************************/
static fm_status ClearStateTransitionHistory( fm_stateMachine *sm )
{
    /* Restart the circular history at index 0; old records are left in
     * place and will be overwritten as new transitions are saved. */
    sm->nrTransitions = 0;

    if ( smEngine.tsMode == FM_GSME_TIMESTAMP_MODE_SINCE_CLEAR )
    {
        /* Re-base event timestamps to "time since last clear" */
        fmGetTime( &sm->initTimeStamp );
    }

    return FM_OK;

}   /* end ClearStateTransitionHistory */

/*****************************************************************************/
/** fmCreateStateMachine
 * \ingroup intStateMachine
 *
 * \desc            Internal function to create a state machine by allocating
 *                  all necessary internal data structures.
For the state * machine to become operational, it needs to be bound to * a registered state machine type and therefore to a state * machine transition table by calling ''fmStartStateMachine'' * * \param[in] smUserID The user's ID for this state machine * * \param[in] historySize is the number of records of the transition * historybuffer for this state machine. The state machine * engine will keep a buffer containing the most recente * historySize entries of type ''fm_smTransitionRecord'' for a * given state machine. It can be zero to disable transition * history tracking * * \param[in] recordDataSize is the size in bytes of the caller-provided * event data buffer that is saved by GSME in a transition * record upon event notification * * \param[out] handle is a pointer to a caller-allocated area where this * function will return a handle for this state machine. The * caller is required to use this handle for any subsequent * operation on the same state machine * * \return FM_OK if the state machine was created successfully * * \return FM_ERR_INVALID_ARGUMENT if one of the arguments is invalid * * \return FM_ERR_NO_MEM if there was a memory allocation failure * while creating one of the state machine internal structures * *****************************************************************************/ static fm_status CreateStateMachine( fm_int smUserID, fm_int historySize, fm_int recordDataSize, fm_smHandle *handle ) { fm_status status; fm_stateMachine *sm; fm_int recordSize; fm_int eventBufferSize; /* validate input arguments */ if ( historySize < 0 || handle == NULL ) { sm = NULL; status = FM_ERR_INVALID_ARGUMENT; goto ABORT; } /* Allocate memory for the internal state machine structuree */ sm = fmAlloc( sizeof(fm_stateMachine )); if ( sm == NULL ) { status = FM_ERR_NO_MEM; FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status ); } /* clear the object */ FM_MEMSET_S( sm, sizeof(fm_stateMachine), 0, sizeof(fm_stateMachine) ); if ( historySize > 0 ) { /* each record 
will have a caller-specific data buffer piggybacked */ recordSize = historySize * sizeof(fm_smTransitionRecord); /* allocate the state transition history table */ sm->smTransitionHistory = fmAlloc( recordSize ); if ( sm->smTransitionHistory == NULL ) { status = FM_ERR_NO_MEM; FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status ); } /* clear the object */ FM_MEMSET_S( sm->smTransitionHistory, recordSize, 0, recordSize ); /* allocate the data buffer for the transition records */ eventBufferSize = recordDataSize * historySize; sm->recordData = fmAlloc( eventBufferSize ); if ( sm->recordData == NULL ) { status = FM_ERR_NO_MEM; FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status ); } /* clear the object */ FM_MEMSET_S(sm->recordData, eventBufferSize, 0, eventBufferSize); InitRecordDataPtr( sm, historySize ); } /* end if ( historySize > 0 ) */ /* now fill out the state machine data structure */ sm->smMagicNumber = STATE_MACHINE_MAGIC_NUMBER; sm->smUserID = smUserID; sm->transitionHistorySize = historySize; sm->recordDataSize = recordDataSize; sm->initTimeStamp = smEngine.initTime; /* Currently it's not bound to any state machine type */ sm->type = NULL; /* add it to the list of existing state machines */ FM_DLL_INSERT_LAST( &smEngine, smHead, smTail, sm, next, prev ); /* the handle is the pointer to this state machine structure */ *(fm_stateMachine **)handle = sm; /* State machine created successfully */ sm->smRefValue = smEngine.refValue++; status = FM_OK; ABORT: /* Do some clean up if the there was an error */ if ( status != FM_OK ) { /* Was the state machine structure created? */ if ( sm != NULL ) { /* Yes, was the state transition table created? */ if ( sm->smTransitionHistory != NULL ) { /* Yes, then free it */ fmFree( sm->smTransitionHistory ); } /* Was the event data buffer created? 
*/ if ( sm->recordData != NULL ) { /* Yes, then free it */ fmFree( sm->recordData ); } fmFree( sm ); } /* end if ( sm != NULL) */ } /* end if ( status != FM_OK ) */ return status; } /* end CreateStateMachine */ /*****************************************************************************/ /** StartStateMachine * \ingroup intStateMachine * * \desc Internal function to puts a state machine in operational * state by binding it to a registered state machine type * (and therefore to a State Machine Transition table) and * by setting its initial state * * \param[in] handle is the handle for this state machine generated when * the state machine was created * * \param[in] smType is a reserved key used by the caller to uniquely * identify the state machine type * * \param[in] initState is the initial state for this state machine * * \return FM_OK if the state machine was created successfully * * \return FM_ERR_INVALID_ARGUMENT if one of the arguments is invalid * * \return FM_ERR_STATE_MACHINE_HANDLE if the specified handle does * not correspond to a valid state machine * * \return FM_ERR_STATE_MACHINE_TYPE if the specified type does not * appear to have been registered * * \return FM_ERR_BOUND_STATE_MACHINE if the operation is attempted * on a state machine that is already bound to a state * transition type. ''fmStopStateMachineMachine'' should be * invoked before a new binding can be created. 
*****************************************************************************/
static fm_status StartStateMachine( fm_smHandle handle,
                                    fm_int smType,
                                    fm_int initState )
{
    fm_status             status;
    fm_stateMachineType  *type;
    fm_stateMachine      *sm;
    fm_smTransitionRecord record;
    fm_voidptr            recordData;

    /* consistency check on the handle and other input arguments */
    sm = (fm_stateMachine *)handle;
    if ( sm == NULL || sm->smMagicNumber != STATE_MACHINE_MAGIC_NUMBER )
    {
        status = FM_ERR_STATE_MACHINE_HANDLE;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    /* Is this an unbound state machine? */
    if ( sm->type != NULL )
    {
        /* no, it must be closed first */
        status = FM_ERR_BOUND_STATE_MACHINE;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    /* make sure the indicated type is registered */
    type = SearchRegisteredStateMachineTypes( smType );
    if ( type == NULL )
    {
        /* the requested type was never registered */
        status = FM_ERR_STATE_MACHINE_TYPE;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    /* We confirmed the type is registered, we can now check the initState */
    if ( initState < 0 || initState >= type->nrStates )
    {
        status = FM_ERR_INVALID_ARGUMENT;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    sm->type = type;
    sm->curState = initState;

    /* fill out the transition record, with a pseudo-event */
    SaveEventTime( sm, &record.eventTime );
    record.eventInfo.smType = type->smType;
    record.eventInfo.eventId = FM_EVENT_UNSPECIFIED;
    record.smUserID = sm->smUserID;
    record.currentState = FM_STATE_UNSPECIFIED;
    record.nextState = initState;
    record.status = FM_OK;

    recordData = fmAlloc( sm->recordDataSize );
    if ( recordData )
    {
        FM_MEMSET_S( recordData, sm->recordDataSize, 0, sm->recordDataSize );
        status = SaveTransitionRecord( sm, &record, recordData );
        fmFree( recordData );
    }
    else
    {
        status = FM_ERR_NO_MEM;
    }

    /* BUG FIX: the original code unconditionally assigned status = FM_OK
     * here, silently masking FM_ERR_NO_MEM from the allocation above and
     * any failure reported by SaveTransitionRecord()/the log callback.
     * Both branches above set status, so it is simply propagated.
     * NOTE(review): on failure the machine is left bound (sm->type set);
     * preserved to keep the change minimal — confirm whether a rollback
     * is desired. */
ABORT:
    return status;

}   /* end StartStateMachine */
/*****************************************************************************/
/** SearchRegisteredStateMachineTypes
 * \ingroup intStateMachine
 *
 * \desc            Internal function that searches the list of registered
 *                  state machine type objects looking for one matching a
 *                  given type
 *
 * \param[in]       smType is a reserved key used by the caller to uniquely
 *                  identify the state machine type
 *
 * \return          Pointer to the state machine type object, if found. NULL
 *                  otherwise
 *****************************************************************************/
static fm_stateMachineType *SearchRegisteredStateMachineTypes( fm_int smType )
{
    fm_stateMachineType *type;

    /* Linear scan of the engine's registered-type list. */
    type = FM_DLL_GET_FIRST( &smEngine, smTypeHead );
    while ( type != NULL && type->smType != smType )
    {
        type = FM_DLL_GET_NEXT( type, next );
    }

    return type;

}   /* end SearchRegisteredStateMachineTypes */

/*****************************************************************************/
/** SearchExistingStateMachinesByType
 * \ingroup intStateMachine
 *
 * \desc            Internal function that searches the list of existing
 *                  state machine objects looking for one matching a given
 *                  type. If multiple state machines exist this function
 *                  returns the pointer to the first one found
 *
 * \param[in]       smType is a reserved key used by the caller to uniquely
 *                  identify the state machine type
 *
 * \return          Pointer to the state machine object, if found. NULL
 *                  otherwise
 *****************************************************************************/
static fm_stateMachine *SearchExistingStateMachinesByType( fm_int smType )
{
    fm_stateMachine *sm;

    /* Skip unbound machines (type == NULL) as well as non-matching ones. */
    sm = FM_DLL_GET_FIRST( &smEngine, smHead );
    while ( sm != NULL &&
            ( sm->type == NULL || sm->type->smType != smType ) )
    {
        sm = FM_DLL_GET_NEXT( sm, next );
    }

    return sm;

}   /* end SearchExistingStateMachinesByType */

/*****************************************************************************/
/** SaveTransitionRecord
 * \ingroup intStateMachine
 *
 * \desc            Save a transition record for this state machine.
 *
 * \param[in]       sm is the pointer to a caller-allocated structure
 *                  representing the state machine instance for which
 *                  this transition record needs to be saved.
 *
 * \param[out]      record is the pointer to a caller-allocated structure
 *                  containing the transition record to be saved.
 *
 * \param[in]       recordData points to the event data.
 *
 * \return          FM_OK if successful.
 *
 *****************************************************************************/
static fm_status SaveTransitionRecord( fm_stateMachine       *sm,
                                       fm_smTransitionRecord *record,
                                       void                  *recordData )
{
    fm_int                     recordIdx;
    fm_status                  status;
    fm_smTransitionLogCallback log;
    fm_smTransitionRecord     *recordPtr;

    /* record the transition whether or not it was successful */
    /* but only if transaction history tracking is enabled */
    if ( sm->transitionHistorySize > 0 )
    {
        /* Circular buffer: the oldest record is overwritten once
         * nrTransitions wraps past the history size. */
        recordIdx = sm->nrTransitions % sm->transitionHistorySize;
        recordPtr = &sm->smTransitionHistory[recordIdx];
        *recordPtr = *record;
        /* Point this record at its own slice of the shared data buffer
         * (recordDataSize bytes per record), then copy the event data in. */
        recordPtr->recordData = sm->recordData + recordIdx*sm->recordDataSize;
        FM_MEMCPY_S( recordPtr->recordData,
                     sm->recordDataSize,
                     recordData,
                     sm->recordDataSize );
        sm->nrTransitions++;
    }
    else
    {
        /* History disabled: log straight from the caller's record. */
        recordPtr = record;
        recordPtr->recordData = recordData;
    }

    /* now log this transition */
    log = sm->type->logCallback;
    if ( log != NULL )
    {
        status = log( recordPtr );
    }
    else
    {
        /* no callback registered, just log the event and the transition */
        FM_LOG_DEBUG( FM_LOG_CAT_STATE_MACHINE,
                      "Event %d occurred on State Machine %d of type %d - "
                      "Current State is %d, Next State is %d\n",
                      record->eventInfo.eventId,
                      sm->smUserID,
                      record->eventInfo.smType,
                      record->currentState,
                      record->nextState );
        status = FM_OK;
    }

    return status;

}   /* end SaveTransitionRecord */

/*****************************************************************************/
/** InitRecordDataPtr
 * \ingroup intStateMachine
 *
 * \desc            This function initializes the data pointer for the state
 *                  transition records
 *
 * \param[in]       sm is the pointer to a caller-allocated structure
 *                  representing the state
machine instance whose history * buffer needs to be initialized * * \param[in] historySize is the number of records of the transition * history buffer for this state machine. * * \return FM_OK *****************************************************************************/ static fm_status InitRecordDataPtr( fm_stateMachine *sm, fm_int historySize ) { fm_int idx; fm_byte *bufStart; fm_smTransitionRecord *record; record = sm->smTransitionHistory; bufStart = sm->recordData; /* the event data buffers are attched to the transition record array */ for ( idx = 0 ; idx < historySize ; idx++ ) { record->recordData = bufStart; bufStart++; record++; } return FM_OK; } /* end InitRecordDataPtr */ /*****************************************************************************/ /** SaveEventTime * \ingroup intStateMachine * * \chips FM10000 * * \desc Save the event time depending on the desired value of the * GSME timestamp mode * * \param[in] sm is the pointer to a caller-allocated structure * representing the state machine instance for which where * this transition record needs to be saved. 
* *  \param[in,out]  ts is the pointer to a caller allocated variable where
 *                  this function will return the event timestamp
 *
 * \return          FM_OK
 *
 * \return          FM_ERR_INVALID_ARGUMENT if an input pointer is NULL
 *
 *****************************************************************************/
static fm_status SaveEventTime( fm_stateMachine *sm, fm_timestamp *ts )
{
    if ( sm == NULL || ts == NULL )
    {
        return FM_ERR_INVALID_ARGUMENT;
    }

    /* always sample the wall clock first, then rebase it below */
    fmGetTime( ts );

    switch( smEngine.tsMode )
    {
        case FM_GSME_TIMESTAMP_MODE_ABSOLUTE:
            /* raw timestamp, no rebasing */
            break;

        case FM_GSME_TIMESTAMP_MODE_SINCE_CLEAR:
            /* relative to the last time this instance was (re)initialized */
            fmSubTimestamps( ts, &sm->initTimeStamp, ts );
            break;

        case FM_GSME_TIMESTAMP_MODE_SYSUPTIME:
        default:
            /* relative to GSME engine initialization time */
            fmSubTimestamps( ts, &smEngine.initTime, ts );
            break;
    }

    return FM_OK;

}   /* end SaveEventTime */


/*****************************************************************************
 * Public Functions
 *****************************************************************************/


/*****************************************************************************/
/** fmInitStateMachineEngine
 * \ingroup intStateMachine
 *
 * \desc            Function to initialize the State Machine Engine
 *
 * \param[in]       initTime pointer to a caller-allocated variable indicating
 *                  the timestamp to be used by GSME to timestamp events
 *
 * \param[in]       mode is the event timestamping mode. See
 *                  ''fm_smTimestampMode''
 *
 * \return          FM_OK if the state machine was created successfully
 *
 * \return          FM_ERR_LOCK_INIT if unable to initialize lock.
*****************************************************************************/
fm_status fmInitStateMachineEngine( fm_timestamp *initTime,
                                    fm_smTimestampMode mode )
{
    fm_status status;

    FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE, "Initializing GSME\n" );

    status = fmCreateLock( "GSME Lock", &smEngine.lock );
    FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );

    /* reset the engine-wide singleton state */
    smEngine.init = TRUE;
    smEngine.nrRegisteredTypes = 0;
    smEngine.refValue = 0;
    smEngine.smTypeHead = NULL;
    smEngine.smTypeTail = NULL;
    smEngine.smHead = NULL;
    smEngine.smTail = NULL;
    smEngine.initTime = *initTime;
    smEngine.tsMode = mode;

ABORT:
    FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status );

}   /* end fmInitStateMachineEngine() */


/*****************************************************************************/
/** fmRegisterStateTransitionTable
 * \ingroup intStateMachine
 *
 * \desc            This function registers a new state machine type with
 *                  GSME by associating a caller-provided state machine type
 *                  with a State Transition Table.
 *
 * \param[in]       smType is a reserved key used by the caller to uniquely
 *                  identify the state machine type
 *
 * \param[in]       nrStates is number of states for this state machine. The
 *                  range of valid state IDs is assumed to be (0, nrStates-1)
 *
 * \param[in]       nrEvents is number of events for this state machine. The
 *                  range of valid event IDs is assumed to be (0, nrEvents-1)
 *
 * \param[in]       stt is a caller-provided array of nrStates
 *                  pointers each pointing to an array of nrEvents State
 *                  Transition descriptors for a given state.
* *  \param[in]      log is a callback function used by the state
 *                  machine engine to print a log message describing the
 *                  transition
 *
 * \param[in]       okIfRegistered is a boolean variable indicating how to
 *                  handle the case where this state transition table is
 *                  already registered: if TRUE the operation will be silently
 *                  skipped; if FALSE this function will return an error
 *
 * \return          FM_OK if the state machine was created successfully
 *
 * \return          FM_ERR_INVALID_ARGUMENT if one of the arguments is invalid
 *
 * \return          FM_ERR_NO_MEM if there was a memory allocation failure
 *                  while creating one of the state machine internal structures
 *
 * \return          FM_ERR_STATE_MACHINE_TYPE if this state machine type is
 *                  already registered and okIfRegistered is set to FALSE
 *****************************************************************************/
fm_status fmRegisterStateTransitionTable( fm_int smType,
                                          fm_int nrStates,
                                          fm_int nrEvents,
                                          fm_smTransitionEntry **stt,
                                          fm_smTransitionLogCallback log,
                                          fm_bool okIfRegistered )
{
    fm_stateMachineType *type = NULL;
    fm_status status;
    fm_smTransitionEntry entry;
    fm_int i;
    fm_int j;
    fm_bool gsmeLockTaken = FALSE;

    FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE,
                  "smType=%d nrStates=%d nrEvents=%d "
                  "smTransitionTable=%p\n",
                  smType,
                  nrStates,
                  nrEvents,
                  (void *)stt );

    /* make sure it was initialized */
    if ( smEngine.init != TRUE )
    {
        status = FM_ERR_UNINITIALIZED;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    /* check the input arguments */
    if ( ( stt == NULL ) || ( nrStates < 0 ) || ( nrEvents < 0 ) )
    {
        status = FM_ERR_INVALID_ARGUMENT;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    FLAG_TAKE_GSME_LOCK();

    /* see if this state machine type was already registered */
    type = SearchRegisteredStateMachineTypes ( smType );
    if ( type != NULL )
    {
        if ( okIfRegistered )
        {
            /* already there and caller doesn't mind: nothing to do */
            status = FM_OK;
            goto ABORT;
        }
        else
        {
            /* NULL-ed so the ABORT path won't free the registered type */
            type = NULL;
            status = FM_ERR_STATE_MACHINE_TYPE;
            FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
        }
    }

    /* allocate memory for the state
machine type */
    type = ( fm_stateMachineType *)fmAlloc( sizeof(fm_stateMachineType) );
    if ( type == NULL )
    {
        status = FM_ERR_NO_MEM;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    /* allocate memory for the state transition table */
    type->smTransitionTable =
        (fm_smTransitionEntry *)fmAlloc( sizeof(fm_smTransitionEntry) *
                                         nrStates *
                                         nrEvents );
    if ( type->smTransitionTable == NULL )
    {
        status = FM_ERR_NO_MEM;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    /* fill out the state machine type structure */
    type->smType = smType;
    type->nrStates = nrStates;
    type->nrEvents = nrEvents;
    type->logCallback = log;

    /* now copy each entry one by one */
    for (i = 0 ; i < nrStates ; i++)
    {
        for (j = 0 ; j < nrEvents ; j++)
        {
            /***********************************************************
             * If this entry isn't initialized, populate it as follows:
             * - NextState is set to the current state (loop transition)
             * - action is set to the default action
             ***********************************************************/
            entry = stt[i][j];
            if ( entry.used == FALSE )
            {
                entry.used = TRUE;
                entry.transitionCallback = NULL;
                entry.nextState = i;
            }

            GET_TABLE_ENTRY(type->smTransitionTable, i, j, nrEvents) = entry;

        }   /* end for ( j = 0, ... ) */

    }   /* end for ( i = 0, ...
) */

    /* Add this new registered state machine type to the list */
    FM_DLL_INSERT_LAST( &smEngine,
                        smTypeHead,
                        smTypeTail,
                        type,
                        next,
                        prev );
    smEngine.nrRegisteredTypes++;

    /* if we got here, we're ok */
    status = FM_OK;

ABORT:
    /* on failure, release the partially-built type object */
    if ( status != FM_OK && type != NULL )
    {
        fmFree( type );
    }

    if ( gsmeLockTaken )
    {
        DROP_GSME_LOCK();
    }

    FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status );

}   /* end fmRegisterStateTransitionTable */


/*****************************************************************************/
/** fmUnregisterStateTransitionTable
 * \ingroup intStateMachine
 *
 * \desc            This function removes the association between a state
 *                  machine type and the State Transition Table previously
 *                  created using ''fmRegisterStateTransitionTable''
 *
 * \param[in]       smType is a reserved key used by the caller to uniquely
 *                  identify the state machine type
 *
 * \param[in]       skipIfUsed is a boolean variable indicating how to handle
 *                  the case where there is at least a state machine bound to
 *                  this state transition table: if TRUE the operation is
 *                  silently skipped; if FALSE this function will report an
 *                  error
 *
 * \return          FM_OK if the state machine was created successfully
 *
 * \return          FM_ERR_INVALID_ARGUMENT if one of the arguments is invalid
 *****************************************************************************/
fm_status fmUnregisterStateTransitionTable( fm_int smType, fm_bool skipIfUsed )
{
    fm_status status;
    fm_stateMachineType *type;
    fm_stateMachine *entry;

    FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE, "smType=%d\n", smType );

    /* make sure GSME is initialized */
    if ( smEngine.init != TRUE )
    {
        FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_UNINITIALIZED );
    }

    TAKE_GSME_LOCK();

    /* see if this state machine type was already registered */
    type = SearchRegisteredStateMachineTypes( smType );
    if ( type != NULL )
    {
        /* see if there is an existing state machine bound to this type */
        entry = SearchExistingStateMachinesByType( smType );
        if ( entry != NULL )
        {
            if ( skipIfUsed )
            {
                /* still in use but caller asked for a silent skip */
                status = FM_OK;
                goto ABORT;
}

            /* yes there is one, flag it as an error */
            status = FM_ERR_STATE_MACHINE_TYPE;
            FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
        }

        /* remove it from the list of state machine types */
        smEngine.nrRegisteredTypes--;
        FM_DLL_REMOVE_NODE( &smEngine,
                            smTypeHead,
                            smTypeTail,
                            type,
                            next,
                            prev );

        /****************************************************
         * free the memory allocated for the state transition
         * table and for the state machine type object itself
         ****************************************************/
        fmFree( type->smTransitionTable );
        fmFree( type );

        /* if we got here, we're ok */
        status = FM_OK;

    }   /* end if ( type != NULL ) */
    else
    {
        /* not a registered state machine type, silently ignore it */
        status = FM_OK;
    }

ABORT:
    DROP_GSME_LOCK();
    FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status );

}   /* end fmUnregisterStateTransitionTable */


/*****************************************************************************/
/** fmCreateStateMachine
 * \ingroup intStateMachine
 *
 * \desc            This function creates a state machine by allocating all
 *                  necessary internal data structures. For the state
 *                  machine to become operational, it needs to be bound to
 *                  a registered state transition table by calling
 *                  ''fmStartStateMachine''
 *
 * \param[in]       smUserID The user's ID for this state machine
 *
 * \param[in]       historySize is the size of the transition history buffer
 *                  for this state machine. The state machine engine will keep
 *                  a buffer containing the most recent historySize entries of
 *                  type ''fm_smTransitionRecord'' for a given state machine.
 *                  It can be zero to disable transition logging
 *
 * \param[in]       recordDataSize is the size in bytes of the caller-provided
 *                  event data buffer that is saved by GSME in a transition
 *                  record upon event notification
 *
 * \param[out]      handle is a pointer to a caller-allocated area where this
 *                  function will return a handle for this state machine.
The
 *                  caller is required to use this handle for any subsequent
 *                  operation on the same state machine
 *
 * \return          FM_OK if the state machine was created successfully
 *
 * \return          FM_ERR_INVALID_ARGUMENT if one of the arguments is invalid
 *
 * \return          FM_ERR_NO_MEM if there was a memory allocation failure
 *                  while creating one of the state machine internal structures
 *****************************************************************************/
fm_status fmCreateStateMachine( fm_int smUserID,
                                fm_int historySize,
                                fm_int recordDataSize,
                                fm_smHandle *handle )
{
    fm_status status;

    FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE,
                  "smUserID=%d historySize=%d\n",
                  smUserID,
                  historySize );

    /* Make sure it's initialized */
    if ( smEngine.init != TRUE )
    {
        FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_UNINITIALIZED );
    }

    TAKE_GSME_LOCK();

    /* delegate the real work to the internal (lock-free) helper */
    status = CreateStateMachine( smUserID, historySize, recordDataSize, handle );

    DROP_GSME_LOCK();
    FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status );

}   /* end fmCreateStateMachine */


/*****************************************************************************/
/** fmStartStateMachine
 * \ingroup intStateMachine
 *
 * \desc            This function puts a state machine in operational state by
 *                  binding it to a registered state machine type (and
 *                  therefore to a State Machine Transition table) and by
 *                  setting its initial state
 *
 * \param[in]       handle is the handle for this state machine generated when
 *                  the state machine was created
 *
 * \param[in]       smType is a reserved key used by the caller to uniquely
 *                  identify the state machine type
 *
 * \param[in]       initState is the initial state for this state machine
 *
 * \return          FM_OK if the state machine was created successfully
 *
 * \return          FM_ERR_INVALID_ARGUMENT if one of the arguments is invalid
 *
 * \return          FM_ERR_STATE_MACHINE_HANDLE if the specified handle does
 *                  not correspond to a valid state machine
 *
 * \return          FM_ERR_STATE_MACHINE_TYPE if the specified type does not
 *                  appear to have been registered
 *
 * \return
FM_ERR_BOUND_STATE_MACHINE if the operation is attempted
 *                  on a state machine that is already bound to a state
 *                  transition type. ''fmStopStateMachine'' should be
 *                  invoked before a new binding can be created.
 *****************************************************************************/
fm_status fmStartStateMachine( fm_smHandle handle,
                               fm_int smType,
                               fm_int initState )
{
    fm_status status;

    FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE,
                  "handle=%p initState=%d\n",
                  (void *)handle,
                  initState );

    /* Make sure it's initialized */
    if ( smEngine.init != TRUE )
    {
        FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_UNINITIALIZED );
    }

    TAKE_GSME_LOCK();

    /* delegate the real work to the internal (lock-free) helper */
    status = StartStateMachine( handle, smType, initState );

    DROP_GSME_LOCK();
    FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status );

}   /* end fmStartStateMachine */


/*****************************************************************************/
/** fmCreateAndStartStateMachine
 * \ingroup intStateMachine
 *
 * \desc            This function creates a state machine by allocating all
 *                  necessary internal data structure and then puts it in
 *                  an operational state, by binding it to a registered
 *                  state transition table
 *
 * \param[in]       smUserID The user's ID for this state machine
 *
 * \param[in]       historySize is the size of the transition history buffer
 *                  for this state machine. The state machine engine will keep
 *                  a buffer containing the most recent historySize entries of
 *                  type ''fm_smTransitionRecord'' for a given state machine.
 *                  It can be zero to disable transition logging
 *
 * \param[in]       recordDataSize is the size in bytes of the caller-provided
 *                  event data buffer that is saved by GSME in a transition
 *                  record upon event notification
 *
 * \param[in]       smType is a reserved key used by the caller to uniquely
 *                  identify the state machine type
 *
 * \param[in]       initState is the initial state for this state machine
 *
 * \param[out]      handle is a pointer to a caller-allocated area where this
 *                  function will return a handle for this state machine.
The
 *                  caller is required to use this handle for any subsequent
 *                  operation on the same state machine
 *
 * \return          FM_OK if the state machine was created successfully
 *
 * \return          FM_ERR_INVALID_ARGUMENT if one of the arguments is invalid
 *
 * \return          FM_ERR_NO_MEM if there was a memory allocation failure
 *                  while creating one of the state machine internal structures
 *****************************************************************************/
fm_status fmCreateAndStartStateMachine( fm_int smUserID,
                                        fm_int historySize,
                                        fm_int recordDataSize,
                                        fm_int smType,
                                        fm_int initState,
                                        fm_smHandle *handle )
{
    fm_status status;

    FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE,
                  "smUserID=%d historySize=%d recordDataSize=%d "
                  "smType=%d initState=%d\n",
                  smUserID,
                  historySize,
                  recordDataSize,
                  smType,
                  initState );

    /* Make sure it's initialized */
    if ( smEngine.init != TRUE )
    {
        FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_UNINITIALIZED );
    }

    TAKE_GSME_LOCK();

    /* create first ... */
    status = CreateStateMachine( smUserID, historySize, recordDataSize, handle );
    FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );

    /* ... then bind and start it in the same critical section */
    status = StartStateMachine( *handle, smType, initState );

ABORT:
    DROP_GSME_LOCK();
    FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status );

}   /* end fmCreateAndStartStateMachine */


/*****************************************************************************/
/** fmStopStateMachine
 * \ingroup intStateMachine
 *
 * \desc            This function puts a state machine in a non-operational
 *                  state by unbinding it from the current transition table.
 * Existing internal state
 *                  information and the transition history buffer are also
 *                  cleared
 *
 * \param[in]       handle is the handle for this state machine generated when
 *                  the state machine was created
 *
 * \return          FM_OK if the state machine was stopped successfully
 *
 * \return          FM_ERR_STATE_MACHINE_HANDLE if the specified handle does
 *                  not correspond to a valid state machine
 *****************************************************************************/
fm_status fmStopStateMachine( fm_smHandle handle )
{
    fm_status status;
    fm_stateMachine *sm;

    FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE, "handle=%p\n", (void *)handle );

    /* Make sure it's initialized */
    if ( smEngine.init != TRUE )
    {
        FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_UNINITIALIZED );
    }

    TAKE_GSME_LOCK();

    /* consistency check on the handle and other input arguments */
    sm = (fm_stateMachine *)handle;
    if ( sm == NULL || sm->smMagicNumber != STATE_MACHINE_MAGIC_NUMBER )
    {
        status = FM_ERR_STATE_MACHINE_HANDLE;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    /* unbind it from the registered type */
    sm->type = NULL;

    status = FM_OK;

ABORT:
    DROP_GSME_LOCK();
    FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status );

}   /* end fmStopStateMachine */


/*****************************************************************************/
/** fmDeleteStateMachine
 * \ingroup intStateMachine
 *
 * \desc            This function deletes an existing state machine.
If the
 *                  state machine was not explicitly stopped by the caller
 *                  using ''fmStopStateMachine'', it will be stopped by this
 *                  function before the state machine is deleted
 *
 * \param[in]       handle is the handle for this state machine generated when
 *                  the state machine was created
 *
 * \return          FM_OK if the state machine was deleted successfully
 *
 * \return          FM_ERR_STATE_MACHINE_HANDLE if the specified handle does
 *                  not correspond to a valid state machine
 *****************************************************************************/
fm_status fmDeleteStateMachine( fm_smHandle handle )
{
    fm_status status;
    fm_stateMachine *sm;

    FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE, "handle=%p\n", (void *)handle );

    /* Make sure it's initialized */
    if ( smEngine.init != TRUE )
    {
        FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_UNINITIALIZED );
    }

    TAKE_GSME_LOCK();

    /* consistency check on the handle */
    sm = (fm_stateMachine *)handle;
    if ( sm == NULL || sm->smMagicNumber != STATE_MACHINE_MAGIC_NUMBER )
    {
        status = FM_ERR_STATE_MACHINE_HANDLE;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    /* invalidate the magic number so stale handles are rejected */
    sm->smMagicNumber = 0;

    /* remove it from the linked list of state machines */
    FM_DLL_REMOVE_NODE( &smEngine, smHead, smTail, sm, next, prev );

    /* Free the memory allocated to this entry */
    if ( sm->smTransitionHistory != NULL )
    {
        fmFree( sm->smTransitionHistory );
    }

    if ( sm->recordData != NULL )
    {
        fmFree( sm->recordData );
    }

    fmFree( sm );

    status = FM_OK;

ABORT:
    DROP_GSME_LOCK();
    FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status );

}   /* end fmDeleteStateMachine */


/*****************************************************************************/
/** fmNotifyStateMachineEvent
 * \ingroup intStateMachine
 *
 * \desc            This function notifies the state machine engine that
 *                  a specified event has occurred on a given state machine
 *                  and it should be processed accordingly.
* *  \param[in]      handle is the handle for this state machine generated when
 *                  the state machine was created
 *
 * \param[in]       eventInfo is a pointer to a caller-allocated area
 *                  containing the generic event descriptor.
 *
 * \param[in]       userInfo is a pointer to a caller-allocated area containing
 *                  purpose-specific event info
 *
 * \param[in]       recordData is a pointer to a caller-allocated area
 *                  containing purpose-specific data that the caller wants GSME
 *                  to save in the transition record. The amount of data to be
 *                  saved must be indicated by the caller when the State
 *                  Machine instance is created using ''fmCreateStateMachine''
 *                  or ''fmCreateAndStartStateMachine''
 *
 * \return          FM_OK if the state machine was created successfully
 *
 * \return          FM_ERR_INVALID_ARGUMENT if one of the arguments is invalid
 *
 * \return          FM_ERR_STATE_MACHINE_HANDLE if the specified handle does
 *                  not correspond to a valid state machine
 *
 * \return          FM_ERR_STATE_MACHINE_TYPE the state machine type
 *                  indicated in the event info header does not match that of
 *                  the referred state machine
 *****************************************************************************/
fm_status fmNotifyStateMachineEvent( fm_smHandle handle,
                                     fm_smEventInfo *eventInfo,
                                     void *userInfo,
                                     void *recordData )
{
    fm_status status;
    fm_stateMachine *sm;
    fm_smTransitionEntry entry;
    fm_smTransitionCallback transition;
    fm_smConditionCallback condition;
    fm_smTransitionRecord record;
    fm_int nextState;
    fm_bool gsmeLockTaken = FALSE;
    fm_bool callerLockTaken = FALSE;
    fm_int smType;
    fm_uint32 refValue;
    fm_int precedence;

    FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE,
                  "handle=%p eventInfo=%p\n",
                  (void *)handle,
                  (void *)eventInfo );

    /* Make sure it's initialized */
    if ( smEngine.init != TRUE )
    {
        FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_UNINITIALIZED );
    }

    /* check the event pointer */
    if ( eventInfo == NULL )
    {
        FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_INVALID_ARGUMENT );
    }

    /* make sure the caller's lock is valid and NOT a super-precedence lock */
    status =
fmGetLockPrecedence( eventInfo->lock, &precedence );
    if ( status != FM_OK )
    {
        FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status );
    }

    if ( precedence == FM_LOCK_SUPER_PRECEDENCE )
    {
        FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_INVALID_ARGUMENT );
    }

    /* caller's lock first, then the GSME lock (fixed ordering) */
    FLAG_TAKE_CALLER_LOCK( eventInfo );
    FLAG_TAKE_GSME_LOCK( );

    /* consistency check on the handle */
    sm = (fm_stateMachine *)handle;
    if ( sm == NULL || sm->smMagicNumber != STATE_MACHINE_MAGIC_NUMBER )
    {
        status = FM_ERR_STATE_MACHINE_HANDLE;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    /* make sure there's no state machine type mismatch */
    if ( sm->type == NULL ||
         eventInfo->smType != sm->type->smType ||
         eventInfo->eventId >= sm->type->nrEvents )
    {
        status = FM_ERR_STATE_MACHINE_TYPE;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    /* save the current reference value and state machine type to perform a
       consistency check later on */
    smType = sm->type->smType;
    refValue = sm->smRefValue;

    /* before calling any action or condition, make sure this flag is
     * set to its default value, which is FALSE */
    eventInfo->dontSaveRecord = FALSE;

    /* retrieve the State Transition Table entry */
    entry = GET_TABLE_ENTRY( sm->type->smTransitionTable,
                             sm->curState,
                             eventInfo->eventId,
                             sm->type->nrEvents );

    /*
     * Save event timestamp here.
     * Further processing may request other events which would be saved
     * with earlier time.
*/
    SaveEventTime( sm, &record.eventTime );

    if ( entry.nextState == FM_STATE_UNSPECIFIED &&
         entry.conditionCallback != NULL )
    {
        /* conditional transition: the callback picks the next state */
        condition = entry.conditionCallback;

        /* by default, nextState is set to the current state */
        nextState = sm->curState;

        /* drop the GSME lock to allow the callback to use other locks */
        FLAG_DROP_GSME_LOCK();
        status = condition( eventInfo, userInfo, &nextState );
        FLAG_TAKE_GSME_LOCK();
    }
    else
    {
        /* retrieve the transition callback */
        transition = entry.transitionCallback;

        /* assume it'll be ok unless the transition callback tell us otherwise */
        status = FM_OK;
        nextState = entry.nextState;

        /* default action if the action list is empty */
        if ( transition != NULL )
        {
            /* drop the GSME lock to allow the callback to use other locks */
            FLAG_DROP_GSME_LOCK();
            status = transition( eventInfo, userInfo );
            FLAG_TAKE_GSME_LOCK();
        }
    }

    /* we may have released the GSME lock temporarily, make sure the state
       machine instance is still valid and nothing changed meanwhile */
    if ( ( sm->smMagicNumber != STATE_MACHINE_MAGIC_NUMBER ) ||
         ( sm->smRefValue != refValue ) ||
         ( sm->type == NULL ) ||
         ( sm->type->smType != smType ) )
    {
        FM_LOG_DEBUG( FM_LOG_CAT_STATE_MACHINE,
                      "State Machine Instance modified during transition: "
                      "sm->magicNumber=0x%08x "
                      "sm->smRefValue=%d refValue=%d "
                      "sm->type=%p sm->type->smType=%d smType=%d\n",
                      sm->smMagicNumber,
                      sm->smRefValue,
                      refValue,
                      (void *)sm->type,
                      (sm->type ? sm->type->smType : -1 ),
                      smType );
        status = FM_OK;
        goto ABORT;
    }

    /* make sure the next state is valid */
    if ( nextState < 0 || nextState >= sm->type->nrStates )
    {
        status = FM_ERR_STATE_MACHINE_TYPE;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    /* Check dontSaveRecord flag, it may have been modified (set to TRUE)
     * by an action or condition in order to not record the current
     * transaction.
Note that this flag is always restored to its
     * default value when a new event is notified and processed */
    if (eventInfo->dontSaveRecord == FALSE)
    {
        /* fill out the transition record */
        record.eventInfo = *eventInfo;
        record.currentState = sm->curState;
        record.status = status;
        record.smUserID = sm->smUserID;
        if ( status == FM_OK )
        {
            record.nextState = nextState;
        }
        else
        {
            /* failed transition: record a loop on the current state */
            record.nextState = sm->curState;
        }

        /* Save this transition record */
        SaveTransitionRecord( sm, &record, recordData );
    }

    /* commit the state change only on a successful transition */
    if ( status == FM_OK )
    {
        sm->curState = nextState;
    }

ABORT:
    if ( gsmeLockTaken )
    {
        DROP_GSME_LOCK();
    }

    if ( callerLockTaken )
    {
        DROP_CALLER_LOCK( eventInfo );
    }

    FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status );

}   /* end fmNotifyStateMachineEvent */


/*****************************************************************************/
/** fmGetStateMachineCurrentState
 * \ingroup intStateMachine
 *
 * \desc            This function returns the current state for a given state
 *                  machine
 *
 * \param[in]       handle is the handle for this state machine generated when
 *                  the state machine was created
 *
 * \param[out]      state is a pointer to a caller-allocated area
 *                  where this function will return the current state
 *
 * \return          FM_OK if the current state was returned successfully
 *
 * \return          FM_ERR_INVALID_ARGUMENT if one of the arguments is invalid
 *
 * \return          FM_ERR_STATE_MACHINE_HANDLE if the specified handle does
 *                  not correspond to a valid state machine
 *
 * \return          FM_ERR_STATE_MACHINE_TYPE the state machine isn't bound
 *                  to any registered type
 *****************************************************************************/
fm_status fmGetStateMachineCurrentState( fm_smHandle handle, fm_int *state )
{
    fm_status status;
    fm_stateMachine *sm;

    FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE, "handle=%p\n", (void *)handle );

    /* Make sure it's initialized */
    if ( smEngine.init != TRUE )
    {
        FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_UNINITIALIZED );
    }

    TAKE_GSME_LOCK();

    /* consistency check on the handle */
    sm = (fm_stateMachine
*)handle; if ( sm == NULL || sm->smMagicNumber != STATE_MACHINE_MAGIC_NUMBER ) { status = FM_ERR_STATE_MACHINE_HANDLE; FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status ); } /* make sure this state machine is currently bound */ if ( sm->type == NULL ) { status = FM_ERR_STATE_MACHINE_TYPE; FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status ); } *state = sm->curState; status = FM_OK; ABORT: DROP_GSME_LOCK(); FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status ) } /* end fmGetStateMachineCurrentState */ /*****************************************************************************/ /** fmGetStateTransitionHistory * \ingroup intStateMachine * * \desc This function returns the most recent N transitions * occurred on the specified state machine. N is the smallest * between the number of transitions kept in this state * machines transition history buffer and the size of the * caller-specified buffer * * \param[in] handle is the handle for this state machine generated when * the state machine was created * * \param[in,out] nrRecords is a pointer to a caller-allocated area * used by the caller to indicate the size of its transition * history buffer and by this function to return the number * of transitions records * * \param[in] records is a pointer to a caller-allocated area where * this function will return up to most recent *nrTransitions * transition records for this state machine * * \return FM_OK if the state machine was created successfully * * \return FM_ERR_INVALID_ARGUMENT if one of the arguments is invalid * * \return FM_ERR_STATE_MACHINE_HANDLE if the specified handle does * not correspond to a valid state machine *****************************************************************************/ fm_status fmGetStateTransitionHistory( fm_smHandle handle, fm_int *nrRecords, fm_smTransitionRecord *records ) { fm_status status; fm_stateMachine *sm; fm_int idx; fm_int firstIdx; fm_int recordIdx; FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE, "handle=%p nrTransitions=%p 
transitions=%p\n", (void *)handle, (void *)nrRecords, (void *)records ); /* Make sure it's initialized */ if ( smEngine.init != TRUE ) { FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_UNINITIALIZED ); } TAKE_GSME_LOCK(); /* consistency check on the handle */ sm = (fm_stateMachine *)handle; if ( sm == NULL || sm->smMagicNumber != STATE_MACHINE_MAGIC_NUMBER ) { status = FM_ERR_STATE_MACHINE_HANDLE; FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status ); } /* check the pointer arguments */ if ( nrRecords == NULL || records == NULL ) { status = FM_ERR_INVALID_ARGUMENT; FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status ); } /* has the transition history circular buffer wrapped at least once? */ if ( sm->nrTransitions > sm->transitionHistorySize ) { /* Yes, determine the index of the least recent transition */ *nrRecords = sm->transitionHistorySize; firstIdx = sm->nrTransitions - sm->transitionHistorySize; firstIdx = firstIdx % sm->transitionHistorySize; } else { /* Not yet, the least recent transition is at location 0 */ *nrRecords = sm->nrTransitions; firstIdx = 0; } /* copy the transition records into the caller-provided buffer */ for ( idx = 0 ; idx < *nrRecords ; idx++ ) { recordIdx = ((firstIdx + idx) % sm->transitionHistorySize); *(records + idx) = sm->smTransitionHistory[recordIdx]; } /* Successful, if we got here */ status = FM_OK; ABORT: DROP_GSME_LOCK(); FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status ); } /* end fmGetStateTransitionHistory */ /*****************************************************************************/ /** fmClearStateTransitionHistory * \ingroup intStateMachine * * \desc This function clears the transition history buffer for the * specified state machine * * \param[in] handle is the handle for this state machine generated when * the state machine was created * * \return FM_OK if the state machine was created successfully * * \return FM_ERR_STATE_MACHINE_HANDLE if the specified handle does * not correspond to a valid state machine 
*****************************************************************************/
fm_status fmClearStateTransitionHistory( fm_smHandle handle )
{
    fm_status status;
    fm_stateMachine *sm;

    FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE, "handle=%p\n", (void *)handle );

    /* Make sure it's initialized */
    if ( smEngine.init != TRUE )
    {
        FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_UNINITIALIZED );
    }

    TAKE_GSME_LOCK();

    /* consistency check on the handle */
    sm = (fm_stateMachine *)handle;
    if ( sm == NULL || sm->smMagicNumber != STATE_MACHINE_MAGIC_NUMBER )
    {
        status = FM_ERR_STATE_MACHINE_HANDLE;
        FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status );
    }

    /**************************************************
     * No reason to check the state machine type. The
     * operation is harmless if it isn't bound
     **************************************************/

    /* just clear the log */
    status = ClearStateTransitionHistory( sm );

ABORT:
    DROP_GSME_LOCK();
    FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status );

}   /* end fmClearStateTransitionHistory */


/*****************************************************************************/
/** fmChangeStateTransitionHistorySize
 * \ingroup intStateMachine
 *
 * \desc            This function changes the size of the transition history
 *                  buffer for the specified state machine.
Up to N transitions * are preserved in the history buffer where N is the smallest * between historySize and the number of transitions kept in * the buffer when this function is invoked * * \param[in] handle is the handle for this state machine generated when * the state machine was created * * \param[in] historySize is the new size of the transition history * buffer * * \return FM_OK if the state machine was created successfully * * \return FM_ERR_STATE_MACHINE_HANDLE if the specified handle does * not correspond to a valid state machine * * \return FM_ERR_INVALID_ARGUMENT if the historySize is invalid * * \return FM_ERR_NO_MEM if memory allocation for the new history * buffer failed *****************************************************************************/ fm_status fmChangeStateTransitionHistorySize( fm_smHandle handle, fm_int historySize ) { fm_status status; fm_stateMachine *sm; fm_smTransitionRecord *newHistoryStart; fm_smTransitionRecord *oldHistoryStart; fm_smTransitionRecord *newRecord; fm_smTransitionRecord *oldRecord; fm_byte *newDataBuf; fm_int nrTransitions; fm_int firstFromIdx; fm_int idx; fm_int recordSize; fm_int bufferSize; FM_LOG_ENTRY( FM_LOG_CAT_STATE_MACHINE, "handle=%p historySize=%d\n", (void *)handle, historySize ); /* Make sure it's initialized */ if ( smEngine.init != TRUE ) { FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, FM_ERR_UNINITIALIZED ); } TAKE_GSME_LOCK(); newDataBuf = NULL; newHistoryStart = NULL; nrTransitions = 0; /* consistency check on the handle */ sm = (fm_stateMachine *)handle; if ( sm == NULL || sm->smMagicNumber != STATE_MACHINE_MAGIC_NUMBER ) { status = FM_ERR_STATE_MACHINE_HANDLE; FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status ); } /* Is the size valid? 
*/ if ( historySize < 0 ) { status = FM_ERR_INVALID_ARGUMENT; FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status ); } /* Proceed only if the caller still wants to keep a transition history */ if ( historySize > 0 ) { recordSize = historySize * sizeof( fm_smTransitionRecord ); newHistoryStart = fmAlloc( recordSize ); if ( newHistoryStart == NULL ) { status = FM_ERR_NO_MEM; FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status ); } /* clear the new buffer */ FM_MEMSET_S( newHistoryStart, recordSize, 0, recordSize ); bufferSize = historySize * sm->recordDataSize; newDataBuf = fmAlloc( bufferSize ); if ( newDataBuf == NULL ) { status = FM_ERR_NO_MEM; FM_LOG_ABORT_ON_ERR( FM_LOG_CAT_STATE_MACHINE, status ); } /* clear the new buffer */ FM_MEMSET_S( newDataBuf, bufferSize, 0, bufferSize ); /***************************************************** * determine how many transition records to keep *****************************************************/ /* is the old history buffer full? */ if ( sm->nrTransitions > sm->transitionHistorySize ) { /* yes */ nrTransitions = sm->transitionHistorySize; firstFromIdx = sm->nrTransitions - sm->transitionHistorySize; firstFromIdx = firstFromIdx % sm->transitionHistorySize; } else { /* not yet */ nrTransitions = sm->nrTransitions; firstFromIdx = 0; } /* Do not exceed the size of the new history buffer */ if ( nrTransitions > historySize ) { firstFromIdx += (nrTransitions - historySize); firstFromIdx = firstFromIdx % sm->transitionHistorySize; nrTransitions = historySize; } /******************************************************** * Copy all transition records that we want to keep ********************************************************/ oldHistoryStart = sm->smTransitionHistory; if ( oldHistoryStart != 0 ) { newRecord = newHistoryStart; for ( idx = 0 ; idx < nrTransitions ; idx++ ) { oldRecord = oldHistoryStart; oldRecord += ((firstFromIdx + idx) % sm->transitionHistorySize ); *newRecord = *oldRecord; newRecord->recordData = newDataBuf + 
sm->recordDataSize*idx; /* copy also the event-specific data */ FM_MEMCPY_S( newRecord->recordData, sm->recordDataSize, oldRecord->recordData, sm->recordDataSize ); newRecord++; } } } /* end if ( historySize > 0 ) */ /* Free the previous transition buffer, if any */ if ( sm->smTransitionHistory != NULL ) { fmFree( sm->smTransitionHistory ); } if ( sm->recordData != NULL ) { fmFree( sm->recordData ); } /* finalize the change by recording size and pointer of the new buffer */ sm->transitionHistorySize = historySize; if ( historySize > 0 ) { sm->smTransitionHistory = newHistoryStart; sm->recordData = newDataBuf; sm->nrTransitions = nrTransitions; } else { sm->smTransitionHistory = NULL; sm->recordData = NULL; sm->nrTransitions = 0; } /* Successful if we got here */ status = FM_OK; ABORT: DROP_GSME_LOCK(); if ( status != FM_OK ) { if ( newDataBuf != NULL ) { fmFree( newDataBuf ); } if ( newHistoryStart != NULL ) { fmFree( newHistoryStart ); } } FM_LOG_EXIT( FM_LOG_CAT_STATE_MACHINE, status ); } /* end fmChangeStateTransitionHistory */
joeleg/laconia
packages/laconia-test/src/laconiaTest.js
<filename>packages/laconia-test/src/laconiaTest.js const AWS = require("aws-sdk"); const laconiaInvoker = require("@laconia/invoker"); const LaconiaTester = require("./LaconiaTester"); const S3Spier = require("./S3Spier"); const defineUnavailableSpy = object => { Object.defineProperty(object, "spy", { get: () => { throw new Error( "spy is not enabled, check documentation to set the required options to enable this feature" ); } }); return object; }; const getSpyOptions = options => { return options.spy || {}; }; const isSpyOptionsSet = options => getSpyOptions(options).bucketName !== undefined; const createSpier = (functionName, options) => { return new S3Spier( options.spy.bucketName, functionName, options.spy.s3 || new AWS.S3() ); }; module.exports = (functionName, options = {}) => { const invoker = laconiaInvoker( functionName, options.lambda || new AWS.Lambda(), options ); invoker.requestLogs = true; const laconiaTester = new LaconiaTester(invoker); if (isSpyOptionsSet(options)) { laconiaTester.spy = createSpier(functionName, options); return laconiaTester; } else { return defineUnavailableSpy(laconiaTester); } };
HellSoft-Col/OPRS-Java-components
SideCarOPRS/src/co/edu/javeriana/dtos/PaymentResponseDTO.java
<gh_stars>0
package co.edu.javeriana.dtos;

import java.io.Serializable;

/**
 * Transfer object carrying the outcome of a payment authorization:
 * the approval number assigned by the processor and the approval status.
 *
 * @author HellSoft
 */
public class PaymentResponseDTO implements Serializable {

    /** Approval number returned by the payment processor; null until set. */
    private String numAprobacion;

    /** Approval status returned by the payment processor; null until set. */
    private String aprobacion;

    /** Creates an empty response with both fields unset. */
    public PaymentResponseDTO() {
        this(null, null);
    }

    /**
     * Creates a fully populated response.
     *
     * @param numAprobacion approval number assigned by the processor
     * @param aprobacion    approval status
     */
    public PaymentResponseDTO(String numAprobacion, String aprobacion) {
        this.numAprobacion = numAprobacion;
        this.aprobacion = aprobacion;
    }

    public String getNumAprobacion() {
        return numAprobacion;
    }

    public void setNumAprobacion(String numAprobacion) {
        this.numAprobacion = numAprobacion;
    }

    public String getAprobacion() {
        return aprobacion;
    }

    public void setAprobacion(String aprobacion) {
        this.aprobacion = aprobacion;
    }
}
patel243/spring-data-cassandra
spring-data-cassandra/src/test/java/org/springframework/data/cassandra/core/mapping/CassandraCompositePrimaryKeyUnitTests.java
/*
 * Copyright 2016-2021 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.springframework.data.cassandra.core.mapping;

import static org.assertj.core.api.Assertions.*;

import lombok.EqualsAndHashCode;

import java.io.Serializable;
import java.lang.reflect.Field;
import java.util.Date;
import java.util.List;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import org.springframework.data.cassandra.core.convert.SchemaFactory;
import org.springframework.data.cassandra.core.cql.PrimaryKeyType;
import org.springframework.data.cassandra.core.cql.keyspace.ColumnSpecification;
import org.springframework.data.cassandra.core.cql.keyspace.CreateTableSpecification;
import org.springframework.data.mapping.model.Property;
import org.springframework.data.util.ClassTypeInformation;
import org.springframework.util.ReflectionUtils;

import com.datastax.oss.driver.api.core.type.DataTypes;

/**
 * Unit tests for {@link BasicCassandraPersistentProperty} with a composite primary key class.
 *
 * @author <NAME>
 * @author <NAME>
 */
class CassandraCompositePrimaryKeyUnitTests {

	private CassandraMappingContext context;
	private SchemaFactory schemaFactory;

	/** Persistent entity for the table type using the composite key. */
	private CassandraPersistentEntity<?> entity;

	/** Persistent entity for the {@code @PrimaryKeyClass} itself. */
	private CassandraPersistentEntity<?> key;

	@BeforeEach
	void setup() {

		context = new CassandraMappingContext();
		schemaFactory = new SchemaFactory(context, context.getCustomConversions(), context.getCodecRegistry());

		entity = context.getRequiredPersistentEntity(ClassTypeInformation.from(TypeWithCompositeKey.class));
		key = context.getRequiredPersistentEntity(ClassTypeInformation.from(CompositeKey.class));
	}

	@Test // DATACASS-507
	void validateMappingInfo() {

		// The id field must be recognized as a composite primary key property.
		Field field = ReflectionUtils.findField(TypeWithCompositeKey.class, "id");
		CassandraPersistentProperty property = new BasicCassandraPersistentProperty(
				Property.of(ClassTypeInformation.from(TypeWithCompositeKey.class), field), entity,
				CassandraSimpleTypeHolder.HOLDER);

		assertThat(property.isIdProperty()).isTrue();
		assertThat(property.isCompositePrimaryKey()).isTrue();

		CreateTableSpecification spec = schemaFactory.getCreateTableSpecificationFor(entity);

		// CompositeKey.z is the single PARTITIONED column.
		List<ColumnSpecification> partitionKeyColumns = spec.getPartitionKeyColumns();
		assertThat(partitionKeyColumns).hasSize(1);

		ColumnSpecification partitionKeyColumn = partitionKeyColumns.get(0);

		assertThat(partitionKeyColumn.getName()).hasToString("z");
		assertThat(partitionKeyColumn.getKeyType()).isEqualTo(PrimaryKeyType.PARTITIONED);
		assertThat(partitionKeyColumn.getType()).isEqualTo(DataTypes.TEXT);

		// CompositeKey.a is the single CLUSTERED column.
		List<ColumnSpecification> clusteredKeyColumns = spec.getClusteredKeyColumns();
		assertThat(clusteredKeyColumns).hasSize(1);

		ColumnSpecification clusteredKeyColumn = clusteredKeyColumns.get(0);

		assertThat(clusteredKeyColumn.getName()).hasToString("a");
		assertThat(clusteredKeyColumn.getKeyType()).isEqualTo(PrimaryKeyType.CLUSTERED);
		// FIX: this previously re-asserted partitionKeyColumn.getType(), so the
		// clustered column's data type was never actually verified.
		assertThat(clusteredKeyColumn.getType()).isEqualTo(DataTypes.TEXT);
	}

	@PrimaryKeyClass
	@EqualsAndHashCode
	static class CompositeKey implements Serializable {

		private static final long serialVersionUID = 1L;

		@PrimaryKeyColumn(ordinal = 0, type = PrimaryKeyType.PARTITIONED) String z;

		@PrimaryKeyColumn(ordinal = 1, type = PrimaryKeyType.CLUSTERED) String a;
	}

	@Table
	private static class TypeWithCompositeKey {

		@PrimaryKey CompositeKey id;

		Date time;

		@Column("message") String text;
	}
}
jlanga/smsk_selection
src/guidance.v2.02/programs/semphy/nonActiveCode/RandomGenerator.h
<reponame>jlanga/smsk_selection<gh_stars>1-10
#ifndef RandomGenerator_h
#define RandomGenerator_h

// Marsaglia's subtractive R.N. generator with carry; combined with a weyl
// generator.
// Source: Computer Physics Communications 60 (1990) 345-349.
// Written by <NAME>, 1992.
// IMPLEMENTATION FILE

#ifdef __GNUG__
#ifdef PRAGMA_TEMPLATES
#pragma interface
#endif
#endif

#include "Multinomial.h"
#include <cmath> //CHANGE
//#include "general.h"

// Core pseudo-random source: subtract-with-carry lagged-Fibonacci generator
// combined with a Weyl sequence (see paper reference above).
class tMarsagliaGenerator
{
public:
    // query seeds the generator; 0 selects the default seed path.
    tMarsagliaGenerator(unsigned query=0);
    void Initialize(unsigned start);
    unsigned long RandomLong() {return Next();}
    // NOTE(review): the modulo mapping below is slightly biased unless
    // range divides the generator period -- presumably acceptable here.
    unsigned long RandomLong(unsigned long range) {return (Next() % range);}
    unsigned RandomInt(unsigned range) {return unsigned(Next() % range);}
    double RandomDouble(double range);
private:
    unsigned long word1[44];   // lag table for the subtract-with-carry stream
    unsigned long weyl;        // current Weyl-sequence term
    int i,j,carry;             // lag indices and carry bit
    unsigned long Next();      // produce the next raw draw
};

// Higher-level sampler layered on the raw generator: uniform, normal,
// gamma, multinomial and Dirichlet variates.
class tRandomGenerator : public tMarsagliaGenerator
{
public:
  // Sample Uniform (0,1)
  double SampleUniform();

  // sample from a multinomial
  int SampleMultinomial( int n, double const* );
  int sampleMulti(tMultinomial const & m );
  vector<int> sampleGroup(int size,int groupSize);

  // Array containing log probabilities (the array is modified !)
  int SampleLogMultinomial( int n, double* );

  // Sample from Normal distribution
  double SampleNormal();
  // mu is the mean; prec is the precision (1/variance) -- see inline below.
  double SampleGaussian(double mu, double prec);

  // Sample from a Gamma distribution
  double SampleGamma(double Alpha);
  // Beta acts as a rate parameter: result is SampleGamma(Alpha)/Beta.
  double SampleGamma(double Alpha, double Beta);

  // Sample from a Dirichlet Distribution
  void SampleDirichlet(const vector<double>& rgalpha, vector<double>& rgprob);
  void SampleDirichlet(const vector<double>& mean, double precision,
		       vector<double>& rgprob );
  void SampleDirichlet( int n, double const*alpha, double* prob );
  void SampleDirichlet( int n, double const* mean,
			double precision, double* prob );
  void SampleDirichlet( int n, float const* mean,
			double precision, float* prob );

protected:
  // Marsaglia/Ahrens-style gamma samplers split on the shape parameter.
  double DblGammaGreaterThanOne(double dblAlpha);
  double DblGammaLessThanOne(double dblAlpha);
};

// Shared process-wide generator instance.
extern tRandomGenerator _RandomProbGenerator;

// Normal draw scaled to mean mu and precision prec (sigma = 1/sqrt(prec)).
inline double
tRandomGenerator::SampleGaussian(double mu, double prec)
{
  double sigma = 1/sqrt(prec);
  double x= SampleNormal()*sigma + mu;
#ifdef LOG
  cerr << "SampleGaussian(" << mu << " " << sigma << ") = " << x << "\n";
#endif
  return x;
}

// Gamma(Alpha, rate Beta): scale the unit-rate draw by 1/Beta.
inline double
tRandomGenerator::SampleGamma(double Alpha, double Beta)
{
  double x= SampleGamma(Alpha)/Beta;
#ifdef LOG
  cerr << "SampleGamma(" << Alpha << " " << Beta << ") = " << x << "\n";
#endif
  return x;
}

// Convenience overloads forwarding to the precision-weighted variant.
inline void
tRandomGenerator::SampleDirichlet(const vector<double>& rgalpha,
				  vector<double>& rgprob)
{
  SampleDirichlet( rgalpha, 1.0, rgprob );
}

inline void
tRandomGenerator::SampleDirichlet( int n, double const* alpha, double * prob )
{
  SampleDirichlet( n, alpha, 1.0, prob );
}

#endif
itzrexmodz/Carla-1
neko/modules/sql/chats_sql.py
from sqlalchemy import Column, String

from . import BASE, SESSION


class Chats(BASE):
    """ORM model: one row per chat the bot knows about."""

    __tablename__ = "chats"
    chat_id = Column(String(14), primary_key=True)

    def __init__(self, chat_id):
        self.chat_id = chat_id


# Create the backing table on import if it does not already exist.
Chats.__table__.create(checkfirst=True)


def add_chat(chat_id: str):
    """Persist *chat_id* as a known chat."""
    SESSION.add(Chats(str(chat_id)))
    SESSION.commit()


def rmchat(chat_id: str):
    """Delete *chat_id* from the known chats, if present."""
    row = SESSION.query(Chats).get(str(chat_id))
    if row:
        SESSION.delete(row)
        SESSION.commit()


def get_all_chat_id():
    """Return every stored Chats row."""
    rows = SESSION.query(Chats).all()
    SESSION.close()
    return rows


def is_chat(chat_id: str):
    """Return the stored chat id as a string if known, else None."""
    try:
        row = SESSION.query(Chats).get(str(chat_id))
        return str(row.chat_id) if row else None
    finally:
        SESSION.close()
visit-dav/vis
src/avt/DBAtts/MetaData/avtSubsetsMetaData.h
<reponame>visit-dav/vis
// Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
// Project developers.  See the top-level LICENSE file for dates and other
// details.  No copyright assignment is required to contribute to VisIt.

#ifndef AVTSUBSETSMETADATA_H
#define AVTSUBSETSMETADATA_H
#include <dbatts_exports.h>
#include <string>
#include <avtVarMetaData.h>
#include <NameschemeAttributes.h>

#include <vector>
#include <algorithm>
#include <NameschemeAttributes.h>

// ****************************************************************************
// Class: avtSubsetsMetaData
//
// Purpose:
//    Information about a particular category of subsets of a mesh (even for material subsets)
//
// Notes:      Autogenerated by xml2atts.
//
// Programmer: xml2atts
// Creation:   omitted
//
// Modifications:
//
// ****************************************************************************

class DBATTS_API avtSubsetsMetaData : public avtVarMetaData
{
public:
    // How cells that straddle a subset boundary are handled.
    enum PartialCellModes
    {
        Include,
        Exclude,
        Dissect
    };
    // Relationship between subsets and domain-decomposition chunks.
    enum DecompMode
    {
        None,
        Cover,
        Partition
    };

    // These constructors are for objects of this class
    avtSubsetsMetaData();
    avtSubsetsMetaData(const avtSubsetsMetaData &obj);
protected:
    // These constructors are for objects derived from this class
    avtSubsetsMetaData(private_tmfs_t tmfs);
    avtSubsetsMetaData(const avtSubsetsMetaData &obj, private_tmfs_t tmfs);
public:
    virtual ~avtSubsetsMetaData();
    virtual avtSubsetsMetaData& operator = (const avtSubsetsMetaData &obj);
    virtual bool operator == (const avtSubsetsMetaData &obj) const;
    virtual bool operator != (const avtSubsetsMetaData &obj) const;
private:
    void Init();
    void Copy(const avtSubsetsMetaData &obj);
public:

    virtual const std::string TypeName() const;
    virtual bool CopyAttributes(const AttributeGroup *);
    virtual AttributeSubject *CreateCompatible(const std::string &) const;
    virtual AttributeSubject *NewInstance(bool) const;

    // Property selection methods
    virtual void SelectAll();
    void SelectCatName();
    void SelectNameScheme();
    void SelectSetsToChunksMaps();
    void SelectGraphEdges();

    // Property setting methods
    void SetCatName(const std::string &catName_);
    void SetCatCount(int catCount_);
    void SetNameScheme(const NameschemeAttributes &nameScheme_);
    void SetSetsToChunksMaps(const intVector &setsToChunksMaps_);
    void SetGraphEdges(const intVector &graphEdges_);

    // Property getting methods
    const std::string          &GetCatName() const;
          std::string          &GetCatName();
    int                        GetCatCount() const;
    const NameschemeAttributes &GetNameScheme() const;
          NameschemeAttributes &GetNameScheme();
    const intVector            &GetSetsToChunksMaps() const;
          intVector            &GetSetsToChunksMaps();
    const intVector            &GetGraphEdges() const;
          intVector            &GetGraphEdges();

    // Enum conversion functions
    static std::string PartialCellModes_ToString(PartialCellModes);
    static bool PartialCellModes_FromString(const std::string &, PartialCellModes &);
protected:
    static std::string PartialCellModes_ToString(int);
public:
    static std::string DecompMode_ToString(DecompMode);
    static bool DecompMode_FromString(const std::string &, DecompMode &);
protected:
    static std::string DecompMode_ToString(int);
public:

    // Keyframing methods
    virtual std::string               GetFieldName(int index) const;
    virtual AttributeGroup::FieldType GetFieldType(int index) const;
    virtual std::string               GetFieldTypeName(int index) const;
    virtual bool                      FieldsEqual(int index, const AttributeGroup *rhs) const;

    // User-defined methods
    void Print(ostream &, int = 0) const;
    void AddGraphEdge(int head, int tail);
    void SetChunksForSet(int setId, std::vector<int> &chunks);
    avtSubsetsMetaData(const char *catName, int catCount, int maxTopoDim);
    avtSubsetsMetaData(const std::string &catName, int catCount, int maxTopoDim);
    void SetChunksForSet(int setId, const int *chunks, int len);

    // IDs that can be used to identify fields in case statements
    enum {
        ID_catName = avtVarMetaData::ID__LAST,
        ID_catCount,
        ID_nameScheme,
        ID_colorScheme,
        ID_setsToChunksMaps,
        ID_graphEdges,
        ID_isChunkCat,
        ID_isMaterialCat,
        ID_isUnionOfChunks,
        ID_hasPartialCells,
        ID_decompMode,
        ID_maxTopoDim,
        ID__LAST
    };

public:
    stringVector colorScheme;      // per-set display colors
    bool         isChunkCat;       // category enumerates decomposition chunks
    bool         isMaterialCat;    // category represents materials
    bool         isUnionOfChunks;  // each set is a union of whole chunks
    bool         hasPartialCells;  // sets may contain partial (mixed) cells
    int          decompMode;       // one of DecompMode (stored as int)
    int          maxTopoDim;       // highest topological dimension of the sets
private:
    std::string          catName;
    int                  catCount;
    NameschemeAttributes nameScheme;
    intVector            setsToChunksMaps;
    intVector            graphEdges;       // flat (head, tail) pair list; see AddGraphEdge

    // Static class format string for type map.
    static const char *TypeMapFormatString;
    static const private_tmfs_t TmfsStruct;
};
#define AVTSUBSETSMETADATA_TMFS AVTVARMETADATA_TMFS "sias*i*i*bbbbii"

#endif
RussellChamp/cover-api
application/api/query.go
<gh_stars>0 package api import ( "strconv" "strings" "github.com/gobuffalo/buffalo" ) // Query contains criteria to limit the results of List endpoints type Query struct { // filterKeys is a map of field name to filter text. filterKeys map[string]string // searchText is text to search across multiple fields searchText string // recordLimit sets the number of records returned in a single page. Minimum is 1, maximum is 50 recordLimit int } func (q Query) Limit() int { l := q.recordLimit if l < 1 { l = 1 } if l > 50 { l = 50 } return q.recordLimit } func (q Query) Filter(key string) string { return q.filterKeys[key] } func (q Query) Search() string { return q.searchText } // NewQuery parses query string parameter values into valid query criteria. // // Example: // "filter=name:John,description:MacBook" becomes Query{filterKeys: // map[string]string{"name":"John","description":"MacBook"}} func NewQuery(values buffalo.ParamValues) Query { q := Query{recordLimit: 10, filterKeys: map[string]string{}} q.searchText = values.Get("search") if filter := values.Get("filter"); filter != "" { pairs := strings.Split(strings.TrimSpace(filter), ",") for _, p := range pairs { split := strings.SplitN(p, ":", 2) if len(split) == 2 { q.filterKeys[strings.TrimSpace(split[0])] = strings.TrimSpace(split[1]) } } } if limit := values.Get("limit"); limit != "" { i, err := strconv.Atoi(strings.TrimSpace(limit)) if err == nil { q.recordLimit = i } } return q }
mamontov-cpp/saddy
tools/ifaceed/ifaceed/gui/actions/labelactions.h
<gh_stars>10-100
/*! \file labelactions.h

    Describes a group of actions, linked to label
 */
#pragma once
#include <QObject>

#include <input/events.h>

#include "abstractactions.h"

class MainPanel;

namespace history
{
class Command;
}

namespace sad
{
class SceneNode;
}

namespace gui
{

namespace actions
{

/*! A group of actions, linked to labels. Slots here react to editor UI
    changes and wrap each property change into an undoable history command.
 */
class LabelActions: public QObject, public gui::actions::AbstractActions
{
Q_OBJECT
public:
    /*! A command maker callback: builds the history command for changing one
        unsigned-int property of a node from old_value to new_value */
    typedef history::Command* (gui::actions::LabelActions::*CommandMaker)(sad::SceneNode* node, unsigned int old_value, unsigned int new_value);
    /*! Creates new label actions
        \param[in] parent a parent object
     */
    LabelActions(QObject* parent = nullptr);
    /*! This class could be inherited
     */
    virtual ~LabelActions();
    /*! Cancels adding label state, enters previous state
     */
    void cancelAddLabel();
    /*! Moves label to a mouse position
        \param[in] e event
     */
    void moveLabel(const sad::input::MouseMoveEvent& e);
    /*! Commits label adding to a scene
        \param[in] e event
     */
    void commitLabelAdd(const sad::input::MousePressEvent& e);
public slots:
    /*! Adds new label to a scene, if all parameters in form are valid
     */
    void addLabel();
    /*! Handles label font changes for an editable node
        \param[in] s a new resource name
     */
    void labelFontChanged(sad::String s);
    /*!
     * Handles label size changes for an editable node
     * \param[in] new_size new label size
     */
    void labelSizeChanged(unsigned int new_size);
    /*!
     * Called, when user types in label text
     */
    void labelTextChanged();
    /*!
     * Handles label line spacing changes
     * \param[in] new_value a new value
     */
    void labelLineSpacingChanged(double new_value);
    /*! Handles change of label's maximal line width
     *  \param[in] new_value a new value for property
     */
    void labelMaximalLineWidthChanged(int new_value);
    /*! Handles change of break text parameter for label
     *  \param[in] new_value a new value for property
     */
    void labelBreakTextChanged(int new_value);
    /*! Handles change of how overflow of line should be handled
     *  \param[in] new_value a new value for property
     */
    void labelOverflowStrategyChanged(int new_value);
    /*! Handles change of where suspension sign should be placed
     *  \param[in] new_value a new value for property
     */
    void labelTextEllipsisChanged(int new_value);
    /*! Handles change of label's maximal lines count
     *  \param[in] new_value a new value for property
     */
    void labelMaximalLinesCountChanged(int new_value);
    /*! Handles change of how overflow of text should be handled
     *  \param[in] new_value a new value for property
     */
    void labelOverflowStrategyForLinesChanged(int new_value);
    /*! Handles change of where suspension sign should be placed in case, when multiple lines overflow
     *  \param[in] new_value a new value for property
     */
    void labelTextEllipsisForLinesChanged(int new_value);
    /*! Handles attempt to change "has formatting" property, disabling or enabling formatting in
     *  corresponding object
     *  \param[in] new_value a new value for formatting
     */
    void labelHasFormattingChanged(bool new_value);
private:
    /*! Performs property change, related to value
        \param[in] new_value a new value for property
        \param[in] prop property value
        \param[in] maker a maker command
     */
    void unsignedIntPropertyChanged(
        int new_value,
        const sad::String& prop,
        gui::actions::LabelActions::CommandMaker maker
    );
    /*! Returns command, related to label actions (factory helper used as a
        CommandMaker for history command types T)
        \param[in] node a node value
        \param[in] old_value an old value for property
        \param[in] new_value
        \return command
     */
    template<
        typename T
    >
    // ReSharper disable once CppMemberFunctionMayBeStatic
    history::Command* command(sad::SceneNode* node, unsigned int old_value, unsigned int new_value)
    {
        return new T(node, old_value, new_value);
    }
};

}

}
NunoEdgarGFlowHub/marathon
src/test/scala/mesosphere/marathon/integration/InfoIntegrationTest.scala
<reponame>NunoEdgarGFlowHub/marathon<gh_stars>0
package mesosphere.marathon.integration

import mesosphere.marathon.integration.setup._
import org.scalatest.{ GivenWhenThen, Matchers }

/** Integration test checking that GET /v2/info reports the configuration the
  * harness started Marathon with (ports, ZooKeeper, Mesos master, leadership).
  */
class InfoIntegrationTest extends IntegrationFunSuite with SingleMarathonIntegrationTest with GivenWhenThen with Matchers {
  test("v2/info returns the right values") {
    When("fetching the info")
    val response = marathon.info

    Then("the response should be successful")
    response.code should be (200)

    val info = response.entityJson

    And("the http port should be correct")
    (info \ "http_config" \ "http_port").as[Int] should be (config.marathonBasePort)

    And("the ZooKeeper info should be correct")
    (info \ "zookeeper_config" \ "zk").as[String] should be (config.zk)

    And("the mesos master information should be correct")
    (info \ "marathon_config" \ "master").as[String] should be (config.master)

    And("the request should always be answered by the leader")
    (info \ "elected").as[Boolean] should be (true)

    And("the leader value in the JSON should match the one in the HTTP headers")
    // header carries a full URL while the JSON field is host:port only
    val headerLeader =
      response.originalResponse.headers.find(_.name.equals("X-Marathon-Leader")).get.value.replace("http://", "")
    (info \ "leader").as[String] should be (headerLeader)

    And("the leader should match the value returned by /v2/leader")
    (info \ "leader").as[String] should be (marathon.leader().value.leader)
  }
}
uc-seng302-rubber-ducks/organs_for_ducks
server/src/main/java/odms/security/WebSecurityConfig.java
package odms.security;

import org.springframework.context.annotation.Configuration;
import org.springframework.security.config.annotation.method.configuration.EnableGlobalMethodSecurity;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.web.authentication.AbstractAuthenticationProcessingFilter;
import org.springframework.security.web.authentication.AnonymousAuthenticationFilter;
import org.springframework.security.web.util.matcher.AndRequestMatcher;
import org.springframework.security.web.util.matcher.AntPathRequestMatcher;
import org.springframework.security.web.util.matcher.NegatedRequestMatcher;

/**
 * Spring Security configuration: installs a custom token-authentication
 * filter for every endpoint except the ones explicitly left unprotected.
 */
@Configuration
@EnableWebSecurity
@EnableGlobalMethodSecurity(prePostEnabled = true)
public class WebSecurityConfig extends WebSecurityConfigurerAdapter {

    /**
     * This is where all security for the server is set up. all endpoints are protected, unless stated.
     *
     * @param http HttpSecurity object to add custom filters to
     * @throws Exception generic exception thrown during setup
     */
    @Override
    protected void configure(HttpSecurity http) throws Exception {
        // NOTE: CSRF protection is deliberately disabled (token-based API).
        http.addFilterBefore(createCustomFilter(), AnonymousAuthenticationFilter.class).csrf().disable();
    }

    /**
     * Builds the AuthFilter that guards every request whose path is NOT in
     * the unprotected-endpoint list below (the matcher is negated).
     *
     * @return the configured authentication filter
     * @throws Exception if the authentication manager cannot be obtained
     */
    protected AbstractAuthenticationProcessingFilter createCustomFilter() throws Exception {
        AuthFilter filter = new AuthFilter(new NegatedRequestMatcher(
                new AndRequestMatcher(
                        //list any unprotected endpoints here
                        //it can end with /** as a wildcard
                        new AntPathRequestMatcher("/login")
                        //, new AntPathRequestMatcher("/myEndpoint")
                )
        ));
        filter.setAuthenticationManager(authenticationManagerBean());
        return filter;
    }
}
depaul-dice/provenance-to-use
criu/compel/test/fdspy/victim.c
#include <unistd.h>

/*
 * Test victim: block on stdin, draining it one byte at a time until
 * EOF or a read error, then exit cleanly.
 */
int main(int argc, char **argv)
{
	char byte;
	ssize_t got;

	while ((got = read(0, &byte, 1)) > 0)
		;

	return 0;
}
dasec/ForTrace
utils/noise-filter/noise_filter.py
# Copyright (C) 2020 <NAME>

# This is a proof of concept in order to reduce the noise e.g. created by telemetry of the operating system or the used
# browser to reduce the traffic dump created by fortrace to contain as much packets related to the actual application
# (e.g. a video stream from youtube.com).

# Our motivation is to use fortrace to create datasets as close as possible to the VPN-nonVPN dataset (ISCXVPN2016)
# (https://www.unb.ca/cic/datasets/vpn.html) used by multiple traffic/application classification machine learning
# models. The noise reduction approach of the authors is to simply keep open only the required applications to capture
# the wanted traffic. While this is obviously the first and most straightforward approach to reduce the noise,
# the operating system, the browser and other background services (e.g. for telemetry) continuously generate traffic,
# which is not related to the actual application at all. As a result we try to further enhance our traffic dumps by
# filtering for known noise sources as an additional pre-processing step before using our generated datasets.

# PyShark is too slow and scapy is bloated (even worse), as a result this PoC is not intended for productive use on
# big (i.e. >1GB) .pcap dumps. For a production rewrite, prefer a parallel streaming parser (e.g. Rust + pcap-parser).

import argparse


def load_blacklist():
    """Read blacklist.txt and return a list with one blacklisted domain per line."""
    with open('blacklist.txt') as f:
        return f.read().splitlines()


def resolve_dns_requests(blacklist, pcap):
    """Scan DNS answers in *pcap* and return the set of IPs resolved for blacklisted domains."""
    # Imported lazily so the pure filter-building logic stays importable
    # without tshark/pyshark being installed.
    import pyshark

    ips = set()
    cap = pyshark.FileCapture(pcap, display_filter='dns')
    for packet in cap:
        if 'a' in packet.dns.field_names:
            if packet.dns.qry_name in blacklist:
                print(f'[~] Blacklisting {packet.dns.qry_name} @ {packet.dns.a}.')
                ips.add(packet.dns.a)
    cap.close()
    return ips


def build_display_filter(ips, keep_dns, keep_arp, keep_icmp, keep_dhcp):
    """Build the Wireshark display filter string excluding noisy IPs and protocols.

    FIX: the previous string-concatenation approach produced a filter starting
    with a dangling "and" whenever *ips* was empty, which tshark rejects.
    Building a clause list and joining avoids that edge case entirely.
    """
    # sorted() makes the generated filter deterministic regardless of set order
    clauses = [f'!(ip.addr == {ip})' for ip in sorted(ips)]
    if not keep_arp:
        clauses.append('!arp')
    if not keep_dhcp:
        clauses += ['!dhcp', '!dhcpv6']
    if not keep_icmp:
        clauses += ['!icmp', '!icmpv6']
    if not keep_dns:
        clauses += ['!dns', '!llmnr', '!mdns', '!nbns']
    # There is a lot of additional noise generated by the operating system.
    # For the moment, we will just filter these unconditionally as well.
    clauses += ['!nfs', '!ssdp', '!mount', '!igmp', '!nlm', '!portmap', '!stp', '!browser']
    return ' and '.join(clauses)


def apply_filter(pcap, ips, keep_dns, keep_arp, keep_icmp, keep_dhcp):
    """Write filtered.pcap containing *pcap* minus the blacklisted IPs/protocols."""
    import pyshark

    display_filter = build_display_filter(ips, keep_dns, keep_arp, keep_icmp, keep_dhcp)
    # A little bit hacky to modify our dump by applying a display filter, but pyshark does not expose any functionality
    # to modify loaded file dumps and scapy is way too slow and bloated to be used for anything but interactive
    # sessions. And yes, BPF filters are faster than a display filter, but they are not available for file captures.
    # https://github.com/KimiNewt/pyshark/issues/69
    capture = pyshark.FileCapture(pcap, output_file='filtered.pcap', display_filter=display_filter)
    capture.load_packets()


def main():
    # Parse command line arguments. (Help texts fixed: --keep-* flags KEEP the
    # respective traffic; the old texts said "remove".)
    parser = argparse.ArgumentParser(description='Filter known noise (telemetry, OS background traffic) from a .pcap dump.')
    parser.add_argument('pcap', type=str, help='path to the .pcap file')
    parser.add_argument('--keep-dns', help='keep dns requests', default=False, required=False, action='store_true')
    parser.add_argument('--keep-arp', help='keep arp requests', default=False, required=False, action='store_true')
    parser.add_argument('--keep-icmp', help='keep icmp requests', default=False, required=False, action='store_true')
    parser.add_argument('--keep-dhcp', help='keep dhcp requests', default=False, required=False, action='store_true')
    args = parser.parse_args()

    blacklist = load_blacklist()
    print(f'[+] Loaded {len(blacklist)} domains from the blacklist.')
    noisy_ips = resolve_dns_requests(blacklist, args.pcap)
    print(f'[+] Identified noisy {len(noisy_ips)} IPs (e.g. telemetry requests).')
    apply_filter(args.pcap, noisy_ips, args.keep_dns, args.keep_arp, args.keep_icmp, args.keep_dhcp)
    print('[+] Successfully filtered noise from the .pcap dump.')


if __name__ == '__main__':
    main()
SanojPunchihewa/devstudio-tooling-esb
plugins/org.wso2.developerstudio.eclipse.gmf.esb/src/org/wso2/developerstudio/eclipse/gmf/esb/EnrichSourceType.java
/** * <copyright> * </copyright> * * $Id$ */ package org.wso2.developerstudio.eclipse.gmf.esb; import java.util.Arrays; import java.util.Collections; import java.util.List; import org.eclipse.emf.common.util.Enumerator; /** * <!-- begin-user-doc --> * A representation of the literals of the enumeration '<em><b>Enrich Source Type</b></em>', * and utility methods for working with them. * <!-- end-user-doc --> * @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getEnrichSourceType() * @model * @generated */ public enum EnrichSourceType implements Enumerator { /** * The '<em><b>CUSTOM</b></em>' literal object. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @see #CUSTOM_VALUE * @generated * @ordered */ CUSTOM(0, "CUSTOM", "custom"), /** * The '<em><b>ENVELOPE</b></em>' literal object. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @see #ENVELOPE_VALUE * @generated * @ordered */ ENVELOPE(1, "ENVELOPE", "envelope"), /** * The '<em><b>BODY</b></em>' literal object. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @see #BODY_VALUE * @generated * @ordered */ BODY(2, "BODY", "body"), /** * The '<em><b>PROPERTY</b></em>' literal object. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @see #PROPERTY_VALUE * @generated * @ordered */ PROPERTY(3, "PROPERTY", "property"), /** * The '<em><b>INLINE</b></em>' literal object. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @see #INLINE_VALUE * @generated * @ordered */ INLINE(4, "INLINE", "inline"); /** * The '<em><b>CUSTOM</b></em>' literal value. * <!-- begin-user-doc --> * <p> * If the meaning of '<em><b>CUSTOM</b></em>' literal object isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @see #CUSTOM * @model literal="custom" * @generated * @ordered */ public static final int CUSTOM_VALUE = 0; /** * The '<em><b>ENVELOPE</b></em>' literal value. 
* <!-- begin-user-doc --> * <p> * If the meaning of '<em><b>ENVELOPE</b></em>' literal object isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @see #ENVELOPE * @model literal="envelope" * @generated * @ordered */ public static final int ENVELOPE_VALUE = 1; /** * The '<em><b>BODY</b></em>' literal value. * <!-- begin-user-doc --> * <p> * If the meaning of '<em><b>BODY</b></em>' literal object isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @see #BODY * @model literal="body" * @generated * @ordered */ public static final int BODY_VALUE = 2; /** * The '<em><b>PROPERTY</b></em>' literal value. * <!-- begin-user-doc --> * <p> * If the meaning of '<em><b>PROPERTY</b></em>' literal object isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @see #PROPERTY * @model literal="property" * @generated * @ordered */ public static final int PROPERTY_VALUE = 3; /** * The '<em><b>INLINE</b></em>' literal value. * <!-- begin-user-doc --> * <p> * If the meaning of '<em><b>INLINE</b></em>' literal object isn't clear, * there really should be more of a description here... * </p> * <!-- end-user-doc --> * @see #INLINE * @model literal="inline" * @generated * @ordered */ public static final int INLINE_VALUE = 4; /** * An array of all the '<em><b>Enrich Source Type</b></em>' enumerators. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @generated */ private static final EnrichSourceType[] VALUES_ARRAY = new EnrichSourceType[] { CUSTOM, ENVELOPE, BODY, PROPERTY, INLINE, }; /** * A public read-only list of all the '<em><b>Enrich Source Type</b></em>' enumerators. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @generated */ public static final List<EnrichSourceType> VALUES = Collections.unmodifiableList(Arrays.asList(VALUES_ARRAY)); /** * Returns the '<em><b>Enrich Source Type</b></em>' literal with the specified literal value. 
* <!-- begin-user-doc --> * <!-- end-user-doc --> * @param literal the literal. * @return the matching enumerator or <code>null</code>. * @generated */ public static EnrichSourceType get(String literal) { for (int i = 0; i < VALUES_ARRAY.length; ++i) { EnrichSourceType result = VALUES_ARRAY[i]; if (result.toString().equals(literal)) { return result; } } return null; } /** * Returns the '<em><b>Enrich Source Type</b></em>' literal with the specified name. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param name the name. * @return the matching enumerator or <code>null</code>. * @generated */ public static EnrichSourceType getByName(String name) { for (int i = 0; i < VALUES_ARRAY.length; ++i) { EnrichSourceType result = VALUES_ARRAY[i]; if (result.getName().equals(name)) { return result; } } return null; } /** * Returns the '<em><b>Enrich Source Type</b></em>' literal with the specified integer value. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @param value the integer value. * @return the matching enumerator or <code>null</code>. * @generated */ public static EnrichSourceType get(int value) { switch (value) { case CUSTOM_VALUE: return CUSTOM; case ENVELOPE_VALUE: return ENVELOPE; case BODY_VALUE: return BODY; case PROPERTY_VALUE: return PROPERTY; case INLINE_VALUE: return INLINE; } return null; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * @generated */ private final int value; /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * @generated */ private final String name; /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * @generated */ private final String literal; /** * Only this class can construct instances. 
* <!-- begin-user-doc --> * <!-- end-user-doc --> * @generated */ private EnrichSourceType(int value, String name, String literal) { this.value = value; this.name = name; this.literal = literal; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * @generated */ public int getValue() { return value; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * @generated */ public String getName() { return name; } /** * <!-- begin-user-doc --> * <!-- end-user-doc --> * @generated */ public String getLiteral() { return literal; } /** * Returns the literal value of the enumerator, which is its string representation. * <!-- begin-user-doc --> * <!-- end-user-doc --> * @generated */ @Override public String toString() { return literal; } } // EnrichSourceType
Bizzarrus/CloakEngine
CloakEngine/CloakEngine/Rendering/ColorBuffer.h
<filename>CloakEngine/CloakEngine/Rendering/ColorBuffer.h #pragma once #ifndef CE_API_RENDERING_COLORBUFFER_H #define CE_API_RENDERING_COLORBUFFER_H #include "CloakEngine/Defines.h" #include "CloakEngine/Rendering/BasicBuffer.h" namespace CloakEngine { CLOAKENGINE_API_NAMESPACE namespace API { namespace Rendering { inline namespace Context_v1 { CLOAK_INTERFACE IContext; CLOAK_INTERFACE IComputeContext; } inline namespace ColorBuffer_v1 { CLOAK_INTERFACE_ID("{E85004C9-A8F0-4771-9E6D-543097ADB77C}") IColorBuffer : public virtual IPixelBuffer{ public: virtual ResourceView CLOAK_CALL_THIS GetSRV(In_opt bool cube = false) const = 0; virtual ResourceView CLOAK_CALL_THIS GetUAV(In_opt UINT num = 0) const = 0; virtual ResourceView CLOAK_CALL_THIS GetDynamicSRV(In_opt bool cube = false) const = 0; virtual ResourceView CLOAK_CALL_THIS GetDynamicUAV(In_opt UINT num = 0) const = 0; virtual ResourceView CLOAK_CALL_THIS GetRTV(In_opt UINT num = 0) const = 0; virtual const Helper::Color::RGBA& CLOAK_CALL_THIS GetClearColor() const = 0; virtual void CLOAK_CALL_THIS Recreate(In const TEXTURE_DESC& desc) = 0; }; } } } } #endif
xym100111100/bussines-web
src/services/afcflow.js
<filename>src/services/afcflow.js<gh_stars>0 import { stringify } from 'qs'; import request from '../utils/request'; export async function personList(params) { return request(`/afc-svr/afc/personTradeList?${stringify(params)}`); } export async function orgList(params) { return request(`/afc-svr/afc/orgTradeList?${stringify(params)}`); } export async function orgTrade(params) { return request(`/afc-svr/afc/orgTrade?${stringify(params)}`); } export async function tradeList(params) { return request(`/afc-svr/afc/trade?${stringify(params)}`); }
SenonLi/VS_OpenGLSL_4.1
vsSenOpenGL/LearnOpenGL_GLFW/Sen_26_PostProcessing.h
<reponame>SenonLi/VS_OpenGLSL_4.1 #pragma once #ifndef __Sen_26_PostProcessing__ #define __Sen_26_PostProcessing__ #include "LearnOpenGL_GLFW/SenFreeSpaceAbstract.h" class Sen_26_PostProcessing : public SenFreeSpaceAbstract { public: Sen_26_PostProcessing(); virtual ~Sen_26_PostProcessing(); protected: void initGlfwGlewGL(); void paintFreeSpaceGL(void); void cleanFreeSpace(void); Sen_Cube *similarCube; GLuint cubeTexture, floorTexture; glm::vec3 firstCubePosition = glm::vec3(-1.0f, 0.0f + SenFREESPACE_ZfightingOffset, -1.0f); glm::vec3 secondCubePosition = glm::vec3(2.0f, 0.0f + SenFREESPACE_ZfightingOffset, 0.0f); // ==== FrameBuffer ==== GLuint testFrameBufferObject; GLuint textureColorBuffer, depthStencilRenderBufferObject; GLint testFrameBufferWidth, testFrameBufferHeight; }; #endif
ktzevani/native-camera-vulkan
app/src/main/cpp/graphics/resources/image.hpp
/* * Copyright 2020 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef NCV_RESOURCES_IMAGE_HPP #define NCV_RESOURCES_IMAGE_HPP #include <graphics/resources/base.hpp> #include <graphics/resources/types.hpp> namespace graphics{ namespace resources{ class image_base : public base { public: image_base(const vk::PhysicalDevice& a_gpu, const vk::UniqueDevice& a_device) : base{a_gpu, a_device} {} virtual ~image_base() { destroy_resources(); } vk::Image& get() { return m_image; } vk::ImageView& get_img_view() { return m_img_view; } protected: virtual void destroy_resources() noexcept; vk::DeviceMemory m_memory = nullptr; vk::Image m_image = nullptr; vk::ImageView m_img_view = nullptr; }; template<typename Policy, typename ImageDataFormat> class image : public image_base {}; template<> class image<external> : public image_base { public: image(const vk::PhysicalDevice& a_gpu, const vk::UniqueDevice& a_device, AHardwareBuffer* a_buffer); ~image() { destroy_resources(); } void update(vk::ImageUsageFlags a_usage, vk::SharingMode a_sharing, AHardwareBuffer* a_buffer); vk::Sampler& get_sampler() { return m_sampler; } private: void destroy_resources() noexcept override; vk::Sampler m_sampler = nullptr; vk::SamplerYcbcrConversion m_conversion = nullptr; }; template<typename ImageDataFormat> class image<host, ImageDataFormat> {}; template<> class image<device> : public image_base { public: image(const vk::PhysicalDevice& a_gpu, const vk::UniqueDevice& a_device, vk::ImageUsageFlags 
a_usage, vk::SharingMode a_sharing, const vk::Extent2D& a_extent); }; template<typename ImageDataFormat> class image<device_upload, ImageDataFormat> : public image_base { public: image(const vk::PhysicalDevice& a_gpu, const vk::UniqueDevice& a_device, vk::ImageUsageFlags a_usage, vk::SharingMode a_sharing, vk::Extent3D& a_extent, vk::Format a_format, const std::vector<ImageDataFormat>& a_data); ~image() { destroy_resources(); } void update_staging(const std::vector<ImageDataFormat>& a_data); vk::Buffer& get_staging() { return m_staging_buffer; } vk::DeviceSize size_staging() { return m_staging_size; } private: void destroy_resources() noexcept override; vk::DeviceSize m_staging_size = 0; vk::DeviceMemory m_staging_memory = nullptr; vk::Buffer m_staging_buffer = nullptr; }; template<typename ImageDataFormat> image<device_upload, ImageDataFormat>::image(const vk::PhysicalDevice &a_gpu, const vk::UniqueDevice &a_device, vk::ImageUsageFlags a_usage, vk::SharingMode a_sharing, vk::Extent3D &a_extent, vk::Format a_format, const std::vector<ImageDataFormat> &a_data) : image_base{a_gpu, a_device} { using namespace ::vk; int num_channels; switch (a_format) { case vk::Format::eR8G8B8A8Srgb: case vk::Format::eB8G8R8A8Srgb: num_channels = 4; break; case vk::Format::eR8G8B8Srgb: case vk::Format::eB8G8R8Srgb: num_channels = 3; break; default: num_channels = -1; } if(num_channels == -1) throw std::runtime_error{"Format is not supported."}; m_data_size = a_extent.width * a_extent.height * num_channels; BufferCreateInfo staging_buffer_info; staging_buffer_info.usage = BufferUsageFlagBits::eTransferSrc; staging_buffer_info.size = m_data_size; staging_buffer_info.sharingMode = a_sharing; m_staging_buffer = m_device.createBuffer(staging_buffer_info); auto mem_reqs = m_device.getBufferMemoryRequirements(m_staging_buffer); m_staging_size = mem_reqs.size; MemoryAllocateInfo mem_info; mem_info.memoryTypeIndex = get_memory_index(mem_reqs.memoryTypeBits, memory_location::host); 
mem_info.allocationSize = mem_reqs.size; try { m_staging_memory = m_device.allocateMemory(mem_info); } catch (std::exception const &e) { destroy_resources(); throw e; } uint8_t* pt; MemoryMapFlags map_flags {0}; auto mresult = m_device.mapMemory(m_staging_memory, 0, mem_reqs.size, map_flags, reinterpret_cast<void**>(&pt)); if(mresult == Result::eSuccess) { memcpy(pt, a_data.data(), m_staging_size); } else { destroy_resources(); throw std::runtime_error{"Result is: " + to_string(mresult) + ". Could not copy image data to staging buffer."}; } m_device.unmapMemory(m_staging_memory); try { m_device.bindBufferMemory(m_staging_buffer, m_staging_memory, 0); } catch (std::exception const &e) { destroy_resources(); throw e; } ImageCreateInfo image_info; //image_info.flags image_info.imageType = ImageType::e2D; image_info.format = a_format; image_info.extent.width = a_extent.width; image_info.extent.height = a_extent.height; image_info.extent.depth = a_extent.depth; image_info.mipLevels = 1; image_info.arrayLayers = 1; image_info.samples = SampleCountFlagBits::e1; image_info.tiling = ImageTiling::eOptimal; image_info.usage = ImageUsageFlagBits::eTransferDst | a_usage; image_info.sharingMode = a_sharing; image_info.queueFamilyIndexCount = 0; image_info.pQueueFamilyIndices = nullptr; image_info.initialLayout = ImageLayout::eUndefined; m_image = m_device.createImage(image_info); mem_reqs = m_device.getImageMemoryRequirements(m_image); mem_info.memoryTypeIndex = get_memory_index(mem_reqs.memoryTypeBits, memory_location::device); mem_info.allocationSize = mem_reqs.size; m_size = mem_reqs.size; ImageViewCreateInfo img_view_info; img_view_info.image = m_image; img_view_info.viewType = ImageViewType::e2D; img_view_info.format = a_format; //img_view_info.components img_view_info.subresourceRange.aspectMask = ImageAspectFlagBits::eColor; img_view_info.subresourceRange.baseMipLevel = 0; img_view_info.subresourceRange.levelCount = 1; img_view_info.subresourceRange.baseArrayLayer = 0; 
img_view_info.subresourceRange.layerCount = 1; try { m_memory = m_device.allocateMemory(mem_info); m_device.bindImageMemory(m_image, m_memory, 0); m_img_view = m_device.createImageView(img_view_info); } catch(std::exception const &e) { destroy_resources(); throw e; } } template<typename ImageDataFormat> void image<device_upload, ImageDataFormat>::destroy_resources() noexcept { if(!!m_staging_buffer) m_device.destroyBuffer(m_staging_buffer); if(!!m_staging_memory) m_device.freeMemory(m_staging_memory); } template<typename ImageDataFormat> void image<device_upload, ImageDataFormat>::update_staging(const std::vector<ImageDataFormat> &a_data) { if(a_data.size() * sizeof(ImageDataFormat) != m_data_size) throw std::runtime_error{"Data size differs. Cannot update buffer."}; uint8_t* pt; vk::MemoryMapFlags map_flags {0}; auto result = m_device.mapMemory(m_staging_memory, 0, m_staging_size, map_flags, reinterpret_cast<void**>(&pt)); if(result == vk::Result::eSuccess) memcpy(pt, a_data.data(), m_staging_size); else throw std::runtime_error{"Result is: " + to_string(result) + ". Could not copy host data."}; m_device.unmapMemory(m_staging_memory); } }} #endif //NCV_RESOURCES_IMAGE_HPP
SoftwareAG/cumulocity-agents-opc
opcua-agent/gateway/src/main/java/com/cumulocity/opcua/gateway/repository/core/GatewayRepository.java
<gh_stars>0 package com.cumulocity.opcua.gateway.repository.core; import com.cumulocity.model.idtype.GId; import com.cumulocity.opcua.gateway.model.gateway.Gateway; import com.google.common.base.Optional; import lombok.NonNull; import java.util.Collection; public interface GatewayRepository<E> { @NonNull Optional<E> get(@NonNull Gateway gateway, @NonNull GId key); @NonNull Collection<E> findAll(@NonNull Gateway gateway); boolean exists(@NonNull Gateway gateway, @NonNull GId value); E save(@NonNull Gateway gateway, @NonNull E value); E delete(@NonNull Gateway gateway, @NonNull GId value); }
Klkoenig217/openroberta-lab
OpenRobertaRobot/src/test/java/de/fhg/iais/roberta/util/test/SenderReceiverJUnit.java
package de.fhg.iais.roberta.util.test; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import org.junit.Assert; import org.slf4j.LoggerFactory; public class SenderReceiverJUnit { final AtomicBoolean wasTheReceiverSuccessful = new AtomicBoolean(false); final AtomicBoolean wasTheSenderSuccessful = new AtomicBoolean(false); final AtomicBoolean isTheReceiverWaiting = new AtomicBoolean(false); public SenderReceiverJUnit() { } public void run(final ThreadedFunction receiver, final ThreadedFunction sender) throws Exception { Runnable theReceiverThread = new Runnable() { @Override public void run() { try { SenderReceiverJUnit.this.isTheReceiverWaiting.set(true); boolean result = receiver.apply(); SenderReceiverJUnit.this.wasTheReceiverSuccessful.set(result); } catch ( Exception e ) { SenderReceiverJUnit.this.wasTheReceiverSuccessful.set(false); LoggerFactory.getLogger("theReceiverThread").error("Exception", e); } } }; Runnable theSenderThread = new Runnable() { @Override public void run() { try { int numberOfSleeps = 0; Thread.sleep(100); boolean receiverWaiting = SenderReceiverJUnit.this.isTheReceiverWaiting.get(); while ( !receiverWaiting && numberOfSleeps < 1000 ) { numberOfSleeps++; Thread.sleep(1); receiverWaiting = SenderReceiverJUnit.this.isTheReceiverWaiting.get(); } if ( numberOfSleeps > 0 ) { LoggerFactory.getLogger("theServerThread").info("ExecutorService forced " + numberOfSleeps + " 1msec sleep"); } boolean result = sender.apply(); SenderReceiverJUnit.this.wasTheSenderSuccessful.set(result); } catch ( Throwable e ) { SenderReceiverJUnit.this.wasTheSenderSuccessful.set(false); LoggerFactory.getLogger("theSenderThread").error("Exception", e); } } }; ExecutorService executorService = Executors.newFixedThreadPool(2); Future<?> receiverFuture = 
executorService.submit(theReceiverThread); Future<?> senderFuture = executorService.submit(theSenderThread); try { receiverFuture.get(1000000, TimeUnit.MILLISECONDS); } catch ( TimeoutException e ) { Assert.fail("Timeout of receiver"); } senderFuture.get(); Assert.assertTrue("Receiver finished with error", this.wasTheReceiverSuccessful.get()); Assert.assertTrue("Sender finished with error", this.wasTheSenderSuccessful.get()); } }
ibm-op-release/hcode
import/chips/p9/procedures/ppe_closed/ippe/ioa/pk_app_cfg.h
/* IBM_PROLOG_BEGIN_TAG */ /* This is an automatically generated prolog. */ /* */ /* $Source: import/chips/p9/procedures/ppe_closed/ippe/ioa/pk_app_cfg.h $ */ /* */ /* OpenPOWER HCODE Project */ /* */ /* COPYRIGHT 2015,2017 */ /* [+] International Business Machines Corp. */ /* */ /* */ /* Licensed under the Apache License, Version 2.0 (the "License"); */ /* you may not use this file except in compliance with the License. */ /* You may obtain a copy of the License at */ /* */ /* http://www.apache.org/licenses/LICENSE-2.0 */ /* */ /* Unless required by applicable law or agreed to in writing, software */ /* distributed under the License is distributed on an "AS IS" BASIS, */ /* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */ /* implied. See the License for the specific language governing */ /* permissions and limitations under the License. */ /* */ /* IBM_PROLOG_END_TAG */ /// /// @file pk_app_cfg.h /// @brief Ppe Kernal Application Config. ///----------------------------------------------------------------------------- /// *HWP HWP Owner : <NAME> <<EMAIL>> /// *HWP HWP Backup Owner : <NAME> <<EMAIL>> /// *HWP FW Owner : <NAME> <<EMAIL>> /// *HWP Team : IO /// *HWP Level : 3 /// *HWP Consumed by : FSP:HB ///----------------------------------------------------------------------------- /// /// @verbatim /// Ppe Kernal Application Config // /// @endverbatim ///---------------------------------------------------------------------------- #ifndef __PK_APP_CFG_H__ #define __PK_APP_CFG_H__ // -------------------- // If we are using the external timebase then assume // a frequency of 37.5Mhz. Otherwise, the default is to use // the decrementer as a timebase and assume a frequency of // 600MHz // In product code, this value will be IPL-time configurable. 
#ifdef APPCFG_USE_EXT_TIMEBASE #define PPE_TIMEBASE_HZ 37500000 #else #define PPE_TIMEBASE_HZ 600000000 #endif /* APPCFG_USE_EXT_TIMEBASE */ // -------------------- /// This file provides architecture-specific symbol names for each interrupt #include "p9_abus_interrupts.h" /// This application will statically initialize it's external interrupt table /// using the table defined in abus_main.c. #define STATIC_IRQ_TABLE #endif /*__PK_APP_CFG_H__*/
rapid7/harp
harp-amqp-relay-lib/src/main/java/com/rapid7/component/messaging/relay/MessageHandler.java
<reponame>rapid7/harp /*************************************************************************** * Copyright (c) 2013, Rapid7 Inc * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * * Neither the name of Rapid7 nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************/ package com.rapid7.component.messaging.relay; import com.rapid7.component.messaging.relay.encoding.MessageRelayEncoding.EncodedMessage; /** * An interface for classes that handle, process, inspect or filter incoming Protobuf encoded messages. */ public interface MessageHandler { /** * Handle the message. 
* @param message The message to handle. * @return A boolean. If false, the message should be considered "filtered out" and should not be * passed to subsequent handlers in a sequence or chain. */ public boolean handle(EncodedMessage message); }
courtneyeh/teku
networking/p2p/src/main/java/tech/pegasys/teku/networking/p2p/rpc/RpcMethod.java
<filename>networking/p2p/src/main/java/tech/pegasys/teku/networking/p2p/rpc/RpcMethod.java /* * Copyright ConsenSys Software Inc., 2022 * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package tech.pegasys.teku.networking.p2p.rpc; import java.util.List; import org.apache.tuweni.bytes.Bytes; public interface RpcMethod< TOutgoingHandler extends RpcRequestHandler, TRequest, RespHandler extends RpcResponseHandler<?>> { /** * Return a list of supported protocol ids. Protocols are prioritized by ordering, so that the * first protocol in the list is preferred over the next protocol. * * @return A non-empty list of supported protocol ids */ List<String> getIds(); /** * Encodes a request to be sent * * @param request An outgoing request payload * @return The serialized request */ Bytes encodeRequest(TRequest request); /** * Create a request handler for the selected protocol id, which should be one of the values * returned from getId() * * @param protocolId The protocolId to be handled * @return A request handler for the given protocol id */ RpcRequestHandler createIncomingRequestHandler(final String protocolId); TOutgoingHandler createOutgoingRequestHandler( String protocolId, TRequest request, RespHandler responseHandler); }
UOC/dlkit
tests/functional/test_authz/assessment_authoring/test_assessment_part_authz.py
<gh_stars>1-10 """TestAuthZ implementations of assessment_authoring.AssessmentPart""" import datetime import pytest from tests.utilities.general import is_never_authz, is_no_authz, uses_cataloging from dlkit.abstract_osid.authorization import objects as ABCObjects from dlkit.abstract_osid.authorization import queries as ABCQueries from dlkit.abstract_osid.authorization.objects import Authorization from dlkit.abstract_osid.authorization.objects import AuthorizationList from dlkit.abstract_osid.authorization.objects import Vault as ABCVault from dlkit.abstract_osid.osid import errors from dlkit.abstract_osid.osid.objects import OsidCatalogForm, OsidCatalog from dlkit.abstract_osid.osid.objects import OsidForm from dlkit.primordium.calendaring.primitives import DateTime from dlkit.primordium.id.primitives import Id from dlkit.primordium.type.primitives import Type from dlkit.runtime import PROXY_SESSION, proxy_example from dlkit.runtime.managers import Runtime REQUEST = proxy_example.SimpleRequest() CONDITION = PROXY_SESSION.get_proxy_condition() CONDITION.set_http_request(REQUEST) PROXY = PROXY_SESSION.get_proxy(CONDITION) JANE_REQUEST = proxy_example.SimpleRequest(username='jane_doe') JANE_CONDITION = PROXY_SESSION.get_proxy_condition() JANE_CONDITION.set_http_request(JANE_REQUEST) JANE_PROXY = PROXY_SESSION.get_proxy(JANE_CONDITION) LOOKUP_ASSESSMENT_PART_FUNCTION_ID = Id(**{'identifier': 'lookup', 'namespace': 'assessment_authoring.AssessmentPart', 'authority': 'ODL.MIT.EDU'}) SEARCH_ASSESSMENT_PART_FUNCTION_ID = Id(**{'identifier': 'search', 'namespace': 'assessment_authoring.AssessmentPart', 'authority': 'ODL.MIT.EDU'}) CREATE_ASSESSMENT_PART_FUNCTION_ID = Id(**{'identifier': 'create', 'namespace': 'assessment_authoring.AssessmentPart', 'authority': 'ODL.MIT.EDU'}) DELETE_ASSESSMENT_PART_FUNCTION_ID = Id(**{'identifier': 'delete', 'namespace': 'assessment_authoring.AssessmentPart', 'authority': 'ODL.MIT.EDU'}) ASSIGN_ASSESSMENT_PART_FUNCTION_ID = 
Id(**{'identifier': 'assign', 'namespace': 'assessment_authoring.AssessmentPartBank', 'authority': 'ODL.MIT.EDU'}) CREATE_BANK_FUNCTION_ID = Id(**{'identifier': 'create', 'namespace': 'assessment.Bank', 'authority': 'ODL.MIT.EDU'}) DELETE_BANK_FUNCTION_ID = Id(**{'identifier': 'delete', 'namespace': 'assessment.Bank', 'authority': 'ODL.MIT.EDU'}) LOOKUP_BANK_FUNCTION_ID = Id(**{'identifier': 'lookup', 'namespace': 'assessment.Bank', 'authority': 'ODL.MIT.EDU'}) ACCESS_BANK_HIERARCHY_FUNCTION_ID = Id(**{'identifier': 'access', 'namespace': 'assessment.Bank', 'authority': 'ODL.MIT.EDU'}) MODIFY_BANK_HIERARCHY_FUNCTION_ID = Id(**{'identifier': 'modify', 'namespace': 'assessment.Bank', 'authority': 'ODL.MIT.EDU'}) ROOT_QUALIFIER_ID = Id('assessment.Bank%3AROOT%40ODL.MIT.EDU') BOOTSTRAP_VAULT_TYPE = Type(authority='ODL.MIT.EDU', namespace='authorization.Vault', identifier='bootstrap_vault') OVERRIDE_VAULT_TYPE = Type(authority='ODL.MIT.EDU', namespace='authorization.Vault', identifier='override_vault') DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'}) DEFAULT_GENUS_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'GenusType', 'authority': 'DLKIT.MIT.EDU'}) ALIAS_ID = Id(**{'identifier': 'ALIAS', 'namespace': 'ALIAS', 'authority': 'ALIAS'}) AGENT_ID = Id(**{'identifier': 'jane_doe', 'namespace': 'osid.agent.Agent', 'authority': 'MIT-ODL'}) NEW_TYPE = Type(**{'identifier': 'NEW', 'namespace': 'MINE', 'authority': 'YOURS'}) NEW_TYPE_2 = Type(**{'identifier': 'NEW 2', 'namespace': 'MINE', 'authority': 'YOURS'}) BLUE_TYPE = Type(authority='BLUE', namespace='BLUE', identifier='BLUE') @pytest.fixture(scope="class", params=['TEST_SERVICE']) def authz_adapter_class_fixture(request): request.cls.service_config = request.param request.cls.authz_mgr = Runtime().get_manager( 'AUTHORIZATION', implementation='TEST_SERVICE') if not is_never_authz(request.cls.service_config): request.cls.vault_admin_session = 
request.cls.authz_mgr.get_vault_admin_session() request.cls.vault_lookup_session = request.cls.authz_mgr.get_vault_lookup_session() create_form = request.cls.vault_admin_session.get_vault_form_for_create([]) create_form.display_name = 'Test Vault' create_form.description = 'Test Vault for AuthorizationSession tests' create_form.genus_type = BOOTSTRAP_VAULT_TYPE request.cls.vault = request.cls.vault_admin_session.create_vault(create_form) create_form = request.cls.vault_admin_session.get_vault_form_for_create([]) create_form.display_name = 'Test Override Vault' create_form.description = 'Test Override Vault for AuthorizationSession tests' create_form.genus_type = OVERRIDE_VAULT_TYPE request.cls.override_vault = request.cls.vault_admin_session.create_vault(create_form) request.cls.authz_admin_session = request.cls.authz_mgr.get_authorization_admin_session_for_vault(request.cls.vault.ident) request.cls.override_authz_admin_session = request.cls.authz_mgr.get_authorization_admin_session_for_vault(request.cls.override_vault.ident) request.cls.authz_lookup_session = request.cls.authz_mgr.get_authorization_lookup_session_for_vault(request.cls.vault.ident) request.cls.bank_list = list() request.cls.bank_id_list = list() request.cls.authz_list = list() request.cls.authz_id_list = list() request.cls.assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT_AUTHORING', proxy=PROXY, implementation='TEST_SERVICE') for num in [0, 1, 2, 3, 4, 5, 6, 7]: create_form = request.cls.assessment_authoring_mgr.get_bank_form_for_create([]) create_form.display_name = 'Test Bank ' + str(num) create_form.description = 'Test Bank for Testing Authorization Number: ' + str(num) bank = request.cls.assessment_authoring_mgr.create_bank(create_form) request.cls.bank_list.append(bank) request.cls.bank_id_list.append(bank.ident) request.cls.assessment_authoring_mgr.add_root_bank(request.cls.bank_id_list[0]) request.cls.assessment_authoring_mgr.add_child_bank(request.cls.bank_id_list[0], 
request.cls.bank_id_list[1]) request.cls.assessment_authoring_mgr.add_child_bank(request.cls.bank_id_list[0], request.cls.bank_id_list[2]) request.cls.assessment_authoring_mgr.add_child_bank(request.cls.bank_id_list[1], request.cls.bank_id_list[3]) request.cls.assessment_authoring_mgr.add_child_bank(request.cls.bank_id_list[1], request.cls.bank_id_list[4]) request.cls.assessment_authoring_mgr.add_child_bank(request.cls.bank_id_list[2], request.cls.bank_id_list[5]) # The hierarchy should look like this. (t) indicates where lookup is # explicitely authorized: # # _____ 0 _____ # | | # _ 1(t) _ 2 not in hierarchy # | | | # 3 4 5(t) 6 7(t) (the 'blue' assessment_part in bank 2 is also assigned to bank 7) request.cls.svc_mgr = Runtime().get_service_manager( 'AUTHORIZATION', proxy=PROXY, implementation=request.cls.service_config) request.cls.catalog = request.cls.svc_mgr.get_vault(request.cls.vault.ident) # Set up Bank lookup authorization for Jane create_form = request.cls.authz_admin_session.get_authorization_form_for_create_for_agent( AGENT_ID, LOOKUP_BANK_FUNCTION_ID, ROOT_QUALIFIER_ID, []) create_form.display_name = 'Jane Lookup Authorization' create_form.description = 'Test Authorization for AuthorizationSession tests' jane_lookup_authz = request.cls.authz_admin_session.create_authorization(create_form) request.cls.authz_list.append(jane_lookup_authz) request.cls.authz_id_list.append(jane_lookup_authz.ident) # Set up AssessmentPart lookup authorizations for Jane for num in [1, 5]: create_form = request.cls.authz_admin_session.get_authorization_form_for_create_for_agent( AGENT_ID, LOOKUP_ASSESSMENT_PART_FUNCTION_ID, request.cls.bank_id_list[num], []) create_form.display_name = 'Test Authorization ' + str(num) create_form.description = 'Test Authorization for AuthorizationSession tests' authz = request.cls.authz_admin_session.create_authorization(create_form) request.cls.authz_list.append(authz) request.cls.authz_id_list.append(authz.ident) # Set up AssessmentPart 
lookup override authorizations for Jane for num in [7]: create_form = request.cls.override_authz_admin_session.get_authorization_form_for_create_for_agent( AGENT_ID, LOOKUP_ASSESSMENT_PART_FUNCTION_ID, request.cls.bank_id_list[num], []) create_form.display_name = 'Test Authorization ' + str(num) + ' (override)' create_form.description = 'Test Authorization for AuthorizationSession tests' authz = request.cls.override_authz_admin_session.create_authorization(create_form) request.cls.authz_list.append(authz) request.cls.authz_id_list.append(authz.ident) # Set up AssessmentPart search override authorizations for Jane for num in [7]: create_form = request.cls.override_authz_admin_session.get_authorization_form_for_create_for_agent( AGENT_ID, SEARCH_ASSESSMENT_PART_FUNCTION_ID, request.cls.bank_id_list[num], []) create_form.display_name = 'Test Authorization ' + str(num) + ' (override)' create_form.description = 'Test Authorization for AuthorizationSession tests' authz = request.cls.override_authz_admin_session.create_authorization(create_form) request.cls.authz_list.append(authz) request.cls.authz_id_list.append(authz.ident) # Set up AssessmentPart search authorizations for Jane for num in [1, 5]: create_form = request.cls.authz_admin_session.get_authorization_form_for_create_for_agent( AGENT_ID, SEARCH_ASSESSMENT_PART_FUNCTION_ID, request.cls.bank_id_list[num], []) create_form.display_name = 'Test Authorization ' + str(num) create_form.description = 'Test Authorization for AuthorizationSession tests' authz = request.cls.authz_admin_session.create_authorization(create_form) request.cls.authz_list.append(authz) request.cls.authz_id_list.append(authz.ident) else: request.cls.catalog = request.cls.svc_mgr.get_authorization_session(proxy=PROXY) def class_tear_down(): if not is_never_authz(request.cls.service_config): for catalog in request.cls.assessment_authoring_mgr.get_banks(): for obj in catalog.get_assessment_parts(): catalog.delete_assessment_part(obj.ident) 
request.cls.assessment_authoring_mgr.delete_bank(catalog.ident) for vault in request.cls.vault_lookup_session.get_vaults(): lookup_session = request.cls.authz_mgr.get_authorization_lookup_session_for_vault(vault.ident) admin_session = request.cls.authz_mgr.get_authorization_admin_session_for_vault(vault.ident) for authz in lookup_session.get_authorizations(): admin_session.delete_authorization(authz.ident) request.cls.vault_admin_session.delete_vault(vault.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope="function") def authz_adapter_test_fixture(request): request.cls.assessment_part_id_lists = [] count = 0 if not is_never_authz(request.cls.service_config): create_form = request.cls.bank_list[0].get_assessment_form_for_create([]) create_form.display_name = 'Assessment for AssessmentOffered Tests' create_form.description = 'Assessment for authz adapter tests for AssessmentOffered' request.cls.assessment = request.cls.bank_list[0].create_assessment(create_form) for bank_ in request.cls.bank_list: request.cls.assessment_part_id_lists.append([]) for color in ['Red', 'Blue', 'Red']: create_form = bank_.get_assessment_part_form_for_create_for_assessment(request.cls.assessment.ident, []) create_form.display_name = color + ' ' + str(count) + ' AssessmentPart' create_form.description = color + ' assessment_part for authz adapter tests from Bank number ' + str(count) if color == 'Blue': create_form.genus_type = BLUE_TYPE assessment_part = bank_.create_assessment_part_for_assessment(create_form) if count == 2 and color == 'Blue': request.cls.assessment_authoring_mgr.assign_assessment_part_to_bank( assessment_part.ident, request.cls.bank_id_list[7]) request.cls.assessment_part_id_lists[count].append(assessment_part.ident) count += 1 def test_tear_down(): if not is_never_authz(request.cls.service_config): for index, bank_ in enumerate(request.cls.bank_list): for assessment_part_id in request.cls.assessment_part_id_lists[index]: 
bank_.delete_assessment_part(assessment_part_id) request.cls.bank_list[0].delete_assessment(request.cls.assessment.ident) request.addfinalizer(test_tear_down) @pytest.mark.usefixtures("authz_adapter_class_fixture", "authz_adapter_test_fixture") class TestAssessmentPartAuthzAdapter(object): def test_lookup_bank_0_plenary_isolated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[0]) bank.use_isolated_bank_view() bank.use_plenary_assessment_part_view() # with pytest.raises(errors.NotFound): # assessment_parts = bank.get_assessment_parts() # with pytest.raises(errors.NotFound): # assessment_parts = bank.get_assessment_parts_by_genus_type(BLUE_TYPE) # for assessment_part_id in self.assessment_part_id_lists[0]: # with pytest.raises(errors.NotFound): # assessment_part = bank.get_assessment_part(assessment_part_id) # with pytest.raises(errors.NotFound): # assessment_parts = bank.get_assessment_parts_by_ids(self.assessment_part_id_lists[0]) def test_lookup_bank_0_plenary_federated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[0]) bank.use_federated_bank_view() bank.use_plenary_assessment_part_view() assert bank.can_lookup_assessment_parts() assert bank.get_assessment_parts().available() == 1 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 1 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).next().ident == self.assessment_part_id_lists[2][1] bank.get_assessment_part(self.assessment_part_id_lists[2][1]) for assessment_part_num in [0, 2]: with pytest.raises(errors.NotFound): # Is this right? 
Perhaps PermissionDenied assessment_part = bank.get_assessment_part(self.assessment_part_id_lists[2][assessment_part_num]) def test_lookup_bank_0_comparative_federated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[0]) bank.use_federated_bank_view() bank.use_comparative_assessment_part_view() # print "START" assert bank.get_assessment_parts().available() == 13 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 5 for assessment_part in bank.get_assessment_parts(): bank.get_assessment_part(assessment_part.ident) assessment_part_ids = [assessment_part.ident for assessment_part in bank.get_assessment_parts()] bank.get_assessment_parts_by_ids(assessment_part_ids) for assessment_part_id in self.assessment_part_id_lists[0]: with pytest.raises(errors.NotFound): assessment_part = bank.get_assessment_part(assessment_part_id) assessment_part = bank.get_assessment_part(self.assessment_part_id_lists[2][1]) for assessment_part_num in [0, 2]: with pytest.raises(errors.NotFound): assessment_part = bank.get_assessment_part(self.assessment_part_id_lists[2][assessment_part_num]) for assessment_part_id in self.assessment_part_id_lists[1]: assessment_part = bank.get_assessment_part(assessment_part_id) for assessment_part_id in self.assessment_part_id_lists[3]: assessment_part = bank.get_assessment_part(assessment_part_id) for assessment_part_id in self.assessment_part_id_lists[4]: assessment_part = bank.get_assessment_part(assessment_part_id) for assessment_part_id in self.assessment_part_id_lists[5]: assessment_part = bank.get_assessment_part(assessment_part_id) def test_lookup_bank_0_comparative_isolated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, 
implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[0]) bank.use_isolated_bank_view() bank.use_comparative_assessment_part_view() assert bank.get_assessment_parts().available() == 0 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 0 def test_lookup_bank_1_plenary_isolated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[1]) bank.use_isolated_bank_view() bank.use_plenary_assessment_part_view() assert bank.get_assessment_parts().available() == 3 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 1 def test_lookup_bank_1_plenary_federated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[1]) bank.use_federated_bank_view() bank.use_plenary_assessment_part_view() assert bank.get_assessment_parts().available() == 9 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 3 def test_lookup_bank_1_comparative_federated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[1]) bank.use_federated_bank_view() bank.use_comparative_assessment_part_view() assert bank.get_assessment_parts().available() == 9 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 3 def test_lookup_bank_1_comparative_isolated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, 
implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[1]) bank.use_isolated_bank_view() bank.use_comparative_assessment_part_view() assert bank.get_assessment_parts().available() == 3 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 1 def test_lookup_bank_2_plenary_isolated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[2]) bank.use_isolated_bank_view() bank.use_plenary_assessment_part_view() assert bank.get_assessment_parts().available() == 1 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 1 # with pytest.raises(errors.PermissionDenied): # assessment_parts = bank.get_assessment_parts() # with pytest.raises(errors.PermissionDenied): # assessment_parts = bank.get_assessment_parts_by_genus_type(BLUE_TYPE) def test_lookup_bank_2_plenary_federated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[2]) bank.use_federated_bank_view() bank.use_plenary_assessment_part_view() assert bank.get_assessment_parts().available() == 1 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 1 # with pytest.raises(errors.PermissionDenied): # assessment_parts = bank.get_assessment_parts() # with pytest.raises(errors.PermissionDenied): # assessment_parts = bank.get_assessment_parts_by_genus_type(BLUE_TYPE) def test_lookup_bank_2_comparative_federated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = 
janes_assessment_authoring_mgr.get_bank(self.bank_id_list[2]) bank.use_federated_bank_view() bank.use_comparative_assessment_part_view() assert bank.get_assessment_parts().available() == 4 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 2 # self.assertEqual(bank.get_assessment_parts().available(), 3) # self.assertEqual(bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available(), 1) def test_lookup_bank_2_comparative_isolated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[2]) bank.use_isolated_bank_view() bank.use_comparative_assessment_part_view() assert bank.get_assessment_parts().available() == 1 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 1 # with pytest.raises(errors.PermissionDenied): # assessment_parts = bank.get_assessment_parts() # with pytest.raises(errors.PermissionDenied): # assessment_parts = bank.get_assessment_parts_by_genus_type(BLUE_TYPE) def test_lookup_bank_3_plenary_isolated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[3]) bank.use_isolated_bank_view() bank.use_plenary_assessment_part_view() assert bank.get_assessment_parts().available() == 3 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 1 def test_lookup_bank_3_plenary_federated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[3]) bank.use_federated_bank_view() bank.use_plenary_assessment_part_view() 
assert bank.get_assessment_parts().available() == 3 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 1 def test_lookup_bank_3_comparative_federated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[3]) bank.use_federated_bank_view() bank.use_comparative_assessment_part_view() assert bank.get_assessment_parts().available() == 3 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 1 def test_lookup_bank_3_comparative_isolated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[3]) bank.use_isolated_bank_view() bank.use_comparative_assessment_part_view() assert bank.get_assessment_parts().available() == 3 assert bank.get_assessment_parts_by_genus_type(BLUE_TYPE).available() == 1 def test_query_bank_0_isolated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[0]) bank.use_isolated_bank_view() with pytest.raises(errors.PermissionDenied): query = bank.get_assessment_part_query() def test_query_bank_0_federated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[0]) bank.use_federated_bank_view() query = bank.get_assessment_part_query() query.match_display_name('red') assert bank.get_assessment_parts_by_query(query).available() == 8 
query.clear_display_name_terms() query.match_display_name('blue') assert bank.get_assessment_parts_by_query(query).available() == 5 def test_query_bank_1_isolated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[1]) bank.use_isolated_bank_view() query = bank.get_assessment_part_query() query.match_display_name('red') assert bank.get_assessment_parts_by_query(query).available() == 2 def test_query_bank_1_federated(self): if not is_never_authz(self.service_config): janes_assessment_authoring_mgr = Runtime().get_service_manager( 'ASSESSMENT', proxy=JANE_PROXY, implementation='TEST_SERVICE_JSON_AUTHZ') bank = janes_assessment_authoring_mgr.get_bank(self.bank_id_list[1]) bank.use_federated_bank_view() query = bank.get_assessment_part_query() query.match_display_name('red') assert bank.get_assessment_parts_by_query(query).available() == 6
denkaty/Java-OOP
08.Interfaces and Abstraction - Exercise/06.MilitaryElite/Enums/State.java
<gh_stars>0 package MilitaryElite_06.Enums; public enum State { INPROGRESS("inProgress"), FINISHED("finished"); private String state; State(String state) { this.state = state; } public String getState() { return state; } public void setState(String state) { this.state = state; } }
Hiraishi-Ryota/assignment
resources/js/pages/login.js
<filename>resources/js/pages/login.js import React from "react"; import { Button } from '@material-ui/core'; import { useForm } from "react-hook-form"; import { useDispatch, useSelector } from "react-redux"; import { useHistory} from 'react-router-dom'; import { is_authenticated_selector, login } from "../stores/store"; import store from "../stores/index"; import { ErrorMessageRapper } from "../parts/ErrorMessageRapper"; function Login(props) { const { register, formState: { errors }, handleSubmit, setValue, } = useForm({ mode: "onChange", criteriaMode: "all" }); const history = useHistory() const dispatch = useDispatch() const onSubmit = async (data) => { const loginInfo = { email: data.email, password: <PASSWORD> }; await dispatch(login(loginInfo, props.setMessage, props.setUserInfo, history)) // Check whether login was secess or not by using the store. console.log(store.getState().auth.user) setValue("password", "") } const emailRules = { required: "メールアドレスの入力は必須です", pattern: { value: /^[\w+\-.]+@([a-z0-9-]+\.)+[a-z]+$/i, message: "有効なメールアドレスを入力してください" }, } const passwordRules = { required: "パスワードの入力は必須です", minLength: { value: 8, message: "パスワードは8文字以上で入力してください" }, maxLength: { value: 64, message: "パスワードは64文字以内で入力してください" } } return ( <div> <div className="login-part"> <div> <label className="form-label">メールアドレス</label><br/> <input id="email" className="login-form" name="email" {...register("email", emailRules)} /> <ErrorMessageRapper name={"email"} errors={errors}/> </div> <br/> <div> <label className="form-label">パスワード</label><br/> <input id="password" className="login-form" name="password" type="password" {...register("password", passwordRules)} /> <ErrorMessageRapper name={"password"} errors={errors} /> </div> <br/> <div id="submit_form"> <Button color="primary" variant="contained" id="login-button" onClick={handleSubmit(onSubmit)}>ログイン</Button> </div> </div> <a className="login_link" href="/">&lt;&lt; トップページに戻る</a> </div> ); } export default Login;
mrtangwei/kooo.ldxy.edu.cn
commons.ldxy.edu.cn/src/main/java/cn/edu/ldxy/commons/domain/Log.java
/** * 日志统计对象 */ package cn.edu.ldxy.commons.domain; import lombok.Getter; import lombok.Setter; import org.springframework.data.annotation.Id; import org.springframework.data.mongodb.core.mapping.Document; import org.springframework.format.annotation.DateTimeFormat; import java.util.Date; /** * @author Kooo * */ @Setter @Getter @Document(collection = "logs") public class Log extends IdEntity { /** * */ private static final long serialVersionUID = -8681914465470191080L; @Id private String id; /** * 操作 */ private String op; @DateTimeFormat(iso = DateTimeFormat.ISO.DATE_TIME) private Date time; private String exe; private String ip; }
jinsen47/Code4Work
interviews/src/com/github/jinsen47/yidianzixun/StrangeLoft.java
package com.github.jinsen47.yidianzixun; import java.util.ArrayList; import java.util.List; import java.util.Scanner; /** * Created by Jinsen on 16/9/22. * 奇怪的电梯 * 一个楼有n层高 * 电梯很奇怪, 在不同的层只能走固定的层, 每一层能走的层数由一个数组给出, 只能走该数的层数 * 例如 第二层给出的数字为2, 只能走到第4层 * * 输入第一行为3数字n, a, b, 层数, 开始的层, 到达的层 * 第二行为 一个长度为n的数组, 表示每一层可以上下的层数 * * 输出为 两行, 第一行为到达所走的次数, 第二行为从a->b经过的所有层, 如果不能到达, 输出-1 * 测试用例输入: * 5 1 5 * 3 3 1 2 5 * 输出: * 3 * 1 4 2 5 */ public class StrangeLoft { public static void main(String[] args) { Scanner in = new Scanner(System.in); while (in.hasNext()) { int n = in.nextInt(); int a = in.nextInt(); int b = in.nextInt(); if (n <= 0 || a <= 0 || b <=0 || a > n || b > n ) { System.out.println("-1"); continue; } int[] input = new int[n+1]; for (int i = 1; i <= n; i++) { input[i] = in.nextInt(); } nextMove(a, b, input, new ArrayList<Integer>()); } } private static void nextMove(int curLevel, final int destLevel, final int[] input, List<Integer> traveledList) { if (curLevel == destLevel) { traveledList.add(curLevel); System.out.println(traveledList.size() - 1); for (int i : traveledList) { System.out.print(i + " "); } traveledList.remove(traveledList.size() - 1); return; } if (traveledList.contains(curLevel)) { System.out.println(-1); } else { int nextUp = curLevel + input[curLevel]; int nextDown = curLevel - input[curLevel]; traveledList.add(curLevel); if (nextUp < input.length) { nextMove(nextUp, destLevel, input, traveledList); } if (nextDown > 0) { nextMove(nextDown, destLevel, input, traveledList); } traveledList.remove(traveledList.size() - 1); } } }
sidharthsapru/scalaz-stream-mongodb
core/src/main/scala/scalaz/stream/mongodb/update/WriteResult.scala
<filename>core/src/main/scala/scalaz/stream/mongodb/update/WriteResult.scala package scalaz.stream.mongodb.update import org.bson.types.ObjectId import com.mongodb.DBObject import scalaz.stream.mongodb.collectionSyntax._ /** * Encapsulation of mongo's write result in more scala like syntax */ sealed trait WriteResult { val n : Int val errorMessage: Option[String] val serverUri : Option[String] val ok: Boolean } /** * Result of write operation as returned when updating the document * @param n documents affected * @param errorMessage Message in case the operation was not successful * @param serverUri uri of server where request was executed */ case class UpdateWriteResult(n: Int, errorMessage: Option[String], serverUri: Option[String]) extends WriteResult { val ok = errorMessage.isEmpty } /** * Result of Insert Operation. * @param n documents affected * @param errorMessage Message in case the operation was not successful * @param serverUri uri of server where request was executed * @param document document inserted */ case class InsertWriteResult(n: Int, errorMessage: Option[String], serverUri: Option[String], document: DBObject) extends WriteResult { lazy val id : ObjectId = document.as[ObjectId]("_id") val ok = errorMessage.isEmpty } object WriteResult { def apply(r: com.mongodb.WriteResult): WriteResult = UpdateWriteResult(r.getN, None, Option(r.getField("serverUsed")).map(_.toString)) def apply(r: com.mongodb.WriteResult, document: => DBObject): WriteResult = InsertWriteResult(r.getN, None, Option(r.getField("serverUsed")).map(_.toString), document) }
timfel/netbeans
ide/schema2beans/test/unit/data/TestPurchaseOrder.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * TestPurchaseOrder - test the basic features. * * The following test assumes that we know the content of the * graph as we get elements, add and change them. Therefore, the TestPurchaseOrder.xml * file and this java test should be kept in sync. 
* * Test the following: * * single String: get/set/remove/set/get * boolean (from true): get/set true/get/set false/get/set true/get * boolean (from false): get/set false/get/set true/get/set false/get * String[]: get/set (null & !null)/add/remove * Bean: remove/set(null)/create bean and graph of beans/set/add * */ import java.io.*; import java.util.*; import org.w3c.dom.*; import purchaseorder.*; public class TestPurchaseOrder extends BaseTest { public static void main(String[] argv) { TestPurchaseOrder o = new TestPurchaseOrder(); if (argv.length > 0) o.setDocumentDir(argv[0]); try { o.run(); } catch (Exception e) { e.printStackTrace(); System.exit(1); } System.exit(0); } public void run() throws Exception { PurchaseOrder po; this.readDocument(); out("creating the bean graph"); po = PurchaseOrder.read(doc); // Check that we can read the graph an it is complete out("bean graph created"); po.write(out); out("Adjusting shipTo"); USAddress shipTo = new USAddress(); po.setShipTo(shipTo); shipTo.setCountry("B&B's are us"); shipTo.setName("To you & me"); po.write(out); out("Adding a Daffodil"); Items items = po.getItems(); Item item = new Item(); item.setProductName("Daffodil"); item.setQuantity(3); item.setUSPrice(new java.math.BigDecimal("1.00")); item.setComment("Yellow & Green"); int position = items.addItem(item); po.write(out); check(items.getItem(position) == item, "addItem returned correct position"); out("Removing that Daffodil"); items.removeItem(item); po.write(out); Item[] itemsArray = items.getItem(); /* out(""+itemsArray.length); check(itemsArray.length == 2, "we should have 2 items now"); */ check(items.sizeItem() == 2, "sizeItem should return 2"); PurchaseOrder po2 = new PurchaseOrder(po); check(po != po2, "po should not be po2"); check(po.equals(po2), "po should equal po2"); check(po2.equals(po), "po2 should equal po"); /* po.write(out); po2.write(out); */ po._setSchemaLocation("flurp"); po.write(out); } }
awesome-archive/one-scan
app/tasks/periodic_task.py
<filename>app/tasks/periodic_task.py """ 周期任务 """ from app.util.cache_util import GLOBAL_LOCAL_CACHE from app.util import time_util def delete_expired_local_cache(): """ 删除过期的本地缓存 """ now_time = time_util.timestamp() delete_keys = [ key for key in GLOBAL_LOCAL_CACHE if GLOBAL_LOCAL_CACHE[key]["expire"] < now_time ] for key in delete_keys: GLOBAL_LOCAL_CACHE.pop(key, None)
FPSP-Modpack/amunra
src/main/java/de/katzenpapst/amunra/block/BlockGrassMeta.java
package de.katzenpapst.amunra.block; import java.util.Random; import micdoodle8.mods.galacticraft.api.prefab.core.BlockMetaPair; import net.minecraft.block.Block; import net.minecraft.block.IGrowable; import net.minecraft.block.material.Material; import net.minecraft.world.World; public class BlockGrassMeta extends BlockBasicMeta implements IGrowable { public BlockGrassMeta(String name, Material mat) { super(name, mat); this.setTickRandomly(true); } @Override public BlockMetaPair addSubBlock(int meta, SubBlock sb) { if(!(sb instanceof SubBlockGrass)) { throw new IllegalArgumentException("BlockGrassMulti can only accept SubBlockGrass"); } return super.addSubBlock(meta, sb); } /** * func_149851_a is basically a stillGrowing() method. * It returns (or should return) true if the growth stage is less than the max growth stage. * * info source: http://www.minecraftforge.net/forum/index.php?topic=22571.0 */ @Override public boolean func_149851_a(World world, int x, int y, int z, boolean isWorldRemote) { return true; } /** * func_149852_a is basically a canBoneMealSpeedUpGrowth() method. * I usually just return true, but depends on your crop. 
*/ @Override public boolean func_149852_a(World world, Random rand, int x, int y, int z) { return true; } /** * Ticks the block if it's been scheduled */ @Override public void updateTick(World world, int x, int y, int z, Random rand) { int meta = world.getBlockMetadata(x, y, z); SubBlockGrass sb = (SubBlockGrass) this.getSubBlock(meta); BlockMetaPair dirtForm = sb.getDirtBlock(); if (!world.isRemote) { if (!sb.canLiveHere(world, x, y, z)) { world.setBlock(x, y, z, dirtForm.getBlock(), dirtForm.getMetadata(), 3); } else if (sb.canSpread(world, x, y, z)) { for (int l = 0; l < 4; ++l) { int nbX = x + rand.nextInt(3) - 1; int nbY = y + rand.nextInt(5) - 3; int nbZ = z + rand.nextInt(3) - 1; Block block = world.getBlock(nbX, nbY, nbZ); int blockMeta = world.getBlockMetadata(nbX, nbY, nbZ); if (block == dirtForm.getBlock() && blockMeta == dirtForm.getMetadata()) { boolean canLive = sb.canLiveHere(world, nbX, nbY, nbZ); if(canLive) { world.setBlock(nbX, nbY, nbZ, this, meta, 3); } } } } } } /** * func_149853_b is basically an incrementGrowthStage() method. * In vanilla crops the growth stage is stored in metadata so then in this method * you would increment it if it wasn't already at maximum and store back in metadata. * */ @Override public void func_149853_b(World world, Random rand, int x, int y, int z) { int l = 0; int meta = world.getBlockMetadata(x, y, z); SubBlockGrass sb = (SubBlockGrass) this.getSubBlock(meta); while (l < 128) { int blockAboveX = x; int blockAboveY = y + 1; int blockAboveZ = z; int grassNearby = 0; while (true) { if (grassNearby < l / 16) // why 1/16?? 
{ blockAboveX += rand.nextInt(3) - 1; blockAboveY += (rand.nextInt(3) - 1) * rand.nextInt(3) / 2; blockAboveZ += rand.nextInt(3) - 1; if (world.getBlock(blockAboveX, blockAboveY - 1, blockAboveZ) == this && // I hope I can use "this" here world.getBlockMetadata(blockAboveX, blockAboveY, blockAboveZ) == meta && !world.getBlock(blockAboveX, blockAboveY, blockAboveZ).isNormalCube()) { ++grassNearby; continue; } } else if (world.getBlock(blockAboveX, blockAboveY, blockAboveZ).getMaterial() == Material.air) { sb.growPlantsOnTop(world, rand, blockAboveX, blockAboveY, blockAboveZ); } ++l; break; } } } }
al3xliu/checker-framework
checker/tests/lock/ItselfExpressionCases.java
import org.checkerframework.checker.lock.qual.*; import org.checkerframework.checker.nullness.qual.*; import org.checkerframework.dataflow.qual.*; public class ItselfExpressionCases { final Object somelock = new Object(); private @GuardedBy({"<self>"}) MyClass guardedBySelf() { return new MyClass(); } private final @GuardedBy({"<self>"}) MyClass m = guardedBySelf(); @Pure private @GuardedBy({"<self>"}) MyClass getm() { return m; } @Pure private @GuardedBy({"<self>"}) MyClass getm2(@GuardedBy("<self>") ItselfExpressionCases this) { // The following error is due to the precondition of the this.m field dereference not being // satisfied. // :: error: (lock.not.held) return m; } @Pure private Object getmfield() { // :: error: (lock.not.held) return getm().field; } public void arrayTest(final Object @GuardedBy("<self>") [] a1) { // :: error: (lock.not.held) Object a = a1[0]; synchronized (a1) { a = a1[0]; } } Object @GuardedBy("<self>") [] a2; @Pure public Object @GuardedBy("<self>") [] geta2() { return a2; } public void arrayTest() { // :: error: (lock.not.held) Object a = geta2()[0]; synchronized (geta2()) { a = geta2()[0]; } } public void testCheckPreconditions( final @GuardedBy("<self>") MyClass o, @GuardSatisfied Object gs, @GuardSatisfied MyClass gsMyClass) { // :: error: (lock.not.held) getm().field = new Object(); synchronized (getm()) { getm().field = new Object(); } // :: error: (lock.not.held) m.field = new Object(); synchronized (m) { m.field = new Object(); } // :: error: (lock.not.held) gs = m.field; synchronized (m) { gs = m.field; } // :: error: (lock.not.held) gs = getm().field; synchronized (getm()) { gs = getm().field; } // :: error: (lock.not.held) gsMyClass = getm(); synchronized (getm()) { gsMyClass = getm(); } // :: error: (lock.not.held) :: error: (contracts.precondition.not.satisfied) o.foo(); synchronized (o) { // :: error: (contracts.precondition.not.satisfied) o.foo(); synchronized (somelock) { // o.foo() requires o.somelock is held, not 
this.somelock. // :: error: (contracts.precondition.not.satisfied) o.foo(); } } // :: error: (lock.not.held) o.foo2(); synchronized (o) { o.foo2(); } } class MyClass { Object field = new Object(); @Holding("somelock") void foo(@GuardSatisfied MyClass this) {} void foo2(@GuardSatisfied MyClass this) {} void method(@GuardedBy("<self>") MyClass this) { // :: error: (lock.not.held) :: error: (contracts.precondition.not.satisfied) this.foo(); // :: error: (lock.not.held):: error: (contracts.precondition.not.satisfied) foo(); // :: error: (lock.not.held) synchronized (somelock) { // :: error: (lock.not.held) this.foo(); // :: error: (lock.not.held) foo(); synchronized (this) { this.foo(); foo(); } } // :: error: (lock.not.held) this.foo2(); // :: error: (lock.not.held) foo2(); synchronized (this) { this.foo2(); foo2(); } } } }
Uswer/LineageOS-14.1_jag3gds
kernel/lge/msm8226/sound/soc/msm/qdsp6/q6adm.c
<gh_stars>1-10 /* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/slab.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/jiffies.h> #include <linux/uaccess.h> #include <linux/atomic.h> #include <linux/err.h> #include <mach/qdsp6v2/audio_dev_ctl.h> #include <mach/qdsp6v2/audio_acdb.h> #include <mach/qdsp6v2/rtac.h> #include <sound/apr_audio.h> #include <sound/q6afe.h> #define TIMEOUT_MS 1000 #define AUDIO_RX 0x0 #define AUDIO_TX 0x1 #define ASM_MAX_SESSION 0x8 /* To do: define in a header */ #define RESET_COPP_ID 99 #define INVALID_COPP_ID 0xFF struct adm_ctl { void *apr; atomic_t copp_id[AFE_MAX_PORTS]; atomic_t copp_cnt[AFE_MAX_PORTS]; atomic_t copp_stat[AFE_MAX_PORTS]; wait_queue_head_t wait; int ec_ref_rx; }; static struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES]; static struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES]; static struct adm_ctl this_adm; static int pseudo_copp[2]; int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params) { struct asm_pp_params_command *open = NULL; int ret = 0, sz = 0; int index; pr_debug("SRS - %s", __func__); index = afe_get_port_index(port_id); if (IS_ERR_VALUE(index)) { pr_err("%s: invald port id\n", __func__); return index; } switch (srs_tech_id) { case SRS_ID_GLOBAL: { struct srs_trumedia_params_GLOBAL *glb_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_trumedia_params_GLOBAL); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_trumedia_params_GLOBAL) 
+ sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_TRUMEDIA_PARAMS; open->params.param_size = sizeof(struct srs_trumedia_params_GLOBAL); glb_params = (struct srs_trumedia_params_GLOBAL *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(glb_params, srs_params, sizeof(struct srs_trumedia_params_GLOBAL)); pr_debug("SRS - %s: Global params - 1 = %x, 2 = %x, 3 = %x," " 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x\n", __func__, (int)glb_params->v1, (int)glb_params->v2, (int)glb_params->v3, (int)glb_params->v4, (int)glb_params->v5, (int)glb_params->v6, (int)glb_params->v7, (int)glb_params->v8); break; } case SRS_ID_WOWHD: { struct srs_trumedia_params_WOWHD *whd_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_trumedia_params_WOWHD); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_trumedia_params_WOWHD) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_TRUMEDIA_PARAMS_WOWHD; open->params.param_size = sizeof(struct srs_trumedia_params_WOWHD); whd_params = (struct srs_trumedia_params_WOWHD *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(whd_params, srs_params, sizeof(struct srs_trumedia_params_WOWHD)); pr_debug("SRS - %s: WOWHD params - 1 = %x, 2 = %x, 3 = %x," " 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x," " 10 = %x, 11 = %x\n", __func__, (int)whd_params->v1, (int)whd_params->v2, (int)whd_params->v3, (int)whd_params->v4, (int)whd_params->v5, (int)whd_params->v6, (int)whd_params->v7, (int)whd_params->v8, (int)whd_params->v9, (int)whd_params->v10, (int)whd_params->v11); break; } case SRS_ID_CSHP: { struct srs_trumedia_params_CSHP *chp_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_trumedia_params_CSHP); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_trumedia_params_CSHP) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_TRUMEDIA_PARAMS_CSHP; open->params.param_size = sizeof(struct 
srs_trumedia_params_CSHP); chp_params = (struct srs_trumedia_params_CSHP *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(chp_params, srs_params, sizeof(struct srs_trumedia_params_CSHP)); pr_debug("SRS - %s: CSHP params - 1 = %x, 2 = %x, 3 = %x," " 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x," " 9 = %x\n", __func__, (int)chp_params->v1, (int)chp_params->v2, (int)chp_params->v3, (int)chp_params->v4, (int)chp_params->v5, (int)chp_params->v6, (int)chp_params->v7, (int)chp_params->v8, (int)chp_params->v9); break; } case SRS_ID_HPF: { struct srs_trumedia_params_HPF *hpf_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_trumedia_params_HPF); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_trumedia_params_HPF) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_TRUMEDIA_PARAMS_HPF; open->params.param_size = sizeof(struct srs_trumedia_params_HPF); hpf_params = (struct srs_trumedia_params_HPF *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(hpf_params, srs_params, sizeof(struct srs_trumedia_params_HPF)); pr_debug("SRS - %s: HPF params - 1 = %x\n", __func__, (int)hpf_params->v1); break; } case SRS_ID_PEQ: { struct srs_trumedia_params_PEQ *peq_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_trumedia_params_PEQ); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_trumedia_params_PEQ) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_TRUMEDIA_PARAMS_PEQ; open->params.param_size = sizeof(struct srs_trumedia_params_PEQ); peq_params = (struct srs_trumedia_params_PEQ *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(peq_params, srs_params, sizeof(struct srs_trumedia_params_PEQ)); pr_debug("SRS - %s: PEQ params - 1 = %x 2 = %x, 3 = %x," " 4 = %x\n", __func__, (int)peq_params->v1, (int)peq_params->v2, (int)peq_params->v3, (int)peq_params->v4); break; } case SRS_ID_HL: { struct srs_trumedia_params_HL *hl_params = NULL; sz 
= sizeof(struct asm_pp_params_command) + sizeof(struct srs_trumedia_params_HL); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_trumedia_params_HL) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_TRUMEDIA_PARAMS_HL; open->params.param_size = sizeof(struct srs_trumedia_params_HL); hl_params = (struct srs_trumedia_params_HL *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(hl_params, srs_params, sizeof(struct srs_trumedia_params_HL)); pr_debug("SRS - %s: HL params - 1 = %x, 2 = %x, 3 = %x, 4 = %x," " 5 = %x, 6 = %x, 7 = %x\n", __func__, (int)hl_params->v1, (int)hl_params->v2, (int)hl_params->v3, (int)hl_params->v4, (int)hl_params->v5, (int)hl_params->v6, (int)hl_params->v7); break; } default: goto fail_cmd; } open->payload = NULL; open->params.module_id = SRS_TRUMEDIA_MODULE_ID; open->params.reserved = 0; open->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); open->hdr.pkt_size = sz; open->hdr.src_svc = APR_SVC_ADM; open->hdr.src_domain = APR_DOMAIN_APPS; open->hdr.src_port = port_id; open->hdr.dest_svc = APR_SVC_ADM; open->hdr.dest_domain = APR_DOMAIN_ADSP; open->hdr.dest_port = atomic_read(&this_adm.copp_id[index]); open->hdr.token = <PASSWORD>_id; open->hdr.opcode = ADM_CMD_SET_PARAMS; pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d," " size %d, module id %x, param id %x.\n", __func__, open->hdr.dest_port, open->payload_size, open->params.module_id, open->params.param_id); ret = apr_send_pkt(this_adm.apr, (uint32_t *)open); if (ret < 0) { pr_err("SRS - %s: ADM enable for port %d failed\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } /* Wait for the callback with copp id */ ret = wait_event_timeout(this_adm.wait, 1, msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("SRS - %s: ADM open failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } fail_cmd: kfree(open); return ret; } struct SS3D { int _1; int _2; short _3; short _4; 
short _5; short _6; int _7; int _X[32]; short _8; short _9; short _10; short _11; short _12; short _13; short _14; short _15; short _16; short _17; short _18; short _19; short _20; short _21; short _22; short _23; short _24; short _25; short _26[5]; short _27; short _28; short _29; short _30; short _31; short _32; short _33; int _34; int _35; int _36; int _37; int _38; int _39; int _40; }; struct SS3D_F { int _1; int _2; int _3; int _4; int _5; int _6; int _7; int _X[]; }; int srs_ss3d_open(int port_id, int srs_tech_id, void *srs_params) { struct asm_pp_params_command *open = NULL; int ret = 0, sz = 0; int index; pr_debug("SRS - %s: called.", __func__); switch (srs_tech_id) { case SRS_ID_SS3D_GLOBAL: { struct srs_SS3D_params_GLOBAL *glb_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_SS3D_params_GLOBAL); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_SS3D_params_GLOBAL) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_SS3D_PARAMS; open->params.param_size = sizeof(struct srs_SS3D_params_GLOBAL); glb_params = (struct srs_SS3D_params_GLOBAL *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(glb_params, srs_params, sizeof(struct srs_SS3D_params_GLOBAL)); pr_debug("SRS - ss3d global params - 1 = %x, 2 = %x, 3 = %x\n" " 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x\n", (int)glb_params->v1, (int)glb_params->v2, (int)glb_params->v3, (int)glb_params->v4, (int)glb_params->v5, (int)glb_params->v6, (int)glb_params->v7, (int)glb_params->v8); break; } case SRS_ID_SS3D_CTRL: { struct srs_SS3D_ctrl_params *whd_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_SS3D_ctrl_params); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_SS3D_ctrl_params) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_SS3D_PARAMS_CTRL; open->params.param_size = sizeof(struct srs_SS3D_ctrl_params); whd_params = (struct srs_SS3D_ctrl_params *)((u8 *)open + 
sizeof(struct asm_pp_params_command)); memcpy(whd_params, srs_params, sizeof(struct srs_SS3D_ctrl_params)); { struct SS3D *D = (struct SS3D *)whd_params->v; pr_debug("SRS - ss3d ctrl params\n" "1 = 0x%08X, 2 = 0x%08X, 3 = 0x%04X,\n" "4 = 0x%04X, 5 = 0x%04X, 6 = 0x%04X,\n" "7 = 0x%08X, 8 = 0x%04X, 9 = 0x%04X,\n" "10 = 0x%04X, 11 = 0x%04X, 12 = 0x%04X,\n" "13 = 0x%04X, 14 = 0x%04X, 15 = 0x%04X,\n" "16 = 0x%04X, 17 = 0x%04X, 18 = 0x%04X,\n" "19 = 0x%04X, 20 = 0x%04X, 21 = 0x%04X,\n" "22 = 0x%04X, 23 = 0x%04X, 24 = 0x%04X,\n" "25 = 0x%04X, 26.0 = 0x%04X, 26.1 = 0x%04X,\n" "26.2 = 0x%04X, 26.3 = 0x%04X,\n" "26.4 = 0x%04X, 27 = 0x%04X, 28 = 0x%04X,\n" "29 = 0x%04X, 30 = 0x%04X, 31 = 0x%04X,\n" "32 = 0x%04X, 33 = 0x%04X, 34 = 0x%08X,\n" "35 = 0x%08X, 36 = 0x%08X, 37 = 0x%08X,\n" "38 = 0x%08X, 39 = 0x%08X, 40 = 0x%08X", D->_1, D->_2, D->_3, D->_4, D->_5, D->_6, D->_7, D->_8, D->_9, D->_10, D->_11, D->_12, D->_13, D->_14, D->_15, D->_16, D->_17, D->_18, D->_19, D->_20, D->_21, D->_22, D->_23, D->_24, D->_25, D->_26[0], D->_26[1], D->_26[2], D->_26[3], D->_26[4], D->_27, D->_28, D->_29, D->_30, D->_31, D->_32, D->_33, D->_34, D->_35, D->_36, D->_37, D->_38, D->_39, D->_40); } break; } case SRS_ID_SS3D_FILTER: { struct srs_SS3D_filter_params *chp_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_SS3D_filter_params); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_SS3D_filter_params) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_SS3D_PARAMS_FILTER; open->params.param_size = sizeof(struct srs_SS3D_filter_params); chp_params = (struct srs_SS3D_filter_params *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(chp_params, srs_params, sizeof(struct srs_SS3D_filter_params)); { struct SS3D_F *D = (struct SS3D_F *)chp_params->v; pr_debug("SRS - ss3d filter params\n" "1 = 0x%08X, 2 = 0x%08X, 3 = 0x%08X\n" "4 = 0x%08X, 5 = 0x%08X, 6 = 0x%08X\n" "7 = 0x%08X", D->_1, D->_2, D->_3, D->_4, D->_5, D->_6, 
D->_7); } break; } default: pr_debug("SRS - bad param!\n"); goto fail_cmd; } open->payload = NULL; open->params.module_id = SRS_SS3D_MODULE_ID; open->params.reserved = 0; open->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); open->hdr.pkt_size = sz; open->hdr.src_svc = APR_SVC_ADM; open->hdr.src_domain = APR_DOMAIN_APPS; open->hdr.src_port = port_id; open->hdr.dest_svc = APR_SVC_ADM; open->hdr.dest_domain = APR_DOMAIN_ADSP; index = afe_get_port_index(port_id); open->hdr.dest_port = atomic_read(&this_adm.copp_id[index]); /* port_id;//atomic_read(&this_adm.copp_id[port_id]); */ open->hdr.token = <PASSWORD>_id; open->hdr.opcode = ADM_CMD_SET_PARAMS; pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d,\n" "size %d, module id %x, param id %x.\n", __func__, open->hdr.dest_port, open->payload_size, open->params.module_id, open->params.param_id); ret = apr_send_pkt(this_adm.apr, (uint32_t *)open); if (ret < 0) { pr_err("SRS - %s: ADM enable for port %d failed\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } /* Wait for the callback with copp id */ ret = wait_event_timeout(this_adm.wait, 1, msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("SRS - %s: ADM open failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } fail_cmd: kfree(open); return ret; } static int32_t adm_callback(struct apr_client_data *data, void *priv) { uint32_t *payload; int i, index; payload = data->payload; if (data->opcode == RESET_EVENTS) { pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n", data->reset_event, data->reset_proc, this_adm.apr); if (this_adm.apr) { apr_reset(this_adm.apr); for (i = 0; i < AFE_MAX_PORTS; i++) { atomic_set(&this_adm.copp_id[i], RESET_COPP_ID); atomic_set(&this_adm.copp_cnt[i], 0); atomic_set(&this_adm.copp_stat[i], 0); } this_adm.apr = NULL; } pr_debug("Resetting calibration blocks"); for (i = 0; i < MAX_AUDPROC_TYPES; i++) { /* Device calibration */ 
mem_addr_audproc[i].cal_size = 0; mem_addr_audproc[i].cal_kvaddr = 0; mem_addr_audproc[i].cal_paddr = 0; /* Volume calibration */ mem_addr_audvol[i].cal_size = 0; mem_addr_audvol[i].cal_kvaddr = 0; mem_addr_audvol[i].cal_paddr = 0; } return 0; } pr_debug("%s: code = 0x%x %x %x size = %d\n", __func__, data->opcode, payload[0], payload[1], data->payload_size); if (data->payload_size) { index = afe_get_port_index(data->token); pr_debug("%s: Port ID %d, index %d\n", __func__, data->token, index); if (index < 0 || index >= AFE_MAX_PORTS) { pr_err("%s: invalid port idx %d token %d\n", __func__, index, data->token); return 0; } if (data->opcode == APR_BASIC_RSP_RESULT) { pr_debug("APR_BASIC_RSP_RESULT id %x\n", payload[0]); switch (payload[0]) { case ADM_CMD_SET_PARAMS: if (rtac_make_adm_callback(payload, data->payload_size)) break; case ADM_CMD_COPP_CLOSE: case ADM_CMD_MEMORY_MAP: case ADM_CMD_MEMORY_UNMAP: case ADM_CMD_MEMORY_MAP_REGIONS: case ADM_CMD_MEMORY_UNMAP_REGIONS: case ADM_CMD_MATRIX_MAP_ROUTINGS: case ADM_CMD_CONNECT_AFE_PORT: case ADM_CMD_DISCONNECT_AFE_PORT: case ADM_CMD_CONNECT_AFE_PORT_V2: case ADM_CMD_MULTI_CHANNEL_COPP_OPEN_V3: atomic_set(&this_adm.copp_stat[index], 1); wake_up(&this_adm.wait); break; default: pr_err("%s: Unknown Cmd: 0x%x\n", __func__, payload[0]); break; } return 0; } switch (data->opcode) { case ADM_CMDRSP_MULTI_CHANNEL_COPP_OPEN_V3: case ADM_CMDRSP_COPP_OPEN: case ADM_CMDRSP_MULTI_CHANNEL_COPP_OPEN: { struct adm_copp_open_respond *open = data->payload; if (open->copp_id == INVALID_COPP_ID) { pr_err("%s: invalid coppid rxed %d\n", __func__, open->copp_id); atomic_set(&this_adm.copp_stat[index], 1); wake_up(&this_adm.wait); break; } if (index == IDX_PSEUDOPORT_01) pseudo_copp[ atomic_read(&this_adm.copp_cnt[index])] = open->copp_id; atomic_set(&this_adm.copp_id[index], open->copp_id); atomic_set(&this_adm.copp_stat[index], 1); pr_debug("%s: coppid rxed=%d\n", __func__, open->copp_id); wake_up(&this_adm.wait); } break; case 
ADM_CMDRSP_GET_PARAMS: pr_debug("%s: ADM_CMDRSP_GET_PARAMS\n", __func__); rtac_make_adm_callback(payload, data->payload_size); break; default: pr_err("%s: Unknown cmd:0x%x\n", __func__, data->opcode); break; } } return 0; } int adm_connect_afe_port_v2(int mode, int session_id, int port_id, int sample_rate, int channels) { struct adm_cmd_connect_afe_port_v2 cmd; int ret = 0; int index; pr_debug("%s: port %d session id:%d\n", __func__, port_id, session_id); port_id = afe_convert_virtual_to_portid(port_id); if (afe_validate_port(port_id) < 0) { pr_err("%s port idi[%d] is invalid\n", __func__, port_id); return -ENODEV; } if (this_adm.apr == NULL) { this_adm.apr = apr_register("ADSP", "ADM", adm_callback, 0xFFFFFFFF, &this_adm); if (this_adm.apr == NULL) { pr_err("%s: Unable to register ADM\n", __func__); ret = -ENODEV; return ret; } rtac_set_adm_handle(this_adm.apr); } index = afe_get_port_index(port_id); pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index); cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cmd.hdr.pkt_size = sizeof(cmd); cmd.hdr.src_svc = APR_SVC_ADM; cmd.hdr.src_domain = APR_DOMAIN_APPS; cmd.hdr.src_port = port_id; cmd.hdr.dest_svc = APR_SVC_ADM; cmd.hdr.dest_domain = APR_DOMAIN_ADSP; cmd.hdr.dest_port = port_id; cmd.hdr.token = port_id; cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT_V2; cmd.mode = mode; cmd.session_id = session_id; cmd.afe_port_id = port_id; cmd.num_channels = channels; cmd.sampling_rate = sample_rate; atomic_set(&this_adm.copp_stat[index], 0); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd); if (ret < 0) { pr_err("%s:ADM enable for port %d failed\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s ADM connect AFE failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } atomic_inc(&this_adm.copp_cnt[index]); return 0; 
fail_cmd: return ret; } static int send_adm_cal_block(int port_id, struct acdb_cal_block *aud_cal) { s32 result = 0; struct adm_set_params_command adm_params; int index = afe_get_port_index(port_id); if (index < 0 || index >= AFE_MAX_PORTS) { pr_err("%s: invalid port idx %d portid %d\n", __func__, index, port_id); return 0; } pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index); if (!aud_cal || aud_cal->cal_size == 0) { pr_debug("%s: No ADM cal to send for port_id = %d!\n", __func__, port_id); result = -EINVAL; goto done; } adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(20), APR_PKT_VER); adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(adm_params)); adm_params.hdr.src_svc = APR_SVC_ADM; adm_params.hdr.src_domain = APR_DOMAIN_APPS; adm_params.hdr.src_port = port_id; adm_params.hdr.dest_svc = APR_SVC_ADM; adm_params.hdr.dest_domain = APR_DOMAIN_ADSP; adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]); adm_params.hdr.token = port_id; adm_params.hdr.opcode = ADM_CMD_SET_PARAMS; adm_params.payload = aud_cal->cal_paddr; adm_params.payload_size = aud_cal->cal_size; atomic_set(&this_adm.copp_stat[index], 0); pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n", __func__, adm_params.payload, adm_params.payload_size); result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params); if (result < 0) { pr_err("%s: Set params failed port = %d payload = 0x%x\n", __func__, port_id, aud_cal->cal_paddr); result = -EINVAL; goto done; } /* Wait for the callback */ result = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!result) { pr_err("%s: Set params timed out port = %d, payload = 0x%x\n", __func__, port_id, aud_cal->cal_paddr); result = -EINVAL; goto done; } result = 0; done: return result; } static void send_adm_cal(int port_id, int path) { int result = 0; s32 acdb_path; struct acdb_cal_block aud_cal; pr_debug("%s\n", __func__); /* Maps 
audio_dev_ctrl path definition to ACDB definition */ acdb_path = path - 1; pr_debug("%s: Sending audproc cal\n", __func__); get_audproc_cal(acdb_path, &aud_cal); /* map & cache buffers used */ if (((mem_addr_audproc[acdb_path].cal_paddr != aud_cal.cal_paddr) && (aud_cal.cal_size > 0)) || (aud_cal.cal_size > mem_addr_audproc[acdb_path].cal_size)) { if (mem_addr_audproc[acdb_path].cal_paddr != 0) adm_memory_unmap_regions( &mem_addr_audproc[acdb_path].cal_paddr, &mem_addr_audproc[acdb_path].cal_size, 1); result = adm_memory_map_regions(&aud_cal.cal_paddr, 0, &aud_cal.cal_size, 1); if (result < 0) pr_err("ADM audproc mmap did not work! path = %d, " "addr = 0x%x, size = %d\n", acdb_path, aud_cal.cal_paddr, aud_cal.cal_size); else mem_addr_audproc[acdb_path] = aud_cal; } if (!send_adm_cal_block(port_id, &aud_cal)) pr_debug("%s: Audproc cal sent for port id: %d, path %d\n", __func__, port_id, acdb_path); else pr_debug("%s: Audproc cal not sent for port id: %d, path %d\n", __func__, port_id, acdb_path); pr_debug("%s: Sending audvol cal\n", __func__); get_audvol_cal(acdb_path, &aud_cal); /* map & cache buffers used */ if (((mem_addr_audvol[acdb_path].cal_paddr != aud_cal.cal_paddr) && (aud_cal.cal_size > 0)) || (aud_cal.cal_size > mem_addr_audvol[acdb_path].cal_size)) { if (mem_addr_audvol[acdb_path].cal_paddr != 0) adm_memory_unmap_regions( &mem_addr_audvol[acdb_path].cal_paddr, &mem_addr_audvol[acdb_path].cal_size, 1); result = adm_memory_map_regions(&aud_cal.cal_paddr, 0, &aud_cal.cal_size, 1); if (result < 0) pr_err("ADM audvol mmap did not work! 
path = %d, " "addr = 0x%x, size = %d\n", acdb_path, aud_cal.cal_paddr, aud_cal.cal_size); else mem_addr_audvol[acdb_path] = aud_cal; } if (!send_adm_cal_block(port_id, &aud_cal)) pr_debug("%s: Audvol cal sent for port id: %d, path %d\n", __func__, port_id, acdb_path); else pr_debug("%s: Audvol cal not sent for port id: %d, path %d\n", __func__, port_id, acdb_path); } int adm_connect_afe_port(int mode, int session_id, int port_id) { struct adm_cmd_connect_afe_port cmd; int ret = 0; int index; pr_debug("%s: port %d session id:%d mode:%d\n", __func__, port_id, session_id, mode); port_id = afe_convert_virtual_to_portid(port_id); if (afe_validate_port(port_id) < 0) { pr_err("%s port idi[%d] is invalid\n", __func__, port_id); return -ENODEV; } if (this_adm.apr == NULL) { this_adm.apr = apr_register("ADSP", "ADM", adm_callback, 0xFFFFFFFF, &this_adm); if (this_adm.apr == NULL) { pr_err("%s: Unable to register ADM\n", __func__); ret = -ENODEV; return ret; } rtac_set_adm_handle(this_adm.apr); } index = afe_get_port_index(port_id); pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index); cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cmd.hdr.pkt_size = sizeof(cmd); cmd.hdr.src_svc = APR_SVC_ADM; cmd.hdr.src_domain = APR_DOMAIN_APPS; cmd.hdr.src_port = port_id; cmd.hdr.dest_svc = APR_SVC_ADM; cmd.hdr.dest_domain = APR_DOMAIN_ADSP; cmd.hdr.dest_port = port_id; cmd.hdr.token = port_id; cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT; cmd.mode = mode; cmd.session_id = session_id; cmd.afe_port_id = port_id; atomic_set(&this_adm.copp_stat[index], 0); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd); if (ret < 0) { pr_err("%s:ADM enable for port %d failed\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } /* Wait for the callback with copp id */ ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s ADM connect AFE failed for port %d\n", 
__func__, port_id); ret = -EINVAL; goto fail_cmd; } atomic_inc(&this_adm.copp_cnt[index]); return 0; fail_cmd: return ret; } int adm_disconnect_afe_port(int mode, int session_id, int port_id) { struct adm_cmd_connect_afe_port cmd; int ret = 0; int index; pr_debug("%s: port %d session id:%d mode:%d\n", __func__, port_id, session_id, mode); port_id = afe_convert_virtual_to_portid(port_id); if (afe_validate_port(port_id) < 0) { pr_err("%s port idi[%d] is invalid\n", __func__, port_id); return -ENODEV; } if (this_adm.apr == NULL) { this_adm.apr = apr_register("ADSP", "ADM", adm_callback, 0xFFFFFFFF, &this_adm); if (this_adm.apr == NULL) { pr_err("%s: Unable to register ADM\n", __func__); ret = -ENODEV; return ret; } rtac_set_adm_handle(this_adm.apr); } index = afe_get_port_index(port_id); pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index); cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cmd.hdr.pkt_size = sizeof(cmd); cmd.hdr.src_svc = APR_SVC_ADM; cmd.hdr.src_domain = APR_DOMAIN_APPS; cmd.hdr.src_port = port_id; cmd.hdr.dest_svc = APR_SVC_ADM; cmd.hdr.dest_domain = APR_DOMAIN_ADSP; cmd.hdr.dest_port = port_id; cmd.hdr.token = port_id; cmd.hdr.opcode = ADM_CMD_DISCONNECT_AFE_PORT; cmd.mode = mode; cmd.session_id = session_id; cmd.afe_port_id = port_id; atomic_set(&this_adm.copp_stat[index], 0); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd); if (ret < 0) { pr_err("%s:ADM enable for port %d failed\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } /* Wait for the callback with copp id */ ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s ADM connect AFE failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } atomic_dec(&this_adm.copp_cnt[index]); return 0; fail_cmd: return ret; } int adm_open(int port_id, int path, int rate, int channel_mode, int topology) { struct adm_copp_open_command open; int 
ret = 0; int index; pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__, port_id, path, rate, channel_mode); port_id = afe_convert_virtual_to_portid(port_id); if (afe_validate_port(port_id) < 0) { pr_err("%s port idi[%d] is invalid\n", __func__, port_id); return -ENODEV; } index = afe_get_port_index(port_id); pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index); if (this_adm.apr == NULL) { this_adm.apr = apr_register("ADSP", "ADM", adm_callback, 0xFFFFFFFF, &this_adm); if (this_adm.apr == NULL) { pr_err("%s: Unable to register ADM\n", __func__); ret = -ENODEV; return ret; } rtac_set_adm_handle(this_adm.apr); } /* Create a COPP if port id are not enabled */ if (atomic_read(&this_adm.copp_cnt[index]) == 0) { open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); open.hdr.pkt_size = sizeof(open); open.hdr.src_svc = APR_SVC_ADM; open.hdr.src_domain = APR_DOMAIN_APPS; open.hdr.src_port = port_id; open.hdr.dest_svc = APR_SVC_ADM; open.hdr.dest_domain = APR_DOMAIN_ADSP; open.hdr.dest_port = port_id; open.hdr.token = <PASSWORD>_id; open.hdr.opcode = ADM_CMD_COPP_OPEN; open.mode = path; open.endpoint_id1 = port_id; if (this_adm.ec_ref_rx == 0) { open.endpoint_id2 = 0xFFFF; } else if (this_adm.ec_ref_rx && (path != 1)) { open.endpoint_id2 = this_adm.ec_ref_rx; this_adm.ec_ref_rx = 0; } pr_debug("%s open.endpoint_id1:%d open.endpoint_id2:%d", __func__, open.endpoint_id1, open.endpoint_id2); /* convert path to acdb path */ if (path == ADM_PATH_PLAYBACK) open.topology_id = get_adm_rx_topology(); else { open.topology_id = get_adm_tx_topology(); if ((open.topology_id == VPM_TX_SM_ECNS_COPP_TOPOLOGY) || (open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY)) rate = 16000; } if (open.topology_id == 0) open.topology_id = topology; open.channel_config = channel_mode & 0x00FF; open.rate = rate; pr_debug("%s: channel_config=%d port_id=%d rate=%d" "topology_id=0x%X\n", __func__, open.channel_config,\ open.endpoint_id1, 
open.rate,\ open.topology_id); atomic_set(&this_adm.copp_stat[index], 0); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open); if (ret < 0) { pr_err("%s:ADM enable for port %d failed\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } /* Wait for the callback with copp id */ ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s ADM open failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } } atomic_inc(&this_adm.copp_cnt[index]); return 0; fail_cmd: return ret; } int adm_multi_ch_copp_pseudo_open_v3(int port_id, int path, int rate, int channel_mode, int topology) { struct adm_multi_channel_copp_open_v3 open; int ret = 0; int index; pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__, port_id, path, rate, channel_mode); port_id = afe_convert_virtual_to_portid(port_id); if (afe_validate_port(port_id) < 0) { pr_err("%s port idi[%d] is invalid\n", __func__, port_id); return -ENODEV; } index = afe_get_port_index(port_id); pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index); if (this_adm.apr == NULL) { this_adm.apr = apr_register("ADSP", "ADM", adm_callback, 0xFFFFFFFF, &this_adm); if (this_adm.apr == NULL) { pr_err("%s: Unable to register ADM\n", __func__); ret = -ENODEV; return ret; } rtac_set_adm_handle(this_adm.apr); } { open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); open.hdr.pkt_size = sizeof(open); open.hdr.src_svc = APR_SVC_ADM; open.hdr.src_domain = APR_DOMAIN_APPS; open.hdr.src_port = port_id; open.hdr.dest_svc = APR_SVC_ADM; open.hdr.dest_domain = APR_DOMAIN_ADSP; open.hdr.dest_port = port_id; open.hdr.token = <PASSWORD>_id; open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN_V3; memset(open.dev_channel_mapping, 0, 8); if (channel_mode == 1) { open.dev_channel_mapping[0] = PCM_CHANNEL_FC; } else if (channel_mode == 2) { open.dev_channel_mapping[0] = PCM_CHANNEL_FL; 
open.dev_channel_mapping[1] = PCM_CHANNEL_FR; } else if (channel_mode == 4) { open.dev_channel_mapping[0] = PCM_CHANNEL_FL; open.dev_channel_mapping[1] = PCM_CHANNEL_FR; open.dev_channel_mapping[2] = PCM_CHANNEL_LS; open.dev_channel_mapping[3] = PCM_CHANNEL_RS; } else if (channel_mode == 6) { open.dev_channel_mapping[0] = PCM_CHANNEL_FC; open.dev_channel_mapping[1] = PCM_CHANNEL_FL; open.dev_channel_mapping[2] = PCM_CHANNEL_FR; open.dev_channel_mapping[3] = PCM_CHANNEL_LS; open.dev_channel_mapping[4] = PCM_CHANNEL_RS; open.dev_channel_mapping[5] = PCM_CHANNEL_LFE; } else { pr_err("%s invalid num_chan %d\n", __func__, channel_mode); return -EINVAL; } open.mode = path; open.endpoint_id1 = port_id; open.endpoint_id2 = 0xFFFF; open.bit_width = 16; if (path == ADM_PATH_PLAYBACK) open.topology_id = get_adm_rx_topology(); else { open.topology_id = get_adm_tx_topology(); if ((open.topology_id == VPM_TX_SM_ECNS_COPP_TOPOLOGY) || (open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY)) rate = 16000; } if (open.topology_id == 0) open.topology_id = topology; open.channel_config = channel_mode & 0x00FF; open.rate = rate; open.flags = 0; pr_debug("%s: channel_config=%d port_id=%d rate=%d" \ "topology_id=0x%X\n", __func__, open.channel_config,\ open.endpoint_id1, open.rate,\ open.topology_id); atomic_set(&this_adm.copp_stat[index], 0); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open); if (ret < 0) { pr_err("%s:ADM enable for port %d failed\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s ADM open failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } } atomic_inc(&this_adm.copp_cnt[index]); return 0; fail_cmd: return ret; } int adm_multi_ch_copp_open(int port_id, int path, int rate, int channel_mode, int topology, int perfmode) { struct adm_multi_ch_copp_open_command open; int ret = 0; int index; pr_debug("%s: port %d 
path:%d rate:%d channel :%d\n", __func__, port_id, path, rate, channel_mode); port_id = afe_convert_virtual_to_portid(port_id); if (afe_validate_port(port_id) < 0) { pr_err("%s port idi[%d] is invalid\n", __func__, port_id); return -ENODEV; } index = afe_get_port_index(port_id); pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index); if (this_adm.apr == NULL) { this_adm.apr = apr_register("ADSP", "ADM", adm_callback, 0xFFFFFFFF, &this_adm); if (this_adm.apr == NULL) { pr_err("%s: Unable to register ADM\n", __func__); ret = -ENODEV; return ret; } rtac_set_adm_handle(this_adm.apr); } /* Create a COPP if port id are not enabled */ if (atomic_read(&this_adm.copp_cnt[index]) == 0) { open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); open.hdr.pkt_size = sizeof(struct adm_multi_ch_copp_open_command); if (perfmode) { pr_debug("%s Performance mode", __func__); open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN_V3; open.flags = ADM_MULTI_CH_COPP_OPEN_PERF_MODE_BIT; open.reserved = PCM_BITS_PER_SAMPLE; } else { open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN; open.reserved = 0; } memset(open.dev_channel_mapping, 0, 8); if (channel_mode == 1) { open.dev_channel_mapping[0] = PCM_CHANNEL_FC; } else if (channel_mode == 2) { open.dev_channel_mapping[0] = PCM_CHANNEL_FL; open.dev_channel_mapping[1] = PCM_CHANNEL_FR; } else if (channel_mode == 4) { open.dev_channel_mapping[0] = PCM_CHANNEL_FL; open.dev_channel_mapping[1] = PCM_CHANNEL_FR; open.dev_channel_mapping[2] = PCM_CHANNEL_RB; open.dev_channel_mapping[3] = PCM_CHANNEL_LB; } else if (channel_mode == 6) { open.dev_channel_mapping[0] = PCM_CHANNEL_FL; open.dev_channel_mapping[1] = PCM_CHANNEL_FR; open.dev_channel_mapping[2] = PCM_CHANNEL_LFE; open.dev_channel_mapping[3] = PCM_CHANNEL_FC; open.dev_channel_mapping[4] = PCM_CHANNEL_LB; open.dev_channel_mapping[5] = PCM_CHANNEL_RB; } else if (channel_mode == 8) { open.dev_channel_mapping[0] = PCM_CHANNEL_FL; 
open.dev_channel_mapping[1] = PCM_CHANNEL_FR; open.dev_channel_mapping[2] = PCM_CHANNEL_LFE; open.dev_channel_mapping[3] = PCM_CHANNEL_FC; open.dev_channel_mapping[4] = PCM_CHANNEL_LB; open.dev_channel_mapping[5] = PCM_CHANNEL_RB; open.dev_channel_mapping[6] = PCM_CHANNEL_FLC; open.dev_channel_mapping[7] = PCM_CHANNEL_FRC; } else { pr_err("%s invalid num_chan %d\n", __func__, channel_mode); return -EINVAL; } open.hdr.src_svc = APR_SVC_ADM; open.hdr.src_domain = APR_DOMAIN_APPS; open.hdr.src_port = port_id; open.hdr.dest_svc = APR_SVC_ADM; open.hdr.dest_domain = APR_DOMAIN_ADSP; open.hdr.dest_port = port_id; open.hdr.token = port_id; open.mode = path; open.endpoint_id1 = port_id; if (this_adm.ec_ref_rx == 0) { open.endpoint_id2 = 0xFFFF; } else if (this_adm.ec_ref_rx && (path != 1)) { open.endpoint_id2 = this_adm.ec_ref_rx; this_adm.ec_ref_rx = 0; } pr_debug("%s open.endpoint_id1:%d open.endpoint_id2:%d", __func__, open.endpoint_id1, open.endpoint_id2); /* convert path to acdb path */ if (path == ADM_PATH_PLAYBACK) open.topology_id = get_adm_rx_topology(); else { open.topology_id = get_adm_tx_topology(); if ((open.topology_id == VPM_TX_SM_ECNS_COPP_TOPOLOGY) || (open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY)) rate = 16000; } if (open.topology_id == 0) open.topology_id = topology; open.channel_config = channel_mode & 0x00FF; open.rate = rate; pr_debug("%s: channel_config=%d port_id=%d rate=%d" " topology_id=0x%X\n", __func__, open.channel_config, open.endpoint_id1, open.rate, open.topology_id); atomic_set(&this_adm.copp_stat[index], 0); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open); if (ret < 0) { pr_err("%s:ADM enable for port %d failed\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } /* Wait for the callback with copp id */ ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s ADM open failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } } 
atomic_inc(&this_adm.copp_cnt[index]); return 0; fail_cmd: return ret; } int adm_matrix_map(int session_id, int path, int num_copps, unsigned int *port_id, int copp_id) { struct adm_routings_command route; int ret = 0, i = 0; /* Assumes port_ids have already been validated during adm_open */ int index = afe_get_port_index(copp_id); int copp_cnt; if (index < 0 || index >= AFE_MAX_PORTS) { pr_err("%s: invalid port idx %d token %d\n", __func__, index, copp_id); return 0; } pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n", __func__, session_id, path, num_copps, port_id[0]); route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); route.hdr.pkt_size = sizeof(route); route.hdr.src_svc = 0; route.hdr.src_domain = APR_DOMAIN_APPS; route.hdr.src_port = copp_id; route.hdr.dest_svc = APR_SVC_ADM; route.hdr.dest_domain = APR_DOMAIN_ADSP; route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]); route.hdr.token = <PASSWORD>_id; route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS; route.num_sessions = 1; route.session[0].id = session_id; if (num_copps < ADM_MAX_COPPS) { copp_cnt = num_copps; } else { copp_cnt = ADM_MAX_COPPS; /* print out warning for now as playback/capture to/from * COPPs more than maximum allowed is extremely unlikely */ pr_warn("%s: max out routable COPPs\n", __func__); } route.session[0].num_copps = copp_cnt; for (i = 0; i < copp_cnt; i++) { int tmp; port_id[i] = afe_convert_virtual_to_portid(port_id[i]); tmp = afe_get_port_index(port_id[i]); pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i, port_id[i], tmp); if (tmp >= 0 && tmp < AFE_MAX_PORTS) route.session[0].copp_id[i] = atomic_read(&this_adm.copp_id[tmp]); } if (copp_cnt % 2) route.session[0].copp_id[i] = 0; switch (path) { case 0x1: route.path = AUDIO_RX; break; case 0x2: case 0x3: route.path = AUDIO_TX; break; default: pr_err("%s: Wrong path set[%d]\n", __func__, path); break; } atomic_set(&this_adm.copp_stat[index], 0); ret = 
apr_send_pkt(this_adm.apr, (uint32_t *)&route); if (ret < 0) { pr_err("%s: ADM routing for port %d failed\n", __func__, port_id[0]); ret = -EINVAL; goto fail_cmd; } ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: ADM cmd Route failed for port %d\n", __func__, port_id[0]); ret = -EINVAL; goto fail_cmd; } for (i = 0; i < num_copps; i++) send_adm_cal(port_id[i], path); for (i = 0; i < num_copps; i++) { int tmp; tmp = afe_get_port_index(port_id[i]); if (tmp >= 0 && tmp < AFE_MAX_PORTS) rtac_add_adm_device(port_id[i], atomic_read(&this_adm.copp_id[tmp]), path, session_id); else pr_debug("%s: Invalid port index %d", __func__, tmp); } return 0; fail_cmd: return ret; } int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id, uint32_t *bufsz, uint32_t bufcnt) { struct adm_cmd_memory_map_regions *mmap_regions = NULL; struct adm_memory_map_regions *mregions = NULL; void *mmap_region_cmd = NULL; void *payload = NULL; int ret = 0; int i = 0; int cmd_size = 0; pr_debug("%s\n", __func__); if (this_adm.apr == NULL) { this_adm.apr = apr_register("ADSP", "ADM", adm_callback, 0xFFFFFFFF, &this_adm); if (this_adm.apr == NULL) { pr_err("%s: Unable to register ADM\n", __func__); ret = -ENODEV; return ret; } rtac_set_adm_handle(this_adm.apr); } cmd_size = sizeof(struct adm_cmd_memory_map_regions) + sizeof(struct adm_memory_map_regions) * bufcnt; mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); if (!mmap_region_cmd) { pr_err("%s: allocate mmap_region_cmd failed\n", __func__); return -ENOMEM; } mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd; mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); mmap_regions->hdr.pkt_size = cmd_size; mmap_regions->hdr.src_port = 0; mmap_regions->hdr.dest_port = 0; mmap_regions->hdr.token = 0; mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS; mmap_regions->mempool_id = mempool_id & 
0x00ff; mmap_regions->nregions = bufcnt & 0x00ff; pr_debug("%s: map_regions->nregions = %d\n", __func__, mmap_regions->nregions); payload = ((u8 *) mmap_region_cmd + sizeof(struct adm_cmd_memory_map_regions)); mregions = (struct adm_memory_map_regions *)payload; for (i = 0; i < bufcnt; i++) { mregions->phys = buf_add[i]; mregions->buf_size = bufsz[i]; ++mregions; } atomic_set(&this_adm.copp_stat[0], 0); ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd); if (ret < 0) { pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__, mmap_regions->hdr.opcode, ret); ret = -EINVAL; goto fail_cmd; } ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[0]), 5 * HZ); if (!ret) { pr_err("%s: timeout. waited for memory_map\n", __func__); ret = -EINVAL; goto fail_cmd; } fail_cmd: kfree(mmap_region_cmd); return ret; } int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz, uint32_t bufcnt) { struct adm_cmd_memory_unmap_regions *unmap_regions = NULL; struct adm_memory_unmap_regions *mregions = NULL; void *unmap_region_cmd = NULL; void *payload = NULL; int ret = 0; int i = 0; int cmd_size = 0; pr_debug("%s\n", __func__); if (this_adm.apr == NULL) { pr_err("%s APR handle NULL\n", __func__); return -EINVAL; } cmd_size = sizeof(struct adm_cmd_memory_unmap_regions) + sizeof(struct adm_memory_unmap_regions) * bufcnt; unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); if (!unmap_region_cmd) { pr_err("%s: allocate unmap_region_cmd failed\n", __func__); return -ENOMEM; } unmap_regions = (struct adm_cmd_memory_unmap_regions *) unmap_region_cmd; unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); unmap_regions->hdr.pkt_size = cmd_size; unmap_regions->hdr.src_port = 0; unmap_regions->hdr.dest_port = 0; unmap_regions->hdr.token = 0; unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS; unmap_regions->nregions = bufcnt & 0x00ff; unmap_regions->reserved = 0; pr_debug("%s: unmap_regions->nregions = %d\n", 
__func__, unmap_regions->nregions); payload = ((u8 *) unmap_region_cmd + sizeof(struct adm_cmd_memory_unmap_regions)); mregions = (struct adm_memory_unmap_regions *)payload; for (i = 0; i < bufcnt; i++) { mregions->phys = buf_add[i]; ++mregions; } atomic_set(&this_adm.copp_stat[0], 0); ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd); if (ret < 0) { pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__, unmap_regions->hdr.opcode, ret); ret = -EINVAL; goto fail_cmd; } ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[0]), 5 * HZ); if (!ret) { pr_err("%s: timeout. waited for memory_unmap\n", __func__); ret = -EINVAL; goto fail_cmd; } fail_cmd: kfree(unmap_region_cmd); return ret; } int adm_get_copp_id(int port_index) { pr_debug("%s\n", __func__); if (port_index < 0) { pr_err("%s: invalid port_id = %d\n", __func__, port_index); return -EINVAL; } return atomic_read(&this_adm.copp_id[port_index]); } void adm_ec_ref_rx_id(int port_id) { this_adm.ec_ref_rx = port_id; pr_debug("%s ec_ref_rx:%d", __func__, this_adm.ec_ref_rx); } int adm_pseudo_close(int port_id) { struct apr_hdr close; int ret = 0, i = 0; int index = 0; int pseudo_copp_cnt; index = afe_get_port_index(port_id); if (afe_validate_port(port_id) < 0) return -EINVAL; pseudo_copp_cnt = atomic_read(&this_adm.copp_cnt[index]); pr_debug("%s port_id=%d index %d copp_cnt %d\n", __func__, port_id, index, pseudo_copp_cnt); for (i = 0; i < pseudo_copp_cnt; i++) { close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); close.pkt_size = sizeof(close); close.src_svc = APR_SVC_ADM; close.src_domain = APR_DOMAIN_APPS; close.src_port = port_id; close.dest_svc = APR_SVC_ADM; close.dest_domain = APR_DOMAIN_ADSP; close.dest_port = pseudo_copp[i]; close.token = <PASSWORD>; close.opcode = ADM_CMD_COPP_CLOSE; atomic_set(&this_adm.copp_id[index], RESET_COPP_ID); atomic_set(&this_adm.copp_stat[index], 0); pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n", 
__func__, atomic_read(&this_adm.copp_id[index]), port_id, index, atomic_read(&this_adm.copp_cnt[index])); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close); ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); } atomic_set(&this_adm.copp_cnt[index], 0); return ret; } int adm_close(int port_id) { struct apr_hdr close; int ret = 0; int index = 0; port_id = afe_convert_virtual_to_portid(port_id); index = afe_get_port_index(port_id); if (afe_validate_port(port_id) < 0) return -EINVAL; pr_debug("%s port_id=%d index %d\n", __func__, port_id, index); if (!(atomic_read(&this_adm.copp_cnt[index]))) { pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id); goto fail_cmd; } atomic_dec(&this_adm.copp_cnt[index]); if (!(atomic_read(&this_adm.copp_cnt[index]))) { close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); close.pkt_size = sizeof(close); close.src_svc = APR_SVC_ADM; close.src_domain = APR_DOMAIN_APPS; close.src_port = port_id; close.dest_svc = APR_SVC_ADM; close.dest_domain = APR_DOMAIN_ADSP; close.dest_port = atomic_read(&this_adm.copp_id[index]); close.token = <PASSWORD>; close.opcode = ADM_CMD_COPP_CLOSE; atomic_set(&this_adm.copp_id[index], RESET_COPP_ID); atomic_set(&this_adm.copp_stat[index], 0); pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n", __func__, atomic_read(&this_adm.copp_id[index]), port_id, index, atomic_read(&this_adm.copp_cnt[index])); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close); if (ret < 0) { pr_err("%s ADM close failed\n", __func__); ret = -EINVAL; goto fail_cmd; } ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: ADM cmd Route failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } rtac_remove_adm_device(port_id); } fail_cmd: return ret; } static int __init adm_init(void) { int i = 0; 
init_waitqueue_head(&this_adm.wait); this_adm.apr = NULL; for (i = 0; i < AFE_MAX_PORTS; i++) { atomic_set(&this_adm.copp_id[i], RESET_COPP_ID); atomic_set(&this_adm.copp_cnt[i], 0); atomic_set(&this_adm.copp_stat[i], 0); } return 0; } device_initcall(adm_init);
Ezeer/VegaStrike_win32FR
vegastrike/boost/1_28/src/module.cpp
<gh_stars>0
// (C) Copyright <NAME> 2000. Permission to copy, use, modify, sell and
// distribute this software is granted provided this copyright notice appears
// in all copies. This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
// The author gratefully acknowleges the support of Dragon Systems, Inc., in
// producing this work.

// Implementation of Boost.Python's module_base: a thin wrapper around a
// CPython extension-module object onto which wrapped functions and types
// are attached by name.

#include <boost/python/detail/module_base.hpp>
#include <boost/python/object/function.hpp>

namespace boost { namespace python { namespace detail {

// Create the underlying CPython module via Py_InitModule, starting with an
// empty method table.  increment_count: the returned pointer is wrapped
// with an extra reference taken — presumably because Py_InitModule hands
// back a non-owned pointer (NOTE(review): confirm against the CPython
// version this targets).
module_base::module_base(const char* name)
    : m_module(
        Py_InitModule(const_cast<char*>(name), initial_methods)
        , ref::increment_count)
{
}

module_base::~module_base()
{
}

// Bind a raw PyObject* under `name`; delegates to the ref overload.
void module_base::setattr(const char* name, PyObject* x)
{
    setattr(name, ref(x));
}

void module_base::setattr(char const* name, ref const& x)
{
    // Use function::add_to_namespace to achieve overloading if
    // appropriate.
    objects::function::add_to_namespace(m_module, name, x);
}

// Register a type object in the module under its own tp_name.
void module_base::add(PyTypeObject* x)
{
    this->setattr(x->tp_name, (PyObject*)x);
}

// Register a generic object asserted to be a type object; release()
// transfers this ref's ownership into the cast pointer passed to add().
void module_base::add_type(ref x)
{
    assert(PyObject_TypeCheck(x.get(), &PyType_Type));
    add((PyTypeObject*)x.release());
}

// Sentinel-terminated, initially-empty method table handed to Py_InitModule.
PyMethodDef module_base::initial_methods[] = { { 0, 0, 0, 0 } };

}}} // namespace boost::python::detail
hv-ojha/Hackerrank-Solutions
Java-Strings-Introduction.java
<gh_stars>1-10
import java.io.*;
import java.util.*;

/**
 * HackerRank "Java Strings Introduction": reads two tokens and prints
 * (1) the sum of their lengths, (2) "Yes"/"No" for whether the first is
 * lexicographically greater than the second, and (3) both tokens
 * capitalized (first letter upper-case, rest lower-case), space-separated.
 */
public class Solution {

    /** Returns word with its first char upper-cased and all others lower-cased. */
    private static String titleCase(String word) {
        StringBuilder out = new StringBuilder(word.length());
        for (int i = 0; i < word.length(); i++) {
            char c = word.charAt(i);
            out.append(i == 0 ? Character.toUpperCase(c) : Character.toLowerCase(c));
        }
        return out.toString();
    }

    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);
        String first = in.next();
        String second = in.next();

        // Combined length of both tokens.
        System.out.println(first.length() + second.length());

        // Lexicographic comparison: strictly greater prints "Yes".
        System.out.println(first.compareTo(second) > 0 ? "Yes" : "No");

        // Capitalized forms joined by a single space.
        System.out.println(titleCase(first) + " " + titleCase(second));
    }
}
kwkoo/credscontroller
credscontroller/vendor/github.com/hashicorp/vault/helper/testhelpers/mongodb/mongodbhelper.go
<filename>credscontroller/vendor/github.com/hashicorp/vault/helper/testhelpers/mongodb/mongodbhelper.go<gh_stars>0
package mongodb

import (
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"net/url"
	"os"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/ory/dockertest"
	"gopkg.in/mgo.v2"
)

// PrepareTestContainer calls PrepareTestContainerWithDatabase without a
// database name value, which results in configuring a database named "test"
func PrepareTestContainer(t *testing.T, version string) (cleanup func(), retURL string) {
	return PrepareTestContainerWithDatabase(t, version, "")
}

// PrepareTestContainerWithDatabase configures a test container with a given
// database name, to test non-test/admin database configurations
//
// version selects the "mongo" docker image tag.  The returned cleanup func
// purges the container; retURL is a mongodb:// connection string.
func PrepareTestContainerWithDatabase(t *testing.T, version, dbName string) (cleanup func(), retURL string) {
	// An externally supplied MONGODB_URL short-circuits container setup:
	// no container is started and cleanup is a no-op.
	if os.Getenv("MONGODB_URL") != "" {
		return func() {}, os.Getenv("MONGODB_URL")
	}

	pool, err := dockertest.NewPool("")
	if err != nil {
		t.Fatalf("Failed to connect to docker: %s", err)
	}

	resource, err := pool.Run("mongo", version, []string{})
	if err != nil {
		t.Fatalf("Could not start local mongo docker container: %s", err)
	}

	cleanup = func() {
		err := pool.Purge(resource)
		if err != nil {
			t.Fatalf("Failed to cleanup local container: %s", err)
		}
	}

	// Connect via the host port docker mapped onto mongod's default 27017.
	retURL = fmt.Sprintf("mongodb://localhost:%s", resource.GetPort("27017/tcp"))
	if dbName != "" {
		retURL = fmt.Sprintf("%s/%s", retURL, dbName)
	}

	// exponential backoff-retry
	// Keep dialing until mongod answers a Ping; on final failure the
	// container is purged before failing the test.
	if err = pool.Retry(func() error {
		var err error
		dialInfo, err := parseMongoURL(retURL)
		if err != nil {
			return err
		}

		session, err := mgo.DialWithInfo(dialInfo)
		if err != nil {
			return err
		}
		defer session.Close()
		session.SetSyncTimeout(1 * time.Minute)
		session.SetSocketTimeout(1 * time.Minute)
		return session.Ping()
	}); err != nil {
		cleanup()
		t.Fatalf("Could not connect to mongo docker container: %s", err)
	}

	return
}

// parseMongoURL will parse a connection string and return a configured dialer
func parseMongoURL(rawURL string) (*mgo.DialInfo, error) {
	url, err := url.Parse(rawURL)
	if err != nil {
		return nil, err
	}

	info := mgo.DialInfo{
		Addrs:    strings.Split(url.Host, ","),
		Database: strings.TrimPrefix(url.Path, "/"),
		Timeout:  10 * time.Second,
	}

	if url.User != nil {
		info.Username = url.User.Username()
		info.Password, _ = url.User.Password()
	}

	// Map recognized query parameters onto DialInfo fields; any option
	// not handled below is rejected as unsupported.
	query := url.Query()
	for key, values := range query {
		var value string
		if len(values) > 0 {
			value = values[0]
		}

		switch key {
		case "authSource":
			info.Source = value
		case "authMechanism":
			info.Mechanism = value
		case "gssapiServiceName":
			info.Service = value
		case "replicaSet":
			info.ReplicaSetName = value
		case "maxPoolSize":
			poolLimit, err := strconv.Atoi(value)
			if err != nil {
				return nil, errors.New("bad value for maxPoolSize: " + value)
			}
			info.PoolLimit = poolLimit
		case "ssl":
			// Unfortunately, mgo doesn't support the ssl parameter in its MongoDB URI parsing logic, so we have to handle that
			// ourselves. See https://github.com/go-mgo/mgo/issues/84
			ssl, err := strconv.ParseBool(value)
			if err != nil {
				return nil, errors.New("bad value for ssl: " + value)
			}
			if ssl {
				// NOTE(review): empty tls.Config — server certs are not
				// verified here; acceptable for a test helper only.
				info.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) {
					return tls.Dial("tcp", addr.String(), &tls.Config{})
				}
			}
		case "connect":
			if value == "direct" {
				info.Direct = true
				break
			}
			if value == "replicaSet" {
				break
			}
			fallthrough
		default:
			return nil, errors.New("unsupported connection URL option: " + key + "=" + value)
		}
	}

	return &info, nil
}
julz/garden-linux
old/linux_backend/bandwidth_manager/bandwidth_manager.go
package bandwidth_manager

import (
	"bytes"
	"fmt"
	"os/exec"
	"path"
	"regexp"
	"strconv"

	"github.com/cloudfoundry-incubator/garden-linux/old/logging"
	"github.com/cloudfoundry-incubator/garden/api"
	"github.com/cloudfoundry/gunk/command_runner"
	"github.com/pivotal-golang/lager"
)

// IN_RATE_PATTERN extracts rate/burst values (digits plus optional K/M/G
// suffix) from the `tc qdisc tbf` line emitted by net.sh get_egress_info.
var IN_RATE_PATTERN = regexp.MustCompile(`qdisc tbf [0-9a-f]+: root refcnt \d+ rate (\d+)([KMG]?)bit burst (\d+)([KMG]?)b`)

// OUT_RATE_PATTERN extracts rate/burst values from the `tc` police line
// emitted by net.sh get_ingress_info.
var OUT_RATE_PATTERN = regexp.MustCompile(`police 0x[0-9a-f]+ rate (\d+)([KMG]?)bit burst (\d+)([KMG]?)b`)

// BandwidthManager applies and reads per-container network rate limits.
type BandwidthManager interface {
	SetLimits(lager.Logger, api.BandwidthLimits) error
	GetLimits(lager.Logger) (api.ContainerBandwidthStat, error)
}

// ContainerBandwidthManager shells out to the net_rate.sh / net.sh scripts
// in a container's directory to manage that container's bandwidth limits.
type ContainerBandwidthManager struct {
	containerPath string // directory holding the container's net*.sh scripts
	containerID   string // passed to net.sh via the ID env var
	runner        command_runner.CommandRunner
}

// New builds a ContainerBandwidthManager for the container rooted at
// containerPath with the given ID, running commands through runner.
func New(containerPath, containerID string, runner command_runner.CommandRunner) *ContainerBandwidthManager {
	return &ContainerBandwidthManager{
		containerPath: containerPath,
		containerID:   containerID,
		runner:        runner,
	}
}

// SetLimits invokes net_rate.sh with BURST (bytes) and RATE (bits/sec —
// hence the *8 conversion from the byte-based API limits).
func (m *ContainerBandwidthManager) SetLimits(
	logger lager.Logger,
	limits api.BandwidthLimits,
) error {
	runner := logging.Runner{
		CommandRunner: m.runner,
		Logger:        logger,
	}

	setRate := exec.Command(path.Join(m.containerPath, "net_rate.sh"))
	setRate.Env = []string{
		fmt.Sprintf("BURST=%d", limits.BurstRateInBytesPerSecond),
		fmt.Sprintf("RATE=%d", limits.RateInBytesPerSecond*8),
	}

	return runner.Run(setRate)
}

// GetLimits reads the currently applied egress and ingress limits back out
// of tc via net.sh. Rates are reported by tc in bits/sec, so they are
// divided by 8 to return bytes/sec; bursts are already in bytes.
func (m *ContainerBandwidthManager) GetLimits(logger lager.Logger) (api.ContainerBandwidthStat, error) {
	limits := api.ContainerBandwidthStat{}

	runner := logging.Runner{
		CommandRunner: m.runner,
		Logger:        logger,
	}

	egressOut := new(bytes.Buffer)

	egress := exec.Command(path.Join(m.containerPath, "net.sh"), "get_egress_info")
	egress.Env = []string{"ID=" + m.containerID}
	egress.Stdout = egressOut

	err := runner.Run(egress)
	if err != nil {
		return limits, err
	}

	matches := IN_RATE_PATTERN.FindStringSubmatch(egressOut.String())
	if matches != nil {
		inRate, err := strconv.ParseUint(matches[1], 10, 0)
		if err != nil {
			return limits, err
		}

		inBurst, err := strconv.ParseUint(matches[3], 10, 0)
		if err != nil {
			return limits, err
		}

		inRateUnit := matches[2]
		inBurstUnit := matches[4]

		limits.InRate = convertUnits(inRate, inRateUnit) / 8
		limits.InBurst = convertUnits(inBurst, inBurstUnit)
	}

	ingressOut := new(bytes.Buffer)

	ingress := exec.Command(path.Join(m.containerPath, "net.sh"), "get_ingress_info")
	ingress.Env = []string{"ID=" + m.containerID}
	ingress.Stdout = ingressOut

	err = runner.Run(ingress)
	if err != nil {
		return limits, err
	}

	matches = OUT_RATE_PATTERN.FindStringSubmatch(ingressOut.String())
	if matches != nil {
		outRate, err := strconv.ParseUint(matches[1], 10, 0)
		if err != nil {
			return limits, err
		}

		outBurst, err := strconv.ParseUint(matches[3], 10, 0)
		if err != nil {
			return limits, err
		}

		outRateUnit := matches[2]
		outBurstUnit := matches[4]

		limits.OutRate = convertUnits(outRate, outRateUnit) / 8
		limits.OutBurst = convertUnits(outBurst, outBurstUnit)
	}

	return limits, err
}

// convertUnits scales num by its tc unit suffix ("K", "M", "G", or "").
//
// BUG FIX: the previous implementation computed num * (1024 ^ 2) and
// num * (1024 ^ 3). In Go, ^ is bitwise XOR, not exponentiation
// (1024 ^ 2 == 1026, 1024 ^ 3 == 1027), so M- and G-suffixed values were
// scaled by ~1026/1027 instead of 2^20/2^30, massively under-reporting
// configured limits.
func convertUnits(num uint64, unit string) uint64 {
	switch unit {
	case "K":
		return num * 1024
	case "M":
		return num * 1024 * 1024
	case "G":
		return num * 1024 * 1024 * 1024
	default:
		return num
	}
}
PowerOlive/mindspore
mindspore/ccsrc/fl/server/cert_verify.cc
/** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "fl/server/cert_verify.h" #include <sys/time.h> #include <iostream> #include <cstdio> #include <cstring> #include <cstdlib> #include <vector> #include <iomanip> #include <sstream> namespace mindspore { namespace ps { namespace server { #ifndef _WIN32 static int64_t replayAttackTimeDiff; static int64_t certStartTimeDiff = -600; X509 *CertVerify::readCertFromFile(const std::string &certPath) { BIO *bio = BIO_new_file(certPath.c_str(), "r"); X509 *certObj = PEM_read_bio_X509(bio, nullptr, nullptr, nullptr); BIO_free_all(bio); return certObj; } X509 *CertVerify::readCertFromPerm(std::string cert) { BIO *bio = BIO_new_mem_buf(reinterpret_cast<void *>(cert.data()), -1); X509 *certObj = PEM_read_bio_X509(bio, nullptr, nullptr, nullptr); BIO_free_all(bio); return certObj; } X509_CRL *CertVerify::readCrlFromFile(const std::string &crlPath) { BIO *bio = BIO_new_file(crlPath.c_str(), "r"); X509_CRL *crl = PEM_read_bio_X509_CRL(bio, nullptr, nullptr, nullptr); BIO_free_all(bio); return crl; } bool checkFileExists(const std::string &file) { std::ifstream f(file.c_str()); if (!f.good()) { return false; } else { f.close(); return true; } } bool CertVerify::verifyCertTime(const X509 *cert) const { ASN1_TIME *start = X509_getm_notBefore(cert); ASN1_TIME *end = X509_getm_notAfter(cert); int day = 0; int sec = 0; int ret = ASN1_TIME_diff(&day, &sec, start, NULL); if (ret != 1) { return 
false; } if (day < 0) { MS_LOG(ERROR) << "cert start day time is later than now day time, day is" << day; return false; } if (day == 0 && sec < certStartTimeDiff) { MS_LOG(ERROR) << "cert start second time is later than 600 second, second is" << sec; return false; } day = 0; sec = 0; ret = ASN1_TIME_diff(&day, &sec, NULL, end); if (ret != 1) { return false; } if (day < 0 || sec < 0) { MS_LOG(ERROR) << "cert end time is sooner than now time."; return false; } MS_LOG(INFO) << "verify cert time success."; return true; } bool CertVerify::verifyPublicKey(const X509 *keyAttestationCertObj, const X509 *equipCertObj, const X509 *equipCACertObj, const X509 *rootFirstCA, const X509 *rootSecondCA) const { bool result = true; EVP_PKEY *equipPubKey = X509_get_pubkey(const_cast<X509 *>(equipCertObj)); EVP_PKEY *equipCAPubKey = X509_get_pubkey(const_cast<X509 *>(equipCACertObj)); EVP_PKEY *rootFirstPubKey = X509_get_pubkey(const_cast<X509 *>(rootFirstCA)); EVP_PKEY *rootSecondPubKey = X509_get_pubkey(const_cast<X509 *>(rootSecondCA)); do { int ret = 0; ret = X509_verify(const_cast<X509 *>(keyAttestationCertObj), equipPubKey); if (ret != 1) { MS_LOG(ERROR) << "keyAttestationCert verify is failed"; result = false; break; } ret = X509_verify(const_cast<X509 *>(equipCertObj), equipCAPubKey); if (ret != 1) { MS_LOG(ERROR) << "equip cert verify is failed"; result = false; break; } int ret_first = X509_verify(const_cast<X509 *>(equipCACertObj), rootFirstPubKey); int ret_second = X509_verify(const_cast<X509 *>(equipCACertObj), rootSecondPubKey); if (ret_first != 1 && ret_second != 1) { MS_LOG(ERROR) << "equip ca cert verify is failed"; result = false; break; } } while (0); EVP_PKEY_free(equipPubKey); EVP_PKEY_free(equipCAPubKey); EVP_PKEY_free(rootFirstPubKey); EVP_PKEY_free(rootSecondPubKey); MS_LOG(INFO) << "verify Public Key success."; return result; } bool CertVerify::verifyCAChain(const std::string &keyAttestation, const std::string &equipCert, const std::string &equipCACert, const 
std::string &rootFirstCAPath, const std::string &rootSecondCAPath) { X509 *rootFirstCA = CertVerify::readCertFromFile(rootFirstCAPath); X509 *rootSecondCA = CertVerify::readCertFromFile(rootSecondCAPath); X509 *keyAttestationCertObj = readCertFromPerm(keyAttestation); X509 *equipCertObj = readCertFromPerm(equipCert); X509 *equipCACertObj = readCertFromPerm(equipCACert); bool result = true; do { if (rootFirstCA == nullptr || rootSecondCA == nullptr) { MS_LOG(ERROR) << "rootFirstCA or rootSecondCA is nullptr"; result = false; break; } if (keyAttestationCertObj == nullptr || equipCertObj == nullptr || equipCACertObj == nullptr) { result = false; break; } if (!verifyCertTime(keyAttestationCertObj) || !verifyCertTime(equipCertObj) || !verifyCertTime(equipCACertObj)) { result = false; break; } if (!verifyCertCommonName(equipCACertObj, equipCertObj)) { MS_LOG(ERROR) << "equip ca cert subject cn is not equal with equip cert issuer cn."; result = false; break; } if (!verifyCertCommonName(rootFirstCA, equipCACertObj) && !verifyCertCommonName(rootSecondCA, equipCACertObj)) { MS_LOG(ERROR) << "root CA cert subject cn is not equal with equip CA cert issuer cn."; result = false; break; } if (!verifyExtendedAttributes(equipCACertObj)) { MS_LOG(ERROR) << "verify equipCACert Extended Attributes failed."; result = false; break; } if (!verifyCertKeyID(rootFirstCA, equipCACertObj) && !verifyCertKeyID(rootSecondCA, equipCACertObj)) { MS_LOG(ERROR) << "root CA cert subject keyid is not equal with equip CA cert issuer keyid."; result = false; break; } if (!verifyCertKeyID(equipCACertObj, equipCertObj)) { MS_LOG(ERROR) << "equip CA cert subject keyid is not equal with equip cert issuer keyid."; result = false; break; } if (!verifyPublicKey(keyAttestationCertObj, equipCertObj, equipCACertObj, rootFirstCA, rootSecondCA)) { MS_LOG(ERROR) << "verify Public Key failed"; result = false; break; } } while (0); X509_free(rootFirstCA); X509_free(rootSecondCA); X509_free(keyAttestationCertObj); 
X509_free(equipCertObj); X509_free(equipCACertObj); MS_LOG(INFO) << "verifyCAChain success."; return result; } bool CertVerify::verifyCertKeyID(const X509 *caCert, const X509 *subCert) const { bool result = true; ASN1_OCTET_STRING *skid = nullptr; AUTHORITY_KEYID *akeyid = nullptr; do { int crit = 0; skid = reinterpret_cast<ASN1_OCTET_STRING *>(X509_get_ext_d2i(caCert, NID_subject_key_identifier, &crit, NULL)); if (skid == nullptr) { result = false; break; } char subject_keyid[512] = {0}; for (int i = 0; i < skid->length; i++) { char keyid[8] = {0}; int base = 512; (void)sprintf_s(keyid, sizeof(keyid), "%x ", (uint32_t)skid->data[i]); int ret = strcat_s(subject_keyid, base, keyid); if (ret == -1) { result = false; break; } } akeyid = reinterpret_cast<AUTHORITY_KEYID *>(X509_get_ext_d2i(subCert, NID_authority_key_identifier, &crit, NULL)); if (akeyid == nullptr) { result = false; break; } char issuer_keyid[512] = {0}; if (akeyid->keyid == nullptr) { MS_LOG(ERROR) << "keyid is nullprt."; result = false; break; } for (int i = 0; i < akeyid->keyid->length; i++) { char keyid[8] = {0}; int base = 512; (void)sprintf_s(keyid, sizeof(keyid), "%x ", (uint32_t)(akeyid->keyid->data[i])); int ret = strcat_s(issuer_keyid, base, keyid); if (ret == -1) { result = false; break; } } std::string subject_keyid_str = subject_keyid; std::string issuer_keyid_str = issuer_keyid; if (subject_keyid_str != issuer_keyid_str) { result = false; break; } } while (0); ASN1_OCTET_STRING_free(skid); AUTHORITY_KEYID_free(akeyid); return result; } bool CertVerify::verifyExtendedAttributes(const X509 *cert) const { bool result = true; BASIC_CONSTRAINTS *bcons = nullptr; ASN1_BIT_STRING *lASN1UsageStr = nullptr; do { int cirt = 0; bcons = reinterpret_cast<BASIC_CONSTRAINTS *>(X509_get_ext_d2i(cert, NID_basic_constraints, &cirt, NULL)); if (bcons == nullptr) { result = false; break; } if (!bcons->ca) { MS_LOG(ERROR) << "Subject Type is End Entity."; result = false; break; } MS_LOG(INFO) << "Subject Type 
is CA."; lASN1UsageStr = reinterpret_cast<ASN1_BIT_STRING *>(X509_get_ext_d2i(cert, NID_key_usage, NULL, NULL)); if (lASN1UsageStr == nullptr) { result = false; break; } int16_t usage = lASN1UsageStr->data[0]; if (lASN1UsageStr->length > 1) { const unsigned int move = 8; usage |= lASN1UsageStr->data[1] << move; } if (!(usage & KU_KEY_CERT_SIGN)) { MS_LOG(ERROR) << "Subject is not Certificate Signature."; result = false; break; } MS_LOG(INFO) << "Subject is Certificate Signature."; } while (0); BASIC_CONSTRAINTS_free(bcons); ASN1_BIT_STRING_free(lASN1UsageStr); return result; } bool CertVerify::verifyCertCommonName(const X509 *caCert, const X509 *subCert) const { if (caCert == nullptr || subCert == nullptr) { return false; } char caSubjectCN[256] = ""; char subIssuerCN[256] = ""; X509_NAME *caSubjectX509CN = X509_get_subject_name(caCert); X509_NAME *subIssuerX509CN = X509_get_issuer_name(subCert); int ret = X509_NAME_get_text_by_NID(caSubjectX509CN, NID_commonName, caSubjectCN, sizeof(caSubjectCN)); if (ret < 0) { return false; } ret = X509_NAME_get_text_by_NID(subIssuerX509CN, NID_commonName, subIssuerCN, sizeof(subIssuerCN)); if (ret < 0) { return false; } std::string caSubjectCNStr = caSubjectCN; std::string subIssuerCNStr = subIssuerCN; if (caSubjectCNStr != subIssuerCNStr) { return false; } return true; } bool CertVerify::verifyCRL(const std::string &equipCert, const std::string &equipCrlPath) { if (!checkFileExists(equipCrlPath)) { return true; } bool result = true; X509_CRL *equipCrl = nullptr; X509 *equipCertObj = nullptr; EVP_PKEY *evp_pkey = nullptr; do { equipCrl = CertVerify::readCrlFromFile(equipCrlPath); equipCertObj = readCertFromPerm(equipCert); if (equipCertObj == nullptr) { result = false; break; } if (equipCrl == nullptr) { MS_LOG(INFO) << "equipCrl is nullptr. 
return true."; result = true; break; } evp_pkey = X509_get_pubkey(equipCertObj); int ret = X509_CRL_verify(equipCrl, evp_pkey); if (ret == 1) { MS_LOG(ERROR) << "equip cert in equip crl, verify failed"; result = false; break; } } while (0); EVP_PKEY_free(evp_pkey); X509_free(equipCertObj); X509_CRL_free(equipCrl); MS_LOG(INFO) << "verifyCRL success."; return result; } bool CertVerify::verifyRSAKey(const std::string &keyAttestation, const unsigned char *signData, const std::string &flID, const std::string &timeStamp) { if (keyAttestation.empty() || signData == nullptr || flID.empty() || timeStamp.empty()) { MS_LOG(ERROR) << "keyAttestation or signData or flID or timeStamp is empty."; return false; } bool result = true; X509 *keyAttestationCertObj = nullptr; EVP_PKEY *pubKey = nullptr; do { keyAttestationCertObj = readCertFromPerm(keyAttestation); std::string srcData = flID + " " + timeStamp; // SHA256_DIGEST_LENGTH is 32 unsigned char srcDataHash[SHA256_DIGEST_LENGTH]; sha256Hash(srcData, srcDataHash, SHA256_DIGEST_LENGTH); pubKey = X509_get_pubkey(keyAttestationCertObj); RSA *pRSAPublicKey = EVP_PKEY_get0_RSA(pubKey); if (pRSAPublicKey == nullptr) { MS_LOG(ERROR) << "get rsa public key failed."; result = false; break; } int pubKeyLen = RSA_size(pRSAPublicKey); unsigned char buffer[256]; int ret = RSA_public_decrypt(pubKeyLen, signData, buffer, pRSAPublicKey, RSA_NO_PADDING); if (ret == -1) { MS_LOG(ERROR) << "rsa public decrypt failed."; result = false; break; } int saltLen = -2; ret = RSA_verify_PKCS1_PSS(pRSAPublicKey, srcDataHash, EVP_sha256(), buffer, saltLen); if (ret != 1) { int64_t ulErr = SizeToLong(ERR_get_error()); char szErrMsg[1024] = {0}; MS_LOG(ERROR) << "verify error. 
error number: " << ulErr; std::string str_res = ERR_error_string(ulErr, szErrMsg); MS_LOG(ERROR) << szErrMsg; if (str_res.empty()) { result = false; break; } result = false; break; } } while (0); EVP_PKEY_free(pubKey); X509_free(keyAttestationCertObj); CRYPTO_cleanup_all_ex_data(); MS_LOG(INFO) << "verifyRSAKey success."; return result; } void CertVerify::sha256Hash(const uint8_t *src, const int src_len, uint8_t *hash, const int len) const { if (len <= 0) { return; } SHA256_CTX sha_ctx; int ret = SHA256_Init(&sha_ctx); if (ret != 1) { return; } ret = SHA256_Update(&sha_ctx, src, src_len); if (ret != 1) { return; } ret = SHA256_Final(hash, &sha_ctx); if (ret != 1) { return; } } std::string CertVerify::toHexString(const unsigned char *data, const int len) { if (data == nullptr) { MS_LOG(ERROR) << "data hash is null."; return ""; } if (len <= 0) { return ""; } std::stringstream ss; int base = 2; for (int i = 0; i < len; i++) { ss << std::hex << std::setw(base) << std::setfill('0') << static_cast<int>(data[i]); } return ss.str(); } bool CertVerify::verifyEquipCertAndFlID(const std::string &flID, const std::string &equipCert) { unsigned char hash[SHA256_DIGEST_LENGTH] = {""}; sha256Hash(equipCert, hash, SHA256_DIGEST_LENGTH); std::string equipCertSha256 = toHexString(hash, SHA256_DIGEST_LENGTH); if (flID == equipCertSha256) { MS_LOG(INFO) << "verifyEquipCertAndFlID success."; return true; } else { MS_LOG(ERROR) << "verifyEquipCertAndFlID failed."; return false; } } bool CertVerify::verifyTimeStamp(const std::string &flID, const std::string &timeStamp) const { int64_t requestTime = std::stoll(timeStamp.c_str()); const int64_t base = 1000; struct timeval tv {}; int ret = gettimeofday(&tv, nullptr); if (ret != 0) { return false; } int64_t now = tv.tv_sec * base + tv.tv_usec / base; MS_LOG(INFO) << "flID: " << flID.c_str() << ",now time: " << now << ",requestTime: " << requestTime; int64_t diff = now - requestTime; if (abs(diff) > replayAttackTimeDiff) { return false; } 
MS_LOG(INFO) << "verifyTimeStamp success."; return true; } void CertVerify::sha256Hash(const std::string &src, uint8_t *hash, const int len) const { if (len <= 0) { return; } SHA256_CTX sha_ctx; int ret = SHA256_Init(&sha_ctx); if (ret != 1) { return; } ret = SHA256_Update(&sha_ctx, src.c_str(), src.size()); if (ret != 1) { return; } ret = SHA256_Final(hash, &sha_ctx); if (ret != 1) { return; } } bool CertVerify::verifyRSAKey(const std::string &keyAttestation, const uint8_t *srcData, const uint8_t *signData, int srcDataLen) { if (keyAttestation.empty() || signData == nullptr || srcData == nullptr || srcDataLen <= 0) { MS_LOG(ERROR) << "keyAttestation or signData or srcData is invalid."; return false; } bool result = true; X509 *keyAttestationCertObj = nullptr; EVP_PKEY *pubKey = nullptr; do { keyAttestationCertObj = readCertFromPerm(keyAttestation); pubKey = X509_get_pubkey(keyAttestationCertObj); RSA *pRSAPublicKey = EVP_PKEY_get0_RSA(pubKey); if (pRSAPublicKey == nullptr) { MS_LOG(ERROR) << "get rsa public key failed."; result = false; break; } int pubKeyLen = RSA_size(pRSAPublicKey); unsigned char buffer[256]; int ret = RSA_public_decrypt(pubKeyLen, signData, buffer, pRSAPublicKey, RSA_NO_PADDING); if (ret == -1) { MS_LOG(ERROR) << "rsa public decrypt failed."; result = false; break; } int saltLen = -2; ret = RSA_verify_PKCS1_PSS(pRSAPublicKey, srcData, EVP_sha256(), buffer, saltLen); if (ret != 1) { int64_t ulErr = SizeToLong(ERR_get_error()); char szErrMsg[1024] = {0}; MS_LOG(ERROR) << "verify error. 
error number: " << ulErr; std::string str_res = ERR_error_string(ulErr, szErrMsg); MS_LOG(ERROR) << szErrMsg; if (str_res.empty()) { result = false; break; } result = false; break; } } while (0); EVP_PKEY_free(pubKey); X509_free(keyAttestationCertObj); CRYPTO_cleanup_all_ex_data(); MS_LOG(INFO) << "verifyRSAKey success."; return result; } bool CertVerify::initRootCertAndCRL(const std::string rootFirstCaFilePath, const std::string rootSecondCaFilePath, const std::string equipCrlPath, const uint64_t replay_attack_time_diff) { if (rootFirstCaFilePath.empty() || rootSecondCaFilePath.empty()) { MS_LOG(ERROR) << "the root or crl path is empty."; return false; } if (!checkFileExists(rootFirstCaFilePath)) { MS_LOG(ERROR) << "The rootFirstCaFilePath is not exist."; return false; } if (!checkFileExists(rootSecondCaFilePath)) { MS_LOG(ERROR) << "The rootSecondCaFilePath is not exist."; return false; } replayAttackTimeDiff = UlongToLong(replay_attack_time_diff); return true; } bool CertVerify::verifyCertAndSign(const std::string &flID, const std::string &timeStamp, const unsigned char *signData, const std::string &keyAttestation, const std::string &equipCert, const std::string &equipCACert, const std::string &rootFirstCAPath, const std::string &rootSecondCAPath, const std::string &equipCrlPath) { if (!verifyEquipCertAndFlID(flID, equipCert)) { return false; } if (!verifyCAChain(keyAttestation, equipCert, equipCACert, rootFirstCAPath, rootSecondCAPath)) { return false; } if (!verifyCRL(equipCert, equipCrlPath)) { return false; } if (!verifyRSAKey(keyAttestation, signData, flID, timeStamp)) { return false; } if (!verifyTimeStamp(flID, timeStamp)) { return false; } return true; } #else bool CertVerify::verifyTimeStamp(const std::string &flID, const std::string &timeStamp) const { MS_LOG(WARNING) << "verifyTimeStamp in win32 platform."; return false; } void CertVerify::sha256Hash(const uint8_t *src, const int src_len, uint8_t *hash, const int len) const { MS_LOG(WARNING) << 
"sha256Hash in win32 platform."; } bool CertVerify::verifyRSAKey(const std::string &keyAttestation, const uint8_t *srcData, const uint8_t *signData, int srcDataLen) { MS_LOG(WARNING) << "verifyRSAKey in win32 platform."; return false; } bool CertVerify::initRootCertAndCRL(const std::string rootFirstCaFilePath, const std::string rootSecondCaFilePath, const std::string equipCrlPath, const uint64_t replay_attack_time_diff) { MS_LOG(WARNING) << "initRootCertAndCRL in win32 platform."; return false; } bool CertVerify::verifyCertAndSign(const std::string &flID, const std::string &timeStamp, const unsigned char *signData, const std::string &keyAttestation, const std::string &equipCert, const std::string &equipCACert, const std::string &rootFirstCAPath, const std::string &rootSecondCAPath, const std::string &equipCrlPath) { MS_LOG(WARNING) << "verifyCertAndSign in win32 platform."; return false; } #endif } // namespace server } // namespace ps } // namespace mindspore
raymond301/swift
services/search-db/src/main/java/edu/mayo/mprc/searchdb/builder/MassSpecDataExtractor.java
package edu.mayo.mprc.searchdb.builder; import edu.mayo.mprc.searchdb.dao.TandemMassSpectrometrySample; import java.util.Map; /** * For given biological sample name and name of a fraction, obtains a full information about the tandem mass spectrometry * sample (.RAW file or .mgf). * * @author <NAME> */ public interface MassSpecDataExtractor { /** * @param biologicalSampleName Name of the biological sample (corresponds to Scaffold "column"). * @param msmsSampleName Name of the MS/MS sample - typically matches the input file name without an extension. * @return Full information about the mass spectrometry sample. */ TandemMassSpectrometrySample getTandemMassSpectrometrySample(String biologicalSampleName, String msmsSampleName); Map<String, TandemMassSpectrometrySample> getMap(); }
qtwre/Open-Vehicle-Monitoring-System-3
vehicle/OVMS.V3/components/vehicle_bmwi3/ecu_definitions/ecu_lim_code.cpp
<filename>vehicle/OVMS.V3/components/vehicle_bmwi3/ecu_definitions/ecu_lim_code.cpp // // Warning: don't edit - generated by generate_ecu_code.pl processing ../dev/lim_i1.json: LIM 14: Charging interface module // This generated code makes it easier to process CANBUS messages from the LIM ecu in a BMW i3 // case I3_PID_LIM_STATUS_CALCVN: { // 0x2541 if (datalen < 20) { ESP_LOGW(TAG, "Received %d bytes for %s, expected %d", datalen, "I3_PID_LIM_STATUS_CALCVN", 20); break; } unsigned long STAT_CVN_WERT = (RXBUF_UINT32(16)); // Read out CVN (here the CVN must be output as in mode $ 09 (PID $ 06)) / CVN auslesen (hier muss die CVN wie // bei Mode $09 (PID $06) ausgegeben werden) ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%lu%s\n", "LIM", "STATUS_CALCVN", "STAT_CVN_WERT", STAT_CVN_WERT, "\"HEX\""); // ========== Add your processing here ========== hexdump(rxbuf, type, pid); break; } case I3_PID_LIM_LADESTECKDOSE_TEMPERATUR: { // 0xDB0F if (datalen < 2) { ESP_LOGW(TAG, "Received %d bytes for %s, expected %d", datalen, "I3_PID_LIM_LADESTECKDOSE_TEMPERATUR", 2); break; } float STAT_LADESTECKDOSE_TEMP_WERT = (RXBUF_UINT(0)/10.0f-40.0); // Temperature of the DC charging connection in ° C (China) / Temperatur der DC-Ladeanschluss in °C (China) ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%.4f%s\n", "LIM", "LADESTECKDOSE_TEMPERATUR", "STAT_LADESTECKDOSE_TEMP_WERT", STAT_LADESTECKDOSE_TEMP_WERT, "\"°C\""); // ========== Add your processing here ========== hexdump(rxbuf, type, pid); break; } case I3_PID_LIM_LADEBEREITSCHAFT_LIM: { // 0xDEF2 if (datalen < 1) { ESP_LOGW(TAG, "Received %d bytes for %s, expected %d", datalen, "I3_PID_LIM_LADEBEREITSCHAFT_LIM", 1); break; } unsigned char STAT_LADEBEREITSCHAFT_LIM = (RXBUF_UCHAR(0)); // Ready to charge (HW line), (1 = yes, 0 = no) sent from LIM to SLE / Ladebereitschaft (HW-Leitung), (1 = ja, 0 // = nein) vom LIM an SLE gesendet ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "LADEBEREITSCHAFT_LIM", 
"STAT_LADEBEREITSCHAFT_LIM", STAT_LADEBEREITSCHAFT_LIM, "\"0/1\""); // ========== Add your processing here ========== hexdump(rxbuf, type, pid); break; } case I3_PID_LIM_PROXIMITY: { // 0xDEF5 if (datalen < 2) { ESP_LOGW(TAG, "Received %d bytes for %s, expected %d", datalen, "I3_PID_LIM_PROXIMITY", 2); break; } unsigned char STAT_STECKER_NR = (RXBUF_UCHAR(0)); // Condition of the plug / Zustand des Steckers ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "PROXIMITY", "STAT_STECKER_NR", STAT_STECKER_NR, "\"0-n\""); unsigned char STAT_STROMTRAGFAEHIGKEIT_WERT = (RXBUF_UCHAR(1)); // Current carrying capacity of the connected cable / Stromtragfähigkeit des angeschlossenen Kabels ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "PROXIMITY", "STAT_STROMTRAGFAEHIGKEIT_WERT", STAT_STROMTRAGFAEHIGKEIT_WERT, "\"A\""); // ========== Add your processing here ========== hexdump(rxbuf, type, pid); break; } case I3_PID_LIM_PILOTSIGNAL: { // 0xDEF6 if (datalen < 7) { ESP_LOGW(TAG, "Received %d bytes for %s, expected %d", datalen, "I3_PID_LIM_PILOTSIGNAL", 7); break; } unsigned char STAT_PILOT_AKTIV = (RXBUF_UCHAR(0)); // State of the pilot signal (0 = not active, 1 = active) / Zustand des Pilotsignals (0 = nicht aktiv, 1 = aktiv) ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "PILOTSIGNAL", "STAT_PILOT_AKTIV", STAT_PILOT_AKTIV, "\"0/1\""); unsigned char STAT_PILOT_PWM_DUTYCYCLE_WERT = (RXBUF_UCHAR(1)); // Pulse duty factor PWM pilot signal / Tastverhältnis PWM Pilotsignal ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "PILOTSIGNAL", "STAT_PILOT_PWM_DUTYCYCLE_WERT", STAT_PILOT_PWM_DUTYCYCLE_WERT, "\"%\""); unsigned char STAT_PILOT_CURRENT_WERT = (RXBUF_UCHAR(2)); // Current value calculated from the pilot signal / Errechneter Stromwert aus Pilotsignal ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "PILOTSIGNAL", "STAT_PILOT_CURRENT_WERT", STAT_PILOT_CURRENT_WERT, "\"A\""); unsigned char STAT_PILOT_LADEBEREIT = 
(RXBUF_UCHAR(3)); // Vehicle ready to charge state (0 = not ready to charge, 1 = ready to charge) / Zustand Ladebereitschaft // Fahrzeug (0 = nicht ladebereit, 1 = ladebereit) ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "PILOTSIGNAL", "STAT_PILOT_LADEBEREIT", STAT_PILOT_LADEBEREIT, "\"0/1\""); unsigned short STAT_PILOT_FREQUENZ_WERT = (RXBUF_UINT(4)); // Frequency of the pilot signal / Frequenz des Pilotsignals ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%u%s\n", "LIM", "PILOTSIGNAL", "STAT_PILOT_FREQUENZ_WERT", STAT_PILOT_FREQUENZ_WERT, "\"Hz\""); float STAT_PILOT_PEGEL_WERT = (RXBUF_UCHAR(6)/10.0f); // Pilot signal level / Pegel des Pilotsignals ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%.4f%s\n", "LIM", "PILOTSIGNAL", "STAT_PILOT_PEGEL_WERT", STAT_PILOT_PEGEL_WERT, "\"V\""); // ========== Add your processing here ========== hexdump(rxbuf, type, pid); break; } case I3_PID_LIM_LADESCHNITTSTELLE_DC_TEPCO: { // 0xDEF7 if (datalen < 4) { ESP_LOGW(TAG, "Received %d bytes for %s, expected %d", datalen, "I3_PID_LIM_LADESCHNITTSTELLE_DC_TEPCO", 4); break; } unsigned char STAT_CHARGE_CONTROL_1 = (RXBUF_UCHAR(0)); // Charge control status 1 line (0 = not active, 1 = active) / Zustand Charge control 1 Leitung (0 = nicht aktiv, // 1 = aktiv) ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "LADESCHNITTSTELLE_DC_TEPCO", "STAT_CHARGE_CONTROL_1", STAT_CHARGE_CONTROL_1, "\"0/1\""); unsigned char STAT_CHARGE_CONTROL_2 = (RXBUF_UCHAR(1)); // Charge control status 2 line (0 = not active, 1 = active) / Zustand Charge control 2 Leitung (0 = nicht aktiv, // 1 = aktiv) ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "LADESCHNITTSTELLE_DC_TEPCO", "STAT_CHARGE_CONTROL_2", STAT_CHARGE_CONTROL_2, "\"0/1\""); unsigned char STAT_CHARGE_PERMISSION = (RXBUF_UCHAR(2)); // Charge permission line status (0 = not active, 1 = active) / Zustand Charge Permission Leitung (0 = nicht // aktiv, 1 = aktiv) ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", 
"LADESCHNITTSTELLE_DC_TEPCO", "STAT_CHARGE_PERMISSION", STAT_CHARGE_PERMISSION, "\"0/1\""); unsigned char STAT_LADESTECKER = (RXBUF_UCHAR(3)); // State of charging plug (0 = not plugged in, 1 = plugged in) / Zustand Ladestecker (0 = nicht gesteckt, 1 = // gesteckt) ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "LADESCHNITTSTELLE_DC_TEPCO", "STAT_LADESTECKER", STAT_LADESTECKER, "\"0/1\""); // ========== Add your processing here ========== hexdump(rxbuf, type, pid); break; } case I3_PID_LIM_DC_SCHUETZ_SCHALTER: { // 0xDEF8 if (datalen < 1) { ESP_LOGW(TAG, "Received %d bytes for %s, expected %d", datalen, "I3_PID_LIM_DC_SCHUETZ_SCHALTER", 1); break; } unsigned char STAT_DC_SCHUETZ_SCHALTER = (RXBUF_UCHAR(0)); // Contactor switch status (DC charging) / Status Schützschalter (DC-Laden) ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "DC_SCHUETZ_SCHALTER", "STAT_DC_SCHUETZ_SCHALTER", STAT_DC_SCHUETZ_SCHALTER, "\"0-n\""); // ========== Add your processing here ========== hexdump(rxbuf, type, pid); break; } case I3_PID_LIM_DC_SCHUETZ_SPANNUNG_EINGANG: { // 0xDEF9 if (datalen < 3) { ESP_LOGW(TAG, "Received %d bytes for %s, expected %d", datalen, "I3_PID_LIM_DC_SCHUETZ_SPANNUNG_EINGANG", 3); break; } unsigned short STAT_DC_SCHUETZ_SPANNUNG_EINGANG_WERT = (RXBUF_UINT(0)); // Voltage at the input of the relay box (contactors) for DC charging / Spannung am Eingang der Relaisbox // (Schaltschütze) für DC-Laden ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%u%s\n", "LIM", "DC_SCHUETZ_SPANNUNG_EINGANG", "STAT_DC_SCHUETZ_SPANNUNG_EINGANG_WERT", STAT_DC_SCHUETZ_SPANNUNG_EINGANG_WERT, "\"V\""); unsigned char STAT_DC_SCHUETZ_SPANNUNG_NEGATIV = (RXBUF_UCHAR(2)); // Detection of a negative voltage (0 = no or positive voltage / 1 = negative voltage) / Erkennung einer // negativen Spannung (0 = keine oder positive Spannung / 1 = negative Spannung) ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "DC_SCHUETZ_SPANNUNG_EINGANG", 
"STAT_DC_SCHUETZ_SPANNUNG_NEGATIV", STAT_DC_SCHUETZ_SPANNUNG_NEGATIV, "\"0/1\""); // ========== Add your processing here ========== hexdump(rxbuf, type, pid); break; } case I3_PID_LIM_DC_PINABDECKUNG_COMBO: { // 0xDEFA if (datalen < 1) { ESP_LOGW(TAG, "Received %d bytes for %s, expected %d", datalen, "I3_PID_LIM_DC_PINABDECKUNG_COMBO", 1); break; } unsigned char STAT_DC_PINABDECKUNG = (RXBUF_UCHAR(0)); // State of the DC pin cover for combo socket (0 = closed, 1 = open) / Zustand der DC Pinabdeckung bei // Combo-Steckdose (0 = geschlossen, 1 = geöffnet) ESP_LOGD(TAG, "From ECU %s, pid %s: got %s=%x%s\n", "LIM", "DC_PINABDECKUNG_COMBO", "STAT_DC_PINABDECKUNG", STAT_DC_PINABDECKUNG, "\"0/1\""); // ========== Add your processing here ========== hexdump(rxbuf, type, pid); break; }
wcicola/jitsi
src/net/java/sip/communicator/plugin/otr/ScOtrEngineImpl.java
/* * Jitsi, the OpenSource Java VoIP and Instant Messaging client. * * Copyright @ 2015 Atlassian Pty Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.java.sip.communicator.plugin.otr; import java.net.*; import java.security.*; import java.util.*; import java.util.concurrent.*; import net.java.otr4j.*; import net.java.otr4j.crypto.*; import net.java.otr4j.session.*; import net.java.sip.communicator.plugin.otr.OtrContactManager.OtrContact; import net.java.sip.communicator.plugin.otr.authdialog.*; import net.java.sip.communicator.service.browserlauncher.*; import net.java.sip.communicator.service.contactlist.*; import net.java.sip.communicator.service.gui.*; import net.java.sip.communicator.service.protocol.*; import net.java.sip.communicator.util.*; import org.osgi.framework.*; /** * * @author <NAME> * @author <NAME> * @author <NAME> * @author <NAME> * @author <NAME> */ public class ScOtrEngineImpl implements ScOtrEngine, ChatLinkClickedListener, ServiceListener { private class ScOtrEngineHost implements OtrEngineHost { @Override public KeyPair getLocalKeyPair(SessionID sessionID) { AccountID accountID = OtrActivator.getAccountIDByUID(sessionID.getAccountID()); KeyPair keyPair = OtrActivator.scOtrKeyManager.loadKeyPair(accountID); if (keyPair == null) OtrActivator.scOtrKeyManager.generateKeyPair(accountID); return OtrActivator.scOtrKeyManager.loadKeyPair(accountID); } @Override public OtrPolicy getSessionPolicy(SessionID sessionID) { return 
getContactPolicy(getOtrContact(sessionID).contact); } @Override public void injectMessage(SessionID sessionID, String messageText) throws OtrException { OtrContact otrContact = getOtrContact(sessionID); Contact contact = otrContact.contact; ContactResource resource = null; if (contact.supportResources()) { Collection<ContactResource> resources = contact.getResources(); if (resources != null) { for (ContactResource r : resources) { if (r.equals(otrContact.resource)) { resource = r; break; } } } } OperationSetBasicInstantMessaging imOpSet = contact .getProtocolProvider() .getOperationSet( OperationSetBasicInstantMessaging.class); // This is a dirty way of detecting whether the injected message // contains HTML markup. If this is the case then we should create // the message with the appropriate content type so that the remote // party can properly display the HTML. // When otr4j injects QueryMessages it calls // OtrEngineHost.getFallbackMessage() which is currently the only // host method that uses HTML so we can simply check if the injected // message contains the string that getFallbackMessage() returns. String otrHtmlFallbackMessage = "<a href=\"http://en.wikipedia.org/wiki/Off-the-Record_Messaging\">"; String contentType = messageText.contains(otrHtmlFallbackMessage) ? 
OperationSetBasicInstantMessaging.HTML_MIME_TYPE : OperationSetBasicInstantMessaging.DEFAULT_MIME_TYPE; Message message = imOpSet.createMessage( messageText, contentType, OperationSetBasicInstantMessaging.DEFAULT_MIME_ENCODING, null); injectedMessageUIDs.add(message.getMessageUID()); try { imOpSet.sendInstantMessage(contact, resource, message); } catch (OperationFailedException e) { throw new OtrException(e); } } @Override public void showError(SessionID sessionID, String err) { ScOtrEngineImpl.this.showError(sessionID, err); } public void showWarning(SessionID sessionID, String warn) { OtrContact otrContact = getOtrContact(sessionID); if (otrContact == null) return; Contact contact = otrContact.contact; OtrActivator.uiService.getChat(contact).addMessage( contact.getDisplayName(), new Date(), Chat.SYSTEM_MESSAGE, warn, OperationSetBasicInstantMessaging.DEFAULT_MIME_TYPE); } @Override public void unreadableMessageReceived(SessionID sessionID) throws OtrException { OtrContact otrContact = getOtrContact(sessionID); String resourceName = otrContact.resource != null ? 
"/" + otrContact.resource.getResourceName() : ""; Contact contact = otrContact.contact; String error = OtrActivator.resourceService.getI18NString( "plugin.otr.activator.unreadablemsgreceived", new String[] {contact.getDisplayName() + resourceName}); OtrActivator.uiService.getChat(contact).addMessage( contact.getDisplayName(), new Date(), Chat.ERROR_MESSAGE, error, OperationSetBasicInstantMessaging.DEFAULT_MIME_TYPE); } @Override public void unencryptedMessageReceived(SessionID sessionID, String msg) throws OtrException { OtrContact otrContact = getOtrContact(sessionID); if (otrContact == null) return; Contact contact = otrContact.contact; String warn = OtrActivator.resourceService.getI18NString( "plugin.otr.activator.unencryptedmsgreceived"); OtrActivator.uiService.getChat(contact).addMessage( contact.getDisplayName(), new Date(), Chat.SYSTEM_MESSAGE, warn, OperationSetBasicInstantMessaging.DEFAULT_MIME_TYPE); } @Override public void smpError(SessionID sessionID, int tlvType, boolean cheated) throws OtrException { OtrContact otrContact = getOtrContact(sessionID); if (otrContact == null) return; Contact contact = otrContact.contact; logger.debug("SMP error occurred" + ". Contact: " + contact.getDisplayName() + ". TLV type: " + tlvType + ". 
Cheated: " + cheated); String error = OtrActivator.resourceService.getI18NString( "plugin.otr.activator.smperror"); OtrActivator.uiService.getChat(contact).addMessage( contact.getDisplayName(), new Date(), Chat.ERROR_MESSAGE, error, OperationSetBasicInstantMessaging.DEFAULT_MIME_TYPE); SmpProgressDialog progressDialog = progressDialogMap.get(otrContact); if (progressDialog == null) { progressDialog = new SmpProgressDialog(contact); progressDialogMap.put(otrContact, progressDialog); } progressDialog.setProgressFail(); progressDialog.setVisible(true); } @Override public void smpAborted(SessionID sessionID) throws OtrException { OtrContact otrContact = getOtrContact(sessionID); if (otrContact == null) return; Contact contact = otrContact.contact; Session session = otrEngine.getSession(sessionID); if (session.isSmpInProgress()) { String warn = OtrActivator.resourceService.getI18NString( "plugin.otr.activator.smpaborted", new String[] {contact.getDisplayName()}); OtrActivator.uiService.getChat(contact).addMessage( contact.getDisplayName(), new Date(), Chat.SYSTEM_MESSAGE, warn, OperationSetBasicInstantMessaging.DEFAULT_MIME_TYPE); SmpProgressDialog progressDialog = progressDialogMap.get(otrContact); if (progressDialog == null) { progressDialog = new SmpProgressDialog(contact); progressDialogMap.put(otrContact, progressDialog); } progressDialog.setProgressFail(); progressDialog.setVisible(true); } } @Override public void finishedSessionMessage(SessionID sessionID, String msgText) throws OtrException { OtrContact otrContact = getOtrContact(sessionID); if (otrContact == null) return; String resourceName = otrContact.resource != null ? 
"/" + otrContact.resource.getResourceName() : ""; Contact contact = otrContact.contact; String error = OtrActivator.resourceService.getI18NString( "plugin.otr.activator.sessionfinishederror", new String[] {msgText, contact.getDisplayName() + resourceName}); OtrActivator.uiService.getChat(contact).addMessage( contact.getDisplayName(), new Date(), Chat.ERROR_MESSAGE, error, OperationSetBasicInstantMessaging.DEFAULT_MIME_TYPE); } @Override public void requireEncryptedMessage(SessionID sessionID, String msgText) throws OtrException { OtrContact otrContact = getOtrContact(sessionID); if (otrContact == null) return; Contact contact = otrContact.contact; String error = OtrActivator.resourceService.getI18NString( "plugin.otr.activator.requireencryption", new String[] {msgText}); OtrActivator.uiService.getChat(contact).addMessage( contact.getDisplayName(), new Date(), Chat.ERROR_MESSAGE, error, OperationSetBasicInstantMessaging.DEFAULT_MIME_TYPE); } @Override public byte[] getLocalFingerprintRaw(SessionID sessionID) { AccountID accountID = OtrActivator.getAccountIDByUID(sessionID.getAccountID()); return OtrActivator.scOtrKeyManager.getLocalFingerprintRaw(accountID); } @Override public void askForSecret( SessionID sessionID, InstanceTag receiverTag, String question) { OtrContact otrContact = getOtrContact(sessionID); if (otrContact == null) return; Contact contact = otrContact.contact; SmpAuthenticateBuddyDialog dialog = new SmpAuthenticateBuddyDialog( otrContact, receiverTag, question); dialog.setVisible(true); SmpProgressDialog progressDialog = progressDialogMap.get(otrContact); if (progressDialog == null) { progressDialog = new SmpProgressDialog(contact); progressDialogMap.put(otrContact, progressDialog); } progressDialog.init(); progressDialog.setVisible(true); } @Override public void verify( SessionID sessionID, String fingerprint, boolean approved) { OtrContact otrContact = getOtrContact(sessionID); if (otrContact == null) return; Contact contact = otrContact.contact; 
// ------------------------------------------------------------------
// Tail of ScOtrEngineHost.verify(SessionID, String): the signature is
// in the previous chunk.  Persists the "verified" state of the
// fingerprint and shows the SMP progress dialog as succeeded.
// ------------------------------------------------------------------
            OtrActivator.scOtrKeyManager.verify(otrContact, fingerprint);

            // Lazily create and cache one SMP progress dialog per contact.
            SmpProgressDialog progressDialog = progressDialogMap.get(otrContact);
            if (progressDialog == null)
            {
                progressDialog = new SmpProgressDialog(contact);
                progressDialogMap.put(otrContact, progressDialog);
            }

            progressDialog.setProgressSuccess();
            progressDialog.setVisible(true);
        }

        /**
         * Marks the fingerprint as unverified for the contact mapped to
         * <tt>sessionID</tt> and shows the SMP progress dialog as failed.
         */
        @Override
        public void unverify(SessionID sessionID, String fingerprint)
        {
            OtrContact otrContact = getOtrContact(sessionID);
            if (otrContact == null)
                return;

            Contact contact = otrContact.contact;
            OtrActivator.scOtrKeyManager.unverify(otrContact, fingerprint);

            SmpProgressDialog progressDialog = progressDialogMap.get(otrContact);
            if (progressDialog == null)
            {
                progressDialog = new SmpProgressDialog(contact);
                progressDialogMap.put(otrContact, progressDialog);
            }

            progressDialog.setProgressFail();
            progressDialog.setVisible(true);
        }

        /**
         * Localized reply automatically sent to the peer when we receive an
         * OTR message we cannot decrypt.
         */
        @Override
        public String getReplyForUnreadableMessage(SessionID sessionID)
        {
            AccountID accountID =
                OtrActivator.getAccountIDByUID(sessionID.getAccountID());

            return OtrActivator.resourceService.getI18NString(
                "plugin.otr.activator.unreadablemsgreply",
                new String[] {accountID.getDisplayName(),
                    accountID.getDisplayName()});
        }

        /**
         * Localized text appended to plaintext messages sent to peers
         * that do not support OTR.
         */
        @Override
        public String getFallbackMessage(SessionID sessionID)
        {
            AccountID accountID =
                OtrActivator.getAccountIDByUID(sessionID.getAccountID());

            return OtrActivator.resourceService.getI18NString(
                "plugin.otr.activator.fallbackmessage",
                new String[] {accountID.getDisplayName()});
        }

        /**
         * Warns the user, in the chat window, that the peer appears to be
         * logged in from multiple locations.
         */
        @Override
        public void multipleInstancesDetected(SessionID sessionID)
        {
            OtrContact otrContact = getOtrContact(sessionID);
            if (otrContact == null)
                return;

            // Append "/resource" when the OTR contact is bound to a
            // particular resource.
            String resourceName = otrContact.resource != null ?
                "/" + otrContact.resource.getResourceName() : "";
            Contact contact = otrContact.contact;
            String message =
                OtrActivator.resourceService.getI18NString(
                    "plugin.otr.activator.multipleinstancesdetected",
                    new String[] {contact.getDisplayName() + resourceName});

            OtrActivator.uiService.getChat(contact).addMessage(
                contact.getDisplayName(),
                new Date(), Chat.SYSTEM_MESSAGE,
                message,
                OperationSetBasicInstantMessaging.HTML_MIME_TYPE);
        }

        /**
         * Informs the user, in the chat window, that a message intended for
         * another instance of the local account was received.
         */
        @Override
        public void messageFromAnotherInstanceReceived(SessionID sessionID)
        {
            OtrContact otrContact = getOtrContact(sessionID);
            if (otrContact == null)
                return;

            String resourceName = otrContact.resource != null ?
                "/" + otrContact.resource.getResourceName() : "";
            Contact contact = otrContact.contact;
            String message =
                OtrActivator.resourceService.getI18NString(
                    "plugin.otr.activator.msgfromanotherinstance",
                    new String[] {contact.getDisplayName() + resourceName});

            OtrActivator.uiService.getChat(contact).addMessage(
                contact.getDisplayName(),
                new Date(), Chat.SYSTEM_MESSAGE,
                message,
                OperationSetBasicInstantMessaging.HTML_MIME_TYPE);
        }

        /**
         * Provide fragmenter instructions according to the Instant Messaging
         * transport channel of the contact's protocol.
         */
        @Override
        public FragmenterInstructions getFragmenterInstructions(
            final SessionID sessionID)
        {
            // NOTE(review): unlike the callbacks above, otrContact is NOT
            // null-checked here; a stale SessionID would NPE on the next
            // statement -- confirm whether that can occur in practice.
            final OtrContact otrContact = getOtrContact(sessionID);
            final OperationSetBasicInstantMessagingTransport transport =
                otrContact.contact.getProtocolProvider().getOperationSet(
                    OperationSetBasicInstantMessagingTransport.class);
            if (transport == null)
            {
                // There is no operation set for querying transport parameters.
                // Assuming transport capabilities are unlimited.
                if (logger.isDebugEnabled())
                {
                    logger.debug("No implementation of "
                        + "BasicInstantMessagingTransport available. Assuming "
                        + "OTR defaults for OTR fragmentation instructions.");
                }
                return null;
            }

            // Translate the transport's "unlimited" marker into the
            // fragmenter's own sentinel.
            int messageSize = transport.getMaxMessageSize(otrContact.contact);
            if (messageSize
                == OperationSetBasicInstantMessagingTransport.UNLIMITED)
            {
                messageSize = FragmenterInstructions.UNLIMITED;
            }
            int numberOfMessages =
                transport.getMaxNumberOfMessages(otrContact.contact);
            if (numberOfMessages
                == OperationSetBasicInstantMessagingTransport.UNLIMITED)
            {
                numberOfMessages = FragmenterInstructions.UNLIMITED;
            }
            if (logger.isDebugEnabled())
            {
                logger.debug("OTR fragmentation instructions for sending a "
                    + "message to " + otrContact.contact.getDisplayName()
                    + " (" + otrContact.contact.getAddress()
                    + "). Maximum number of " + "messages: " + numberOfMessages
                    + ", maximum message size: " + messageSize);
            }
            return new FragmenterInstructions(numberOfMessages, messageSize);
        }
    }

    /**
     * The max timeout period elapsed prior to establishing a TIMED_OUT session.
     */
    private static final int SESSION_TIMEOUT =
        OtrActivator.configService.getInt(
            "net.java.sip.communicator.plugin.otr.SESSION_STATUS_TIMEOUT",
            30000);

    /**
     * Manages the scheduling of TimerTasks that are used to set Contact's
     * ScSessionStatus (to TIMED_OUT) after a period of time.
     */
    private ScSessionStatusScheduler scheduler = new ScSessionStatusScheduler();

    /**
     * This mapping is used for taking care of keeping SessionStatus and
     * ScSessionStatus in sync for every Session object.
     */
    private Map<SessionID, ScSessionStatus> scSessionStatusMap =
        new ConcurrentHashMap<SessionID, ScSessionStatus>();

    // Global SessionID -> OtrContact mapping; static so it survives engine
    // re-instantiation (see constructor note about OSGi restarts).
    private static final Map<ScSessionID, OtrContact> contactsMap =
        new Hashtable<ScSessionID, OtrContact>();

    // One reusable SMP progress dialog per contact.
    private static final Map<OtrContact, SmpProgressDialog> progressDialogMap =
        new ConcurrentHashMap<OtrContact, SmpProgressDialog>();

    /** Resolves the OtrContact registered for the given session, if any. */
    public static OtrContact getOtrContact(SessionID sessionID)
    {
        return contactsMap.get(new ScSessionID(sessionID));
    }

    /**
     * Returns the <tt>ScSessionID</tt> for given <tt>UUID</tt>.
* @param guid the <tt>UUID</tt> identifying <tt>ScSessionID</tt>. * @return the <tt>ScSessionID</tt> for given <tt>UUID</tt> or <tt>null</tt> * if no matching session found. */ public static ScSessionID getScSessionForGuid(UUID guid) { for(ScSessionID scSessionID : contactsMap.keySet()) { if(scSessionID.getGUID().equals(guid)) { return scSessionID; } } return null; } public static SessionID getSessionID(OtrContact otrContact) { ProtocolProviderService pps = otrContact.contact.getProtocolProvider(); String resourceName = otrContact.resource != null ? "/" + otrContact.resource.getResourceName() : ""; SessionID sessionID = new SessionID( pps.getAccountID().getAccountUniqueID(), otrContact.contact.getAddress() + resourceName, pps.getProtocolName()); synchronized (contactsMap) { if(contactsMap.containsKey(new ScSessionID(sessionID))) return sessionID; ScSessionID scSessionID = new ScSessionID(sessionID); contactsMap.put(scSessionID, otrContact); } return sessionID; } private final OtrConfigurator configurator = new OtrConfigurator(); private final List<String> injectedMessageUIDs = new Vector<String>(); private final List<ScOtrEngineListener> listeners = new Vector<ScOtrEngineListener>(); /** * The logger */ private final Logger logger = Logger.getLogger(ScOtrEngineImpl.class); private final OtrEngineHost otrEngineHost = new ScOtrEngineHost(); private final OtrSessionManager otrEngine; public ScOtrEngineImpl() { otrEngine = new OtrSessionManagerImpl(otrEngineHost); // Clears the map after previous instance // This is required because of OSGi restarts in the same VM on Android contactsMap.clear(); scSessionStatusMap.clear(); this.otrEngine.addOtrEngineListener(new OtrEngineListener() { @Override public void sessionStatusChanged(SessionID sessionID) { OtrContact otrContact = getOtrContact(sessionID); if (otrContact == null) return; String resourceName = otrContact.resource != null ? 
"/" + otrContact.resource.getResourceName() : ""; Contact contact = otrContact.contact; // Cancels any scheduled tasks that will change the // ScSessionStatus for this Contact scheduler.cancel(otrContact); ScSessionStatus scSessionStatus = getSessionStatus(otrContact); String message = ""; final Session session = otrEngine.getSession(sessionID); switch (session.getSessionStatus()) { case ENCRYPTED: scSessionStatus = ScSessionStatus.ENCRYPTED; scSessionStatusMap.put(sessionID, scSessionStatus); PublicKey remotePubKey = session.getRemotePublicKey(); String remoteFingerprint = null; try { remoteFingerprint = new OtrCryptoEngineImpl(). getFingerprint(remotePubKey); } catch (OtrCryptoException e) { logger.debug( "Could not get the fingerprint from the " + "public key of contact: " + contact); } List<String> allFingerprintsOfContact = OtrActivator.scOtrKeyManager. getAllRemoteFingerprints(contact); if (allFingerprintsOfContact != null) { if (!allFingerprintsOfContact.contains( remoteFingerprint)) { OtrActivator.scOtrKeyManager.saveFingerprint( contact, remoteFingerprint); } } if (!OtrActivator.scOtrKeyManager.isVerified( contact, remoteFingerprint)) { OtrActivator.scOtrKeyManager.unverify( otrContact, remoteFingerprint); UUID sessionGuid = null; for(ScSessionID scSessionID : contactsMap.keySet()) { if(scSessionID.getSessionID().equals(sessionID)) { sessionGuid = scSessionID.getGUID(); break; } } OtrActivator.uiService.getChat(contact) .addChatLinkClickedListener(ScOtrEngineImpl.this); String unverifiedSessionWarning = OtrActivator.resourceService.getI18NString( "plugin.otr.activator.unverifiedsessionwarning", new String[] { contact.getDisplayName() + resourceName, this.getClass().getName(), "AUTHENTIFICATION", sessionGuid.toString() }); OtrActivator.uiService.getChat(contact).addMessage( contact.getDisplayName(), new Date(), Chat.SYSTEM_MESSAGE, unverifiedSessionWarning, OperationSetBasicInstantMessaging.HTML_MIME_TYPE); } // show info whether history is on or off String 
otrAndHistoryMessage; if(!OtrActivator.getMessageHistoryService() .isHistoryLoggingEnabled() || !isHistoryLoggingEnabled(contact)) { otrAndHistoryMessage = OtrActivator.resourceService.getI18NString( "plugin.otr.activator.historyoff", new String[]{ OtrActivator.resourceService .getSettingsString( "service.gui.APPLICATION_NAME"), this.getClass().getName(), "showHistoryPopupMenu" }); } else { otrAndHistoryMessage = OtrActivator.resourceService.getI18NString( "plugin.otr.activator.historyon", new String[]{ OtrActivator.resourceService .getSettingsString( "service.gui.APPLICATION_NAME"), this.getClass().getName(), "showHistoryPopupMenu" }); } OtrActivator.uiService.getChat(contact).addMessage( contact.getDisplayName(), new Date(), Chat.SYSTEM_MESSAGE, otrAndHistoryMessage, OperationSetBasicInstantMessaging.HTML_MIME_TYPE); message = OtrActivator.resourceService.getI18NString( "plugin.otr.activator.multipleinstancesdetected", new String[] {contact.getDisplayName()}); if (contact.supportResources() && contact.getResources() != null && contact.getResources().size() > 1) OtrActivator.uiService.getChat(contact).addMessage( contact.getDisplayName(), new Date(), Chat.SYSTEM_MESSAGE, message, OperationSetBasicInstantMessaging.DEFAULT_MIME_TYPE); message = OtrActivator.resourceService.getI18NString( OtrActivator.scOtrKeyManager.isVerified( contact, remoteFingerprint) ? 
"plugin.otr.activator.sessionstared" : "plugin.otr.activator.unverifiedsessionstared", new String[] {contact.getDisplayName() + resourceName}); break; case FINISHED: scSessionStatus = ScSessionStatus.FINISHED; scSessionStatusMap.put(sessionID, scSessionStatus); message = OtrActivator.resourceService.getI18NString( "plugin.otr.activator.sessionfinished", new String[] {contact.getDisplayName() + resourceName}); break; case PLAINTEXT: scSessionStatus = ScSessionStatus.PLAINTEXT; scSessionStatusMap.put(sessionID, scSessionStatus); message = OtrActivator.resourceService.getI18NString( "plugin.otr.activator.sessionlost", new String[] {contact.getDisplayName() + resourceName}); break; } OtrActivator.uiService.getChat(contact).addMessage( contact.getDisplayName(), new Date(), Chat.SYSTEM_MESSAGE, message, OperationSetBasicInstantMessaging.HTML_MIME_TYPE); for (ScOtrEngineListener l : getListeners()) l.sessionStatusChanged(otrContact); } @Override public void multipleInstancesDetected(SessionID sessionID) { OtrContact otrContact = getOtrContact(sessionID); if (otrContact == null) return; for (ScOtrEngineListener l : getListeners()) l.multipleInstancesDetected(otrContact); } @Override public void outgoingSessionChanged(SessionID sessionID) { OtrContact otrContact = getOtrContact(sessionID); if (otrContact == null) return; for (ScOtrEngineListener l : getListeners()) l.outgoingSessionChanged(otrContact); } }); } /** * Checks whether history is enabled for the metacontact containing * the <tt>contact</tt>. * @param contact the contact to check. * @return whether chat logging is enabled while chatting * with <tt>contact</tt>. 
*/
    private boolean isHistoryLoggingEnabled(Contact contact)
    {
        MetaContact metaContact = OtrActivator
            .getContactListService().findMetaContactByContact(contact);
        if(metaContact != null)
            return OtrActivator.getMessageHistoryService()
                .isHistoryLoggingEnabled(metaContact.getMetaUID());
        else
            // No metacontact found: assume logging is enabled.
            return true;
    }

    /** Registers a listener; duplicates are ignored. */
    @Override
    public void addListener(ScOtrEngineListener l)
    {
        synchronized (listeners)
        {
            if (!listeners.contains(l))
                listeners.add(l);
        }
    }

    /**
     * Handles clicks on the "authenticate" link injected into the chat for
     * unverified sessions; dispatches to the registered OtrActionHandler.
     */
    @Override
    public void chatLinkClicked(URI url)
    {
        String action = url.getPath();
        if(action.equals("/AUTHENTIFICATION"))
        {
            UUID guid = UUID.fromString(url.getQuery());

            // NOTE(review): UUID.fromString never returns null (it throws
            // IllegalArgumentException on malformed input), so this null
            // check appears to be dead code.
            if(guid == null)
                throw new RuntimeException(
                    "No UUID found in OTR authenticate URL");

            // Looks for registered action handler
            OtrActionHandler actionHandler
                = ServiceUtils.getService(
                    OtrActivator.bundleContext, OtrActionHandler.class);
            if(actionHandler != null)
            {
                actionHandler.onAuthenticateLinkClicked(guid);
            }
            else
            {
                logger.error("No OtrActionHandler registered");
            }
        }
    }

    /** Ends the OTR session with the given contact (back to plaintext). */
    @Override
    public void endSession(OtrContact otrContact)
    {
        SessionID sessionID = getSessionID(otrContact);
        try
        {
            setSessionStatus(otrContact, ScSessionStatus.PLAINTEXT);

            otrEngine.getSession(sessionID).endSession();
        }
        catch (OtrException e)
        {
            showError(sessionID, e.getMessage());
        }
    }

    /**
     * Per-contact OTR policy; falls back to the global policy when no
     * per-contact value has been stored (stored value below 0).
     */
    @Override
    public OtrPolicy getContactPolicy(Contact contact)
    {
        ProtocolProviderService pps = contact.getProtocolProvider();
        SessionID sessionID =
            new SessionID(
                pps.getAccountID().getAccountUniqueID(),
                contact.getAddress(),
                pps.getProtocolName());
        int policy =
            this.configurator.getPropertyInt(sessionID + "contact_policy",
                -1);
        if (policy < 0)
            return getGlobalPolicy();
        else
            return new OtrPolicyImpl(policy);
    }

    @Override
    public OtrPolicy getGlobalPolicy()
    {
        /*
         * SEND_WHITESPACE_TAG bit will be lowered until we stabilize the OTR.
         */
        int defaultScOtrPolicy =
            OtrPolicy.OTRL_POLICY_DEFAULT & ~OtrPolicy.SEND_WHITESPACE_TAG;
        return new OtrPolicyImpl(this.configurator.getPropertyInt(
            "GLOBAL_POLICY", defaultScOtrPolicy));
    }

    /**
     * Gets a copy of the list of <tt>ScOtrEngineListener</tt>s registered with
     * this instance which may safely be iterated without the risk of a
     * <tt>ConcurrentModificationException</tt>.
     *
     * @return a copy of the list of <tt>ScOtrEngineListener<tt>s registered
     * with this instance which may safely be iterated without the risk of a
     * <tt>ConcurrentModificationException</tt>
     */
    private ScOtrEngineListener[] getListeners()
    {
        synchronized (listeners)
        {
            return listeners.toArray(new ScOtrEngineListener[listeners.size()]);
        }
    }

    /**
     * Manages the scheduling of TimerTasks that are used to set Contact's
     * ScSessionStatus after a period of time.
     *
     * @author <NAME>
     */
    private class ScSessionStatusScheduler
    {
        private final Timer timer = new Timer();

        private final Map<OtrContact, TimerTask> tasks =
            new ConcurrentHashMap<OtrContact, TimerTask>();

        /**
         * Schedules a status change to fire after SESSION_TIMEOUT ms,
         * replacing any previously scheduled one for the same contact.
         */
        public void scheduleScSessionStatusChange(
            final OtrContact otrContact, final ScSessionStatus status)
        {
            cancel(otrContact);

            TimerTask task
                = new TimerTask()
                {
                    @Override
                    public void run()
                    {
                        setSessionStatus(otrContact, status);
                    }
                };
            timer.schedule(task, SESSION_TIMEOUT);
            tasks.put(otrContact, task);
        }

        /** Cancels and forgets the pending task for the contact, if any. */
        public void cancel(final OtrContact otrContact)
        {
            TimerTask task = tasks.get(otrContact);
            if (task != null)
                task.cancel();
            tasks.remove(otrContact);
        }

        /** Drops pending tasks of contacts whose provider is unregistering. */
        public void serviceChanged(ServiceEvent ev)
        {
            Object service
                = OtrActivator.bundleContext.getService(
                    ev.getServiceReference());

            if (!(service instanceof ProtocolProviderService))
                return;

            if (ev.getType() == ServiceEvent.UNREGISTERING)
            {
                ProtocolProviderService provider
                    = (ProtocolProviderService) service;

                Iterator<OtrContact> i = tasks.keySet().iterator();

                while (i.hasNext())
                {
                    OtrContact otrContact = i.next();
                    if (provider.equals(
                        otrContact.contact.getProtocolProvider()))
                    {
                        cancel(otrContact);
                        i.remove();
                    }
                }
            }
        }
    }

    /** Records the new status, cancels pending timers, notifies listeners. */
    private void setSessionStatus(OtrContact contact, ScSessionStatus status)
    {
        scSessionStatusMap.put(getSessionID(contact), status);
        scheduler.cancel(contact);
        for (ScOtrEngineListener l : getListeners())
            l.sessionStatusChanged(contact);
    }

    /**
     * Returns the cached ScSessionStatus for the contact, seeding the cache
     * from otr4j's SessionStatus on first access.
     */
    @Override
    public ScSessionStatus getSessionStatus(OtrContact contact)
    {
        SessionID sessionID = getSessionID(contact);
        SessionStatus sessionStatus =
            otrEngine.getSession(sessionID).getSessionStatus();
        ScSessionStatus scSessionStatus = null;
        if (!scSessionStatusMap.containsKey(sessionID))
        {
            // NOTE(review): if SessionStatus ever gains a value not covered
            // below, scSessionStatus stays null and ConcurrentHashMap.put
            // would throw a NullPointerException.
            switch (sessionStatus)
            {
            case PLAINTEXT:
                scSessionStatus = ScSessionStatus.PLAINTEXT;
                break;
            case ENCRYPTED:
                scSessionStatus = ScSessionStatus.ENCRYPTED;
                break;
            case FINISHED:
                scSessionStatus = ScSessionStatus.FINISHED;
                break;
            }
            scSessionStatusMap.put(sessionID, scSessionStatus);
        }
        return scSessionStatusMap.get(sessionID);
    }

    /** True when the message UID was generated by this engine itself. */
    @Override
    public boolean isMessageUIDInjected(String mUID)
    {
        return injectedMessageUIDs.contains(mUID);
    }

    /** Opens the OTR help page in the platform browser, if available. */
    @Override
    public void launchHelp()
    {
        ServiceReference ref =
            OtrActivator.bundleContext
                .getServiceReference(BrowserLauncherService.class.getName());

        if (ref == null)
            return;

        BrowserLauncherService service
            = (BrowserLauncherService)
                OtrActivator.bundleContext.getService(ref);

        service.openURL(OtrActivator.resourceService
            .getI18NString("plugin.otr.authbuddydialog.HELP_URI"));
    }

    /** Restarts the OTR key exchange for the given contact's session. */
    @Override
    public void refreshSession(OtrContact otrContact)
    {
        SessionID sessionID = getSessionID(otrContact);
        try
        {
            otrEngine.getSession(sessionID).refreshSession();
        }
        catch (OtrException e)
        {
            logger.error("Error refreshing session", e);
            showError(sessionID, e.getMessage());
        }
    }

    @Override
    public void removeListener(ScOtrEngineListener l)
    {
        synchronized (listeners)
        {
            listeners.remove(l);
        }
    }

    /**
     * Cleans the contactsMap when <tt>ProtocolProviderService</tt>
     * gets unregistered.
*/
    @Override
    public void serviceChanged(ServiceEvent ev)
    {
        Object service
            = OtrActivator.bundleContext.getService(ev.getServiceReference());

        if (!(service instanceof ProtocolProviderService))
            return;

        if (ev.getType() == ServiceEvent.UNREGISTERING)
        {
            if (logger.isDebugEnabled())
            {
                logger.debug(
                    "Unregistering a ProtocolProviderService, cleaning"
                        + " OTR's ScSessionID to Contact map.");
                logger.debug(
                    "Unregistering a ProtocolProviderService, cleaning"
                        + " OTR's Contact to SpmProgressDialog map.");
            }

            ProtocolProviderService provider
                = (ProtocolProviderService) service;

            // Purge session mappings belonging to the dying provider.
            synchronized(contactsMap)
            {
                Iterator<OtrContact> i = contactsMap.values().iterator();

                while (i.hasNext())
                {
                    OtrContact otrContact = i.next();
                    if (provider.equals(
                        otrContact.contact.getProtocolProvider()))
                    {
                        scSessionStatusMap.remove(getSessionID(otrContact));
                        i.remove();
                    }
                }
            }

            // Purge cached SMP progress dialogs of the dying provider.
            Iterator<OtrContact> i = progressDialogMap.keySet().iterator();

            while (i.hasNext())
            {
                if (provider.equals(i.next().contact.getProtocolProvider()))
                    i.remove();
            }

            scheduler.serviceChanged(ev);
        }
    }

    /** Stores (or clears, when policy is null) the per-contact OTR policy. */
    @Override
    public void setContactPolicy(Contact contact, OtrPolicy policy)
    {
        ProtocolProviderService pps = contact.getProtocolProvider();
        SessionID sessionID =
            new SessionID(
                pps.getAccountID().getAccountUniqueID(),
                contact.getAddress(),
                pps.getProtocolName());

        String propertyID = sessionID + "contact_policy";
        if (policy == null)
            this.configurator.removeProperty(propertyID);
        else
            this.configurator.setProperty(propertyID, policy.getPolicy());

        for (ScOtrEngineListener l : getListeners())
            l.contactPolicyChanged(contact);
    }

    /** Stores (or resets, when policy is null) the global OTR policy. */
    @Override
    public void setGlobalPolicy(OtrPolicy policy)
    {
        if (policy == null)
            this.configurator.removeProperty("GLOBAL_POLICY");
        else
            this.configurator.setProperty("GLOBAL_POLICY", policy.getPolicy());

        for (ScOtrEngineListener l : getListeners())
            l.globalPolicyChanged();
    }

    /** Shows an error text in the chat window of the session's contact. */
    public void showError(SessionID sessionID, String err)
    {
        OtrContact otrContact = getOtrContact(sessionID);
        if (otrContact == null)
            return;

        Contact contact = otrContact.contact;
        OtrActivator.uiService.getChat(contact).addMessage(
            contact.getDisplayName(), new Date(),
            Chat.ERROR_MESSAGE, err,
            OperationSetBasicInstantMessaging.DEFAULT_MIME_TYPE);
    }

    /**
     * Starts the OTR handshake: status goes to LOADING immediately and a
     * scheduled task later flips it to TIMED_OUT unless otr4j reports a
     * result first.
     */
    @Override
    public void startSession(OtrContact otrContact)
    {
        SessionID sessionID = getSessionID(otrContact);

        ScSessionStatus scSessionStatus = getSessionStatus(otrContact);
        scSessionStatus = ScSessionStatus.LOADING;
        scSessionStatusMap.put(sessionID, scSessionStatus);
        for (ScOtrEngineListener l : getListeners())
        {
            l.sessionStatusChanged(otrContact);
        }

        scheduler.scheduleScSessionStatusChange(
            otrContact, ScSessionStatus.TIMED_OUT);

        try
        {
            otrEngine.getSession(sessionID).startSession();
        }
        catch (OtrException e)
        {
            logger.error("Error starting session", e);
            showError(sessionID, e.getMessage());
        }
    }

    /** Decrypts/handles an incoming raw message; null on OTR failure. */
    @Override
    public String transformReceiving(OtrContact otrContact, String msgText)
    {
        SessionID sessionID = getSessionID(otrContact);
        try
        {
            return otrEngine.getSession(sessionID).transformReceiving(msgText);
        }
        catch (OtrException e)
        {
            logger.error("Error receiving the message", e);
            showError(sessionID, e.getMessage());
            return null;
        }
    }

    /** Encrypts/fragments an outgoing message; null on OTR failure. */
    @Override
    public String[] transformSending(OtrContact otrContact, String msgText)
    {
        SessionID sessionID = getSessionID(otrContact);
        try
        {
            return otrEngine.getSession(sessionID).transformSending(msgText);
        }
        catch (OtrException e)
        {
            logger.error("Error transforming the message", e);
            showError(sessionID, e.getMessage());
            return null;
        }
    }

    /** Convenience lookup of the otr4j Session for a contact. */
    private Session getSession(OtrContact contact)
    {
        SessionID sessionID = getSessionID(contact);
        return otrEngine.getSession(sessionID);
    }

    /** Initiates SMP authentication with the given question/secret. */
    @Override
    public void initSmp(OtrContact otrContact, String question, String secret)
    {
        Session session = getSession(otrContact);
        try
        {
            session.initSmp(question, secret);

            SmpProgressDialog progressDialog =
                progressDialogMap.get(otrContact);
            if (progressDialog == null)
            {
                progressDialog = new SmpProgressDialog(otrContact.contact);
                progressDialogMap.put(otrContact, progressDialog);
            }

            progressDialog.init();
            progressDialog.setVisible(true);
        }
        catch (OtrException e)
        {
            logger.error("Error initializing SMP session with contact "
                + otrContact.contact.getDisplayName(), e);
            showError(session.getSessionID(), e.getMessage());
        }
    }

    /** Responds to a peer-initiated SMP request. */
    @Override
    public void respondSmp(
        OtrContact otrContact,
        InstanceTag receiverTag,
        String question,
        String secret)
    {
        Session session = getSession(otrContact);
        try
        {
            session.respondSmp(receiverTag, question, secret);

            SmpProgressDialog progressDialog =
                progressDialogMap.get(otrContact);
            if (progressDialog == null)
            {
                progressDialog = new SmpProgressDialog(otrContact.contact);
                progressDialogMap.put(otrContact, progressDialog);
            }

            progressDialog.incrementProgress();
            progressDialog.setVisible(true);
        }
        catch (OtrException e)
        {
            // NOTE(review): "occured" typo is in the original log text.
            logger.error(
                "Error occured when sending SMP response to contact "
                    + otrContact.contact.getDisplayName(), e);
            showError(session.getSessionID(), e.getMessage());
        }
    }

    /** Aborts a running SMP exchange and disposes of its dialog. */
    @Override
    public void abortSmp(OtrContact otrContact)
    {
        Session session = getSession(otrContact);
        try
        {
            session.abortSmp();

            SmpProgressDialog progressDialog =
                progressDialogMap.get(otrContact);
            if (progressDialog == null)
            {
                progressDialog = new SmpProgressDialog(otrContact.contact);
                progressDialogMap.put(otrContact, progressDialog);
            }

            progressDialog.dispose();
        }
        catch (OtrException e)
        {
            logger.error("Error aborting SMP session with contact "
                + otrContact.contact.getDisplayName(), e);
            showError(session.getSessionID(), e.getMessage());
        }
    }

    /** Remote long-term public key for the contact's session, or null. */
    @Override
    public PublicKey getRemotePublicKey(OtrContact otrContact)
    {
        if (otrContact == null)
            return null;

        Session session = getSession(otrContact);
        return session.getRemotePublicKey();
    }

    /** All session instances (one per remote client) for the contact. */
    @Override
    public List<Session> getSessionInstances(OtrContact otrContact)
    {
        if (otrContact == null)
            return Collections.emptyList();
        return getSession(otrContact).getInstances();
    }

    /** Selects which remote instance outgoing messages are sent to. */
    @Override
    public boolean setOutgoingSession(OtrContact contact, InstanceTag tag)
    {
        if (contact == null)
            return false;

        Session session = getSession(contact);
        // Drop the cached status so it is re-derived for the new instance.
        scSessionStatusMap.remove(session.getSessionID());
        return session.setOutgoingInstance(tag);
    }

    /** The session instance currently used for outgoing messages. */
    @Override
    public Session getOutgoingSession(OtrContact contact)
    {
        if (contact == null)
            return null;

        SessionID sessionID = getSessionID(contact);
        return otrEngine.getSession(sessionID).getOutgoingInstance();
    }
}
datalogics-kam/conan
conans/test/integration/manifest_validation_test.py
import os import unittest from parameterized.parameterized import parameterized from conans.test.utils.tools import TestServer, TestClient, NO_SETTINGS_PACKAGE_ID from conans.model.ref import ConanFileReference from conans.util.files import save, load, md5 from conans.model.ref import PackageReference from conans.paths import CONANFILE, SimplePaths, CONAN_MANIFEST from conans.test.utils.test_files import temp_folder from conans.model.manifest import FileTreeManifest class ManifestValidationTest(unittest.TestCase): def setUp(self): test_server = TestServer() self.servers = {"default": test_server} self.client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]}) conanfile = """from conans import ConanFile class ConanFileTest(ConanFile): name = "Hello" version = "0.1" exports = "*" """ self.files = {CONANFILE: conanfile, "data.txt": "MyData"} self.reference = ConanFileReference.loads("Hello/0.1@lasote/stable") self.client.save(self.files) self.client.run("export . lasote/stable") @parameterized.expand([(True, ), (False, )]) def test_package_test(self, use_abs_folder): self.client.run("install Hello/0.1@lasote/stable --build missing") conanfile = """from conans import ConanFile class ConsumerFileTest(ConanFile): name = "Chat" version = "0.1" requires = "Hello/0.1@lasote/stable" def package_info(self): self.cpp_info.libs = ["MyLib"] """ test_conanfile = """from conans import ConanFile class ConsumerFileTest(ConanFile): requires = "Chat/0.1@lasote/stable" def build(self): self.output.info("LIBS = %s" % self.deps_cpp_info.libs[0]) def test(self): self.output.info("TEST OK") """ if use_abs_folder: output_folder = temp_folder() dest = '="%s"' % output_folder else: dest = "" output_folder = os.path.join(self.client.current_folder, ".conan_manifests") self.client.save({"conanfile.py": conanfile, "test_package/conanfile.py": test_conanfile}, clean_first=True) self.client.run("create . 
lasote/stable --manifests%s" % dest) self.assertIn("Chat/0.1@lasote/stable (test package): LIBS = MyLib", self.client.out) self.assertIn("Chat/0.1@lasote/stable (test package): TEST OK", self.client.user_io.out) self.assertIn("Installed manifest for 'Chat/0.1@lasote/stable' from local cache", self.client.user_io.out) self.assertIn("Installed manifest for 'Hello/0.1@lasote/stable' from local cache", self.client.user_io.out) paths = SimplePaths(output_folder) self.assertTrue(os.path.exists(os.path.join(paths.export(self.reference), CONAN_MANIFEST))) package_reference = PackageReference.loads("Hello/0.1@lasote/stable:%s" % NO_SETTINGS_PACKAGE_ID) self.assertTrue(os.path.exists(os.path.join(paths.package(package_reference), CONAN_MANIFEST))) # now verify self.client.run("create . lasote/stable --verify%s" % dest) self.assertIn("Manifest for 'Hello/0.1@lasote/stable': OK", self.client.user_io.out) self.assertIn("Manifest for '%s': OK" % str(package_reference), self.client.user_io.out) def _capture_verify_manifest(self, reference, remote="local cache", folder=""): self.client.run("install %s --build missing --manifests %s" % (str(reference), folder)) self.assertIn("Installed manifest for 'Hello/0.1@lasote/stable' from %s" % remote, self.client.user_io.out) self.assertIn("Installed manifest for 'Hello/0.1@lasote/stable:" "%s' from %s" % (NO_SETTINGS_PACKAGE_ID, remote), self.client.user_io.out) real_folder = folder or ".conan_manifests" output_folder = os.path.join(self.client.current_folder, real_folder) paths = SimplePaths(output_folder) self.assertTrue(os.path.exists(os.path.join(paths.export(self.reference), CONAN_MANIFEST))) package_reference = PackageReference.loads("Hello/0.1@lasote/stable:%s" % NO_SETTINGS_PACKAGE_ID) self.assertTrue(os.path.exists(os.path.join(paths.package(package_reference), CONAN_MANIFEST))) # again should do nothing self.client.run("install %s --build missing --manifests %s" % (str(self.reference), folder)) self.assertNotIn("Installed 
manifest", self.client.user_io.out) # now verify self.client.run("install %s --build missing --verify %s" % (str(self.reference), folder)) self.assertIn("Manifest for 'Hello/0.1@lasote/stable': OK", self.client.user_io.out) self.assertIn("Manifest for '%s': OK" % str(package_reference), self.client.user_io.out) def capture_verify_manifest_test(self): self._capture_verify_manifest("Hello/0.1@lasote/stable") def conanfile_capture_verify_manifest_test(self): files = {"conanfile.txt": "[requires]\nHello/0.1@lasote/stable"} self.client.save(files, clean_first=True) self._capture_verify_manifest(".") def capture_verify_manifest_folder_test(self): self._capture_verify_manifest("Hello/0.1@lasote/stable", folder="my_custom_folder") def conanfile_capture_verify_manifest_folder_test(self): files = {"conanfile.txt": "[requires]\nHello/0.1@lasote/stable"} self.client.save(files, clean_first=True) folder = "mymanifests" self._capture_verify_manifest(".", folder=folder) conanfile = """from conans import ConanFile class ConanFileTest(ConanFile): name = "Hello2" version = "0.1" """ client = TestClient(base_folder=self.client.base_folder) client.save({CONANFILE: conanfile}) client.run("export . lasote/stable") files = {"conanfile.txt": "[requires]\nHello2/0.1@lasote/stable\nHello/0.1@lasote/stable"} self.client.save(files) self.client.run("install . 
--build missing --manifests %s" % folder) remote = "local cache" package_reference = PackageReference.loads("Hello/0.1@lasote/stable:%s" % NO_SETTINGS_PACKAGE_ID) self.assertIn("Manifest for 'Hello/0.1@lasote/stable': OK", self.client.user_io.out) self.assertIn("Manifest for '%s': OK" % str(package_reference), self.client.user_io.out) self.assertIn("Installed manifest for 'Hello2/0.1@lasote/stable' from %s" % remote, self.client.user_io.out) self.assertIn("Installed manifest for 'Hello2/0.1@lasote/stable:%s' from %s" % (NO_SETTINGS_PACKAGE_ID, remote), self.client.user_io.out) output_folder = os.path.join(self.client.current_folder, folder) paths = SimplePaths(output_folder) self.assertTrue(os.path.exists(os.path.join(paths.export(self.reference), CONAN_MANIFEST))) self.assertTrue(os.path.exists(os.path.join(paths.package(package_reference), CONAN_MANIFEST))) def remote_capture_verify_manifest_test(self): self.client.run("upload %s --all" % str(self.reference)) self.client.run("remove Hello* -f") files = {"conanfile.txt": "[requires]\nHello/0.1@lasote/stable"} self.client.save(files, clean_first=True) self._capture_verify_manifest(".", remote="default") def _failed_verify(self, reference, remote="local cache"): self.client.run("install %s --build missing --manifests" % str(reference)) self.assertIn("Installed manifest for 'Hello/0.1@lasote/stable' from %s" % remote, self.client.user_io.out) self.assertIn("Installed manifest for 'Hello/0.1@lasote/stable:" "%s' from %s" % (NO_SETTINGS_PACKAGE_ID, remote), self.client.user_io.out) output_folder = os.path.join(self.client.current_folder, ".conan_manifests") paths = SimplePaths(output_folder) self.assertTrue(os.path.exists(os.path.join(paths.export(self.reference), CONAN_MANIFEST))) package_reference = PackageReference.loads("Hello/0.1@lasote/stable:" "%s" % NO_SETTINGS_PACKAGE_ID) self.assertTrue(os.path.exists(os.path.join(paths.package(package_reference), CONAN_MANIFEST))) client = TestClient(servers=self.servers, 
users={"default": [("lasote", "mypass")]}) conanfile = """from conans import ConanFile class ConanFileTest(ConanFile): name = "Hello" version = "0.1" exports = "*" """ files = {CONANFILE: conanfile, "data.txt": "MyDataHacked"} # Export and upload the conanfile client.save(files) client.run("export . lasote/stable") client.run("upload %s --all" % str(self.reference)) # now verify, with update self.client.run("remove Hello/0.1@lasote/stable -f") self.client.run("install %s --build missing --verify" % str(self.reference), ignore_error=True) self.assertNotIn("Manifest for 'Hello/0.1@lasote/stable': OK", self.client.user_io.out) self.assertNotIn("Manifest for '%s': OK" % str(package_reference), self.client.user_io.out) self.assertIn("Modified or new manifest 'Hello/0.1@lasote/stable' detected", self.client.user_io.out) def capture_verify_error_manifest_test(self): self._failed_verify("Hello/0.1@lasote/stable") def conanfile_capture_verify_error_manifest_test(self): files = {"conanfile.txt": "[requires]\nHello/0.1@lasote/stable"} self.client.save(files, clean_first=True) self._failed_verify(".") def _failed_package_verify(self, reference, remote="local cache"): self.client.run("install %s --build missing --manifests" % str(reference)) self.assertIn("Installed manifest for 'Hello/0.1@lasote/stable' from %s" % remote, self.client.user_io.out) self.assertIn("Installed manifest for 'Hello/0.1@lasote/stable:" "%s' from %s" % (NO_SETTINGS_PACKAGE_ID, remote), self.client.user_io.out) output_folder = os.path.join(self.client.current_folder, ".conan_manifests") paths = SimplePaths(output_folder) self.assertTrue(os.path.exists(os.path.join(paths.export(self.reference), CONAN_MANIFEST))) package_reference = PackageReference.loads("Hello/0.1@lasote/stable:" "%s" % NO_SETTINGS_PACKAGE_ID) self.assertTrue(os.path.exists(os.path.join(paths.package(package_reference), CONAN_MANIFEST))) client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]}) 
client.save(self.files) client.run("export . lasote/stable") client.run("install Hello/0.1@lasote/stable --build=missing") info = os.path.join(client.paths.package(package_reference), "conaninfo.txt") info_content = load(info) info_content += "# Dummy string" save(info, info_content) package_folder = client.paths.package(package_reference) manifest = FileTreeManifest.load(package_folder) manifest.file_sums["conaninfo.txt"] = md5(info_content) manifest.save(package_folder) client.run("upload %s --all" % str(self.reference)) # now verify, with update self.client.run("remove Hello/0.1@lasote/stable -f") self.client.run("install %s --build missing --verify" % str(self.reference), ignore_error=True) self.assertNotIn("Manifest for 'Hello/0.1@lasote/stable': OK", self.client.user_io.out) self.assertNotIn("Manifest for '%s': OK" % str(package_reference), self.client.user_io.out) self.assertIn("Modified or new manifest '%s' detected" % str(package_reference), self.client.user_io.out) def capture_verify_package_error_manifest_test(self): self._failed_package_verify("Hello/0.1@lasote/stable") def conanfile_capture_verify_package_error_manifest_test(self): files = {"conanfile.txt": "[requires]\nHello/0.1@lasote/stable"} self.client.save(files, clean_first=True) self._failed_package_verify(".") def manifest_wrong_folder_test(self): reference = "Hello/0.1@lasote/stable" self.client.run("install %s --build missing --verify whatever" % str(reference), ignore_error=True) self.assertIn("Manifest folder does not exist:", self.client.user_io.out) def manifest_wrong_args_test(self): reference = "Hello/0.1@lasote/stable" self.client.run("install %s --build missing --verify -m" % str(reference), ignore_error=True) self.assertIn("ERROR: Do not specify both", self.client.user_io.out) self.client.run("install %s --build missing -mi -m" % str(reference), ignore_error=True) self.assertIn("ERROR: Do not specify both", self.client.user_io.out) def test_corrupted_recipe(self): export_path = 
self.client.paths.export(self.reference) file_path = os.path.join(export_path, "data.txt") save(file_path, "BAD CONTENT") self.client.run("install %s --build missing --manifests" % str(self.reference), ignore_error=True) self.assertIn("Hello/0.1@lasote/stable local cache package is corrupted", self.client.user_io.out) def test_corrupted_package(self): self.client.run("install %s --build missing" % str(self.reference)) package_reference = PackageReference.loads("Hello/0.1@lasote/stable:" "%s" % NO_SETTINGS_PACKAGE_ID) package_path = self.client.paths.package(package_reference) file_path = os.path.join(package_path, "conaninfo.txt") save(file_path, load(file_path) + " ") self.client.run("install %s --build missing --manifests" % str(self.reference), ignore_error=True) self.assertIn("%s local cache package is corrupted" % str(package_reference), self.client.user_io.out)
yinwenhao/merlin-database
merlin-server/src/test/java/com/magic/server/test/TestCRC32.java
package com.magic.server.test;

import java.util.zip.CRC32;

/**
 * Ad-hoc micro-benchmark comparing the JDK's {@link java.util.zip.CRC32}
 * against the project's {@code com.magic.util.CRC32} on the same input,
 * printing each checksum once and then timing 1000 JDK iterations.
 */
public class TestCRC32 {

    public static void main(String[] args) {
        final String uri = "D:\\ETF0325.txt";

        // JDK CRC32, single run: print the checksum truncated to int.
        long begin = System.currentTimeMillis();
        for (int round = 0; round < 1; round++) {
            CRC32 jdkCrc = new CRC32();
            jdkCrc.update(uri.getBytes());
            jdkCrc.getValue();
            System.out.println((int) jdkCrc.getValue());
        }
        System.out.println(System.currentTimeMillis() - begin);
//		System.out.println(crc.getValue());

        // Project CRC32, single run: print its value as-is.
        begin = System.currentTimeMillis();
        for (int round = 0; round < 1; round++) {
            com.magic.util.CRC32 magicCrc = new com.magic.util.CRC32();
            magicCrc.update(uri.getBytes());
            magicCrc.getValue();
            System.out.println(magicCrc.getValue());
        }
        System.out.println(System.currentTimeMillis() - begin);
//		System.out.println(crc2.getValue());

        // JDK CRC32, 1000 runs: timing only, no per-iteration output.
        begin = System.currentTimeMillis();
        for (int round = 0; round < 1000; round++) {
            CRC32 jdkCrc = new CRC32();
            jdkCrc.update(uri.getBytes());
            jdkCrc.getValue();
        }
        System.out.println(System.currentTimeMillis() - begin);
    }
}
tusharchoudhary0003/Custom-Football-Game
sources/com/google/android/gms/internal/ads/zzdgw.java
package com.google.android.gms.internal.ads; import com.google.android.gms.internal.ads.zzdob.zzb; public final class zzdgw extends zzdob<zzdgw, zza> implements zzdpm { private static volatile zzdpv<zzdgw> zzdv; /* access modifiers changed from: private */ public static final zzdgw zzgur = new zzdgw(); private String zzgtz = ""; private zzdmr zzgua = zzdmr.f27972a; private int zzguq; public static final class zza extends com.google.android.gms.internal.ads.zzdob.zza<zzdgw, zza> implements zzdpm { private zza() { super(zzdgw.zzgur); } /* synthetic */ zza(C9047On on) { this(); } } private zzdgw() { } /* renamed from: l */ public final String mo31470l() { return this.zzgtz; } /* renamed from: m */ public final zzdmr mo31471m() { return this.zzgua; } /* access modifiers changed from: protected */ /* renamed from: a */ public final Object mo30637a(int i, Object obj, Object obj2) { switch (C9047On.f21549a[i - 1]) { case 1: return new zzdgw(); case 2: return new zza(null); case 3: return zzdob.m29885a((zzdpk) zzgur, "\u0000\u0003\u0000\u0000\u0001\u0003\u0003\u0000\u0000\u0000\u0001Ȉ\u0002\n\u0003\f", new Object[]{"zzgtz", "zzgua", "zzguq"}); case 4: return zzgur; case 5: zzdpv<zzdgw> zzdpv = zzdv; if (zzdpv == null) { synchronized (zzdgw.class) { zzdpv = zzdv; if (zzdpv == null) { zzdpv = new zzb<>(zzgur); zzdv = zzdpv; } } } return zzdpv; case 6: return Byte.valueOf(1); case 7: return null; default: throw new UnsupportedOperationException(); } } /* renamed from: n */ public static zzdgw m29443n() { return zzgur; } static { zzdob.m29887a(zzdgw.class, zzgur); } }
andela/ah-codeblooded-frontend
src/pages/SignUpPage/index.js
import React, { Component } from 'react'; import ROUTES from '../../utils/routes'; import Form from '../../containers/SignupForm'; class SignUpPage extends Component { render() { return ( <> <nav className="white black-text"> <div className="container"> <div className="nav-wrapper"> <a href={ROUTES.index} className="brand-logo center black-text logo">{'Author\'<NAME>'}</a> </div> </div> </nav> <div className="row"> <div className="col m4 offset-m4" style={{ paddingTop: '30px' }}> <Form /> </div> </div> </> ); } } export default SignUpPage;
Mumsfilibaba/Lambda
Lambda/Source/Platform/Vulkan/Memory/VKNDynamicMemoryAllocator.h
#pragma once #include "VKNDeviceAllocator.h" namespace Lambda { class VKNDynamicMemoryPage; struct VKNDynamicMemoryBlock; //-------------------- //VKNDynamicAllocation //-------------------- struct VKNDynamicAllocation { public: VKNDynamicAllocation& operator=(const VKNDynamicAllocation& other) { memcpy(this, &other, sizeof(VKNDynamicAllocation)); return *this; } public: VKNDynamicMemoryBlock* pBlock = nullptr; uint8* pHostMemory = nullptr; VkBuffer Buffer = VK_NULL_HANDLE; VkDeviceSize BufferOffset = 0; }; //--------------------- //VKNDynamicMemoryBlock //--------------------- struct VKNDynamicMemoryBlock { VKNDynamicMemoryPage* pPage = nullptr; VKNDynamicMemoryBlock* pNext = nullptr; VKNDynamicMemoryBlock* pPrevious = nullptr; VkDeviceSize SizeInBytes = 0; VkDeviceSize BufferOffset = 0; #if defined(LAMBDA_DEBUG) uint32 ID = 0; #endif bool IsFree = true; }; //--------------------- //VKNDynamicMemoryPage //--------------------- class VKNDynamicMemoryPage { private: //------------ //VKNBlockPool //------------ class VKNBlockPool { public: VKNBlockPool(uint32 numBlocks); ~VKNBlockPool(); VKNDynamicMemoryBlock* Get(); void Return(VKNDynamicMemoryBlock* pBlock); private: VKNDynamicMemoryBlock* AllocateBlocks(uint32 numBlocks); private: std::vector<VKNDynamicMemoryBlock*> m_Chains; VKNDynamicMemoryBlock* m_pHead; }; public: LAMBDA_NO_COPY(VKNDynamicMemoryPage); VKNDynamicMemoryPage(VKNDevice* pVkDevice, uint32 id, VkDeviceSize sizeInBytes); ~VKNDynamicMemoryPage() = default; bool Allocate(VKNDynamicAllocation& allocation, VkDeviceSize sizeInBytes, VkDeviceSize alignment); void Deallocate(VKNDynamicAllocation& allocation); void Destroy(VKNDevice* pVkDevice); _forceinline bool IsEmpty() const { return m_pHead->pPrevious == nullptr && m_pHead->pNext == nullptr && m_pHead->IsFree; } _forceinline uint64 GetSize() const { return m_SizeInBytes; } private: void Init(VKNDevice* pVkDevice); private: VKNDynamicMemoryBlock* m_pHead; VKNDynamicMemoryBlock* m_pNextFree; VKNBlockPool 
m_VkBlockPool; VKNAllocation m_Memory; const uint64 m_SizeInBytes; VkBuffer m_VkBuffer; const uint32 m_ID; uint32 m_BlockCount; }; //------------------------- //VKNDynamicMemoryAllocator //------------------------- class VKNDynamicMemoryAllocator final { public: LAMBDA_NO_COPY(VKNDynamicMemoryAllocator); VKNDynamicMemoryAllocator(VKNDevice* pVkDevice); ~VKNDynamicMemoryAllocator(); bool Allocate(VKNDynamicAllocation& allocation, uint64 sizeInBytes, uint64 alignment); void Deallocate(VKNDynamicAllocation& allocation); void EmptyGarbageMemory(); _forceinline uint64 GetTotalReserved() const { return m_TotalReserved; } _forceinline uint64 GetTotalAllocated() const { return m_TotalAllocated; } private: std::vector<VKNDynamicMemoryPage*> m_Pages; std::vector<VKNDynamicMemoryPage*> m_EmptyPages; std::vector<std::vector<VKNDynamicAllocation>> m_MemoryToDeallocate; VKNDevice* m_pVkDevice; VKNDynamicMemoryPage* m_pCurrentPage; uint64 m_FrameIndex; uint64 m_TotalReserved; uint64 m_TotalAllocated; }; }
jforge/vaadin
shared/src/main/java/com/vaadin/shared/JsonConstants.java
/*
 * Copyright 2000-2016 Vaadin Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package com.vaadin.shared;

import java.io.Serializable;

/**
 * Constants used in the client-server JSON communication: single-character
 * type tags ({@code VTYPE_*}) that mark how an encoded value should be
 * interpreted, plus the HTTP content type used for JSON responses.
 */
public class JsonConstants implements Serializable {
    // Reference to another connector.
    public static final String VTYPE_CONNECTOR = "c";
    // Primitive / boxed value types.
    public static final String VTYPE_BOOLEAN = "b";
    public static final String VTYPE_DOUBLE = "d";
    public static final String VTYPE_FLOAT = "f";
    public static final String VTYPE_LONG = "l";
    public static final String VTYPE_INTEGER = "i";
    public static final String VTYPE_STRING = "s";
    // Container types.
    public static final String VTYPE_ARRAY = "a";
    public static final String VTYPE_STRINGARRAY = "S";
    public static final String VTYPE_MAP = "m";
    public static final String VTYPE_LIST = "L";
    public static final String VTYPE_SET = "q";
    // Explicit null marker.
    public static final String VTYPE_NULL = "n";

    // Content type for JSON responses sent by the server.
    public static final String JSON_CONTENT_TYPE = "application/json; charset=UTF-8";
}
Youssef1313/cpp-docs
docs/mfc/codesnippet/CPP/cmapstringtoob-class_9.cpp
<reponame>Youssef1313/cpp-docs CMapStringToOb map; map.SetAt(_T("Bart"), new CAge(13)); map.SetAt(_T("Lisa"), new CAge(11)); map.SetAt(_T("Homer"), new CAge(36)); map.SetAt(_T("Marge"), new CAge(35)); map.RemoveKey(_T("Lisa")); // Memory leak: CAge object not // deleted. #ifdef _DEBUG afxDump.SetDepth(1); afxDump << _T("RemoveKey example: ") << &map << _T("\n"); #endif
mahaaveerz/FiloDB
kafka/src/test/scala/filodb/kafka/MergeableProducerConfigSpec.scala
package filodb.kafka import com.typesafe.config.ConfigFactory import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.clients.producer.{ProducerConfig, SinkConfig} import org.apache.kafka.common.serialization.{LongSerializer, StringSerializer} class MergeableProducerConfigSpec extends AbstractSpec { "MergeableProducerConfig" must { "producer test" in { val topic = "test" val partitions = 1 val settings = new KafkaSettings(ConfigFactory.parseString( s""" |filodb.kafka.config.file="./src/test/resources/full-test.properties" |filodb.kafka.topics.ingestion=$topic |filodb.kafka.partitions=$partitions |filodb.kafka.record-converter="filodb.kafka.StringRecordConverter" """.stripMargin)) val config = new SinkConfig(settings.BootstrapServers, settings.clientId, settings.nativeKafkaConfig) val values = config.kafkaConfig values(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG) must be ("localhost:9092") values(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG) must be (classOf[LongSerializer].getName) values(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG) must be (classOf[StringSerializer].getName) val props = values.asProps props.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG) must be (settings.BootstrapServers) props.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG) must be (classOf[LongSerializer].getName) props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG) must be (classOf[StringSerializer].getName) } } }
fax001/tink
python/tink/_keyset_reader.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reads Keysets from file."""

import abc

from google.protobuf import json_format
from google.protobuf import message

from tink.proto import tink_pb2
from tink import core


class KeysetReader(metaclass=abc.ABCMeta):
  """Reads a Keyset."""

  @abc.abstractmethod
  def read(self) -> tink_pb2.Keyset:
    """Reads and returns a (cleartext) tink_pb2.Keyset from its source."""
    raise NotImplementedError()

  @abc.abstractmethod
  def read_encrypted(self) -> tink_pb2.EncryptedKeyset:
    """Reads and returns an tink_pb2.EncryptedKeyset from its source."""
    raise NotImplementedError()


class JsonKeysetReader(KeysetReader):
  """Reads a Keyset serialized as proto-JSON."""

  def __init__(self, serialized_keyset: str):
    self._serialized_keyset = serialized_keyset

  def _parse(self, target):
    # Translate protobuf JSON parse failures into the Tink error type.
    try:
      return json_format.Parse(self._serialized_keyset, target)
    except json_format.ParseError as parse_error:
      raise core.TinkError(parse_error)

  def read(self) -> tink_pb2.Keyset:
    return self._parse(tink_pb2.Keyset())

  def read_encrypted(self) -> tink_pb2.EncryptedKeyset:
    return self._parse(tink_pb2.EncryptedKeyset())


class BinaryKeysetReader(KeysetReader):
  """Reads a Keyset serialized in the binary wire format."""

  def __init__(self, serialized_keyset: bytes):
    self._serialized_keyset = serialized_keyset

  def _parse(self, proto_class):
    # An empty/None payload is reported as a missing keyset, matching the
    # error raised before any decode is attempted.
    if not self._serialized_keyset:
      raise core.TinkError('No keyset found')
    # Translate wire-format decode failures into the Tink error type.
    try:
      return proto_class.FromString(self._serialized_keyset)
    except message.DecodeError as decode_error:
      raise core.TinkError(decode_error)

  def read(self) -> tink_pb2.Keyset:
    return self._parse(tink_pb2.Keyset)

  def read_encrypted(self) -> tink_pb2.EncryptedKeyset:
    return self._parse(tink_pb2.EncryptedKeyset)
markriedl/gaige
homework2/runrandomnavigator4.py
<gh_stars>10-100 ''' * Copyright (c) 2014, 2015 Entertainment Intelligence Lab, Georgia Institute of Technology. * Originally developed by <NAME>. * Last edited by <NAME> 05/2015 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ''' import sys, pygame, math, numpy, random, time, copy from pygame.locals import * from constants import * from utils import * from core import * from randomnavigator import * from mybuildpathnetwork import * pathnodes = [(50, 50), (600, 50), (50, 550), (500, 450), (900, 175), (75, 900), (450, 950), (700, 650), (950, 650), (850, 940)] nav = RandomNavigator() world = GameWorld(SEED, (1000, 1000), (1000, 1000)) agent = Agent(AGENT, (200, 100), 0, SPEED, world) world.initializeTerrain([[(320, 110), (480, 200), (370, 400), (100, 435), (180, 250)], [(740, 160), (940, 450), (800, 540), (600, 410)], [(285, 550), (400, 755), (150, 745)], [(590, 750), (910, 720), (925, 870), (580, 870)]], (0, 0, 0), 4, TREE) world.setPlayerAgent(agent) nav.setWorld(world) agent.setNavigator(nav) world.initializeRandomResources(NUMRESOURCES) world.debugging = True for n in pathnodes: drawCross(world.debug, n) nav.pathnodes = pathnodes nav.pathnetwork = myBuildPathNetwork(pathnodes, world, agent) nav.drawPathNetwork(world.debug) world.run()
spbooth/SAFE-WEBAPP
src/main/java/uk/ac/ed/epcc/webapp/model/data/reference/IndexedDataCache.java
<reponame>spbooth/SAFE-WEBAPP<gh_stars>1-10 //| Copyright - The University of Edinburgh 2011 | //| | //| Licensed under the Apache License, Version 2.0 (the "License"); | //| you may not use this file except in compliance with the License. | //| You may obtain a copy of the License at | //| | //| http://www.apache.org/licenses/LICENSE-2.0 | //| | //| Unless required by applicable law or agreed to in writing, software | //| distributed under the License is distributed on an "AS IS" BASIS, | //| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.| //| See the License for the specific language governing permissions and | //| limitations under the License. | /******************************************************************************* * Copyright (c) - The University of Edinburgh 2010 *******************************************************************************/ package uk.ac.ed.epcc.webapp.model.data.reference; import java.util.HashMap; import java.util.Map; import uk.ac.ed.epcc.webapp.AppContext; import uk.ac.ed.epcc.webapp.Indexed; import uk.ac.ed.epcc.webapp.jdbc.exception.DataException; import uk.ac.ed.epcc.webapp.model.data.DataCache; /** DataCache for Indexed types. * Entries can also be located by integer id. * * @author spb * * @param <K> key used to lookup target * @param <D> target type */ public abstract class IndexedDataCache<K,D extends Indexed> extends DataCache<K,D> { private Map<IndexedReference<? 
extends D>,D> index_map = new HashMap<>(); private AppContext c; public IndexedDataCache(AppContext c){ this.c=c; } @Override protected final D find(K key) throws DataException { D res = findIndexed(key); if( res != null ){ index_map.put(getReference(res),res); } return res; } public D find(IndexedReference<D> id){ if( id == null || id.isNull()){ return null; } D res = index_map.get(id); if( res == null){ res = id.getIndexed(c); if( res != null ){ index_map.put(id, res); } } return res; } protected abstract D findIndexed(K key) throws DataException; protected abstract IndexedReference<? extends D> getReference(D dat); }
EMBL-EBI-SUBS/subs-data-model
src/main/java/uk/ac/ebi/subs/data/component/Funding.java
package uk.ac.ebi.subs.data.component;

import lombok.Data;

/**
 * Funding source attached to a submission: the grant identifier and title
 * plus the funding organization. Lombok's {@code @Data} generates the
 * getters, setters, {@code equals}/{@code hashCode} and {@code toString}.
 */
@Data
public class Funding {
    // Identifier of the grant (e.g. the funder's grant number).
    private String grantId;
    // Name of the funding organization.
    private String organization;
    // Human-readable title of the grant.
    private String grantTitle;
}
zealoussnow/chromium
ios/web_view/internal/cwv_preview_element_info_internal.h
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef IOS_WEB_VIEW_INTERNAL_CWV_PREVIEW_ELEMENT_INFO_INTERNAL_H_
#define IOS_WEB_VIEW_INTERNAL_CWV_PREVIEW_ELEMENT_INFO_INTERNAL_H_

#import <Foundation/Foundation.h>

#import "ios/web_view/public/cwv_preview_element_info.h"

NS_ASSUME_NONNULL_BEGIN

// Internal-only extension of CWVPreviewElementInfo exposing its designated
// initializer, so only //ios/web_view code can construct instances.
@interface CWVPreviewElementInfo ()

// Creates an instance describing a previewed link with |linkURL|.
- (instancetype)initWithLinkURL:(NSURL*)linkURL NS_DESIGNATED_INITIALIZER;

@end

NS_ASSUME_NONNULL_END

#endif  // IOS_WEB_VIEW_INTERNAL_CWV_PREVIEW_ELEMENT_INFO_INTERNAL_H_
daejoon/fixture-monkey
fixture-monkey-api/src/main/java/com/navercorp/fixturemonkey/api/property/CompositeProperty.java
<gh_stars>100-1000
/*
 * Fixture Monkey
 *
 * Copyright (c) 2021-present NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.navercorp.fixturemonkey.api.property;

import java.lang.annotation.Annotation;
import java.lang.reflect.AnnotatedType;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Optional;

import javax.annotation.Nullable;

import org.apiguardian.api.API;
import org.apiguardian.api.API.Status;

/**
 * A {@link Property} that combines two delegate properties.
 *
 * <p>Identity-like aspects (type, annotated type, name) always come from the
 * primary property; annotation and value lookups consult the primary first and
 * fall back to the secondary when the primary yields nothing.
 */
@API(since = "0.4.0", status = Status.EXPERIMENTAL)
public final class CompositeProperty implements Property {
	// Preferred delegate: wins for type/name and is consulted first for lookups.
	private final Property primaryProperty;
	// Fallback delegate: used only when the primary has no annotation/value.
	private final Property secondaryProperty;

	public CompositeProperty(Property primaryProperty, Property secondaryProperty) {
		this.primaryProperty = primaryProperty;
		this.secondaryProperty = secondaryProperty;
	}

	public Property getPrimaryProperty() {
		return this.primaryProperty;
	}

	public Property getSecondaryProperty() {
		return this.secondaryProperty;
	}

	/** Type of the composite is always the primary property's type. */
	@Override
	public Type getType() {
		return this.primaryProperty.getType();
	}

	@Override
	public AnnotatedType getAnnotatedType() {
		return this.primaryProperty.getAnnotatedType();
	}

	@Override
	public String getName() {
		return this.primaryProperty.getName();
	}

	/**
	 * Returns the union of both delegates' annotations, primary's first.
	 * Note: duplicates are not removed if both delegates carry the same annotation.
	 */
	@Override
	public List<Annotation> getAnnotations() {
		List<Annotation> annotations = new ArrayList<>();
		annotations.addAll(this.primaryProperty.getAnnotations());
		annotations.addAll(this.secondaryProperty.getAnnotations());
		return annotations;
	}

	/** Looks up the annotation on the primary property, falling back to the secondary. */
	@Override
	public <T extends Annotation> Optional<T> getAnnotation(Class<T> annotationClass) {
		Optional<T> annotation = this.primaryProperty.getAnnotation(annotationClass);
		if (!annotation.isPresent()) {
			annotation = this.secondaryProperty.getAnnotation(annotationClass);
		}
		return annotation;
	}

	/**
	 * Reads the value via the primary property; if that yields null, retries
	 * with the secondary. May still return null if both yield null.
	 */
	@Nullable
	@Override
	public Object getValue(Object obj) {
		Object result = this.primaryProperty.getValue(obj);
		if (result == null) {
			result = this.secondaryProperty.getValue(obj);
		}
		return result;
	}

	@Override
	public boolean equals(Object obj) {
		if (this == obj) {
			return true;
		}
		if (obj == null || getClass() != obj.getClass()) {
			return false;
		}
		CompositeProperty that = (CompositeProperty)obj;
		return Objects.equals(primaryProperty, that.primaryProperty)
			&& Objects.equals(secondaryProperty, that.secondaryProperty);
	}

	@Override
	public int hashCode() {
		return Objects.hash(primaryProperty, secondaryProperty);
	}

	@Override
	public String toString() {
		return "CompositeProperty{"
			+ "primaryProperty=" + primaryProperty
			+ ", secondaryProperty=" + secondaryProperty + '}';
	}
}
John-ye666/Python-for-Finance-Second-Edition
Chapter10/c10_33_implied_vol_EuropeanPut_min.py
"""
  Name     : c10_33_implied_vol_EuropeanPut_min.py
  Book     : Python for Finance (2nd ed.)
  Publisher: Packt Publishing Ltd.
  Author   : <NAME>
  Date     : 6/6/2017
  email    : <EMAIL> <EMAIL>
"""
from math import exp, log, sqrt

from scipy import stats


def implied_vol_put_min(S, X, T, r, p):
    """Find the implied volatility of a European put by exhaustive search.

    Scans sigma from 0.0002 to 1.0 in steps of 0.0001, prices the put with
    the Black-Scholes-Merton formula at each step, and keeps the sigma whose
    model price is closest to the observed market price.

    Parameters:
        S: spot price of the underlying.
        X: strike price.
        T: time to maturity in years.
        r: continuously compounded risk-free rate.
        p: observed market price of the put.

    Returns:
        (k, implied_vol, put_out, min_value): the step index of the best
        candidate, the best sigma, the model put price at that sigma, and
        the remaining absolute pricing error.
    """
    # Fixes vs. the original: Python 2 `xrange`/`print` replaced with their
    # Python 3 forms; deprecated `scipy.log/exp/sqrt` (removed from the SciPy
    # namespace) replaced with `math` equivalents for scalars; `k` and
    # `put_out` initialized so they cannot be unbound if no step improves.
    implied_vol = 1.0
    min_value = 100.0
    k = 0
    put_out = None
    for i in range(1, 10000):
        sigma = 0.0001 * (i + 1)
        d1 = (log(S / X) + (r + sigma * sigma / 2.) * T) / (sigma * sqrt(T))
        d2 = d1 - sigma * sqrt(T)
        # Black-Scholes-Merton European put price.
        put = X * exp(-r * T) * stats.norm.cdf(-d2) - S * stats.norm.cdf(-d1)
        abs_diff = abs(put - p)
        if abs_diff < min_value:
            min_value = abs_diff
            implied_vol = sigma
            k = i
            put_out = put
    print('k, implied_vol, put, abs_diff')
    return k, implied_vol, put_out, min_value
Alone-space/autoplan
src/main/java/com/push/model/RetryContext.java
package com.push.model;

import lombok.Getter;

/**
 * Immutable value object carrying the parameters of one push-with-retry
 * attempt: target URL, request body, retry count and retry interval.
 * Getters are generated by Lombok's {@code @Getter}.
 *
 * @author itning
 * @since 2021/3/22 17:25
 */
@Getter
public class RetryContext {
    /**
     * Push target URL.
     */
    private final String url;
    /**
     * Push request body content.
     */
    private final String body;
    /**
     * Number of retries after a failure.
     */
    private final int numberOfRetries;
    /**
     * Interval between retries after a failure (milliseconds).
     */
    private final long retryInterval;

    /**
     * @param url             push target URL
     * @param body            request body to send
     * @param numberOfRetries how many times to retry on failure
     * @param retryInterval   pause between retries, in milliseconds
     */
    public RetryContext(String url, String body, int numberOfRetries, long retryInterval) {
        this.url = url;
        this.body = body;
        this.numberOfRetries = numberOfRetries;
        this.retryInterval = retryInterval;
    }
}
hangqiu/pixie
src/common/base/magic_enum_test.cc
/*
 * Copyright 2018- The Pixie Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <magic_enum.hpp>

#include "src/common/testing/testing.h"

// The following test cases are mostly from MagicEnum docs: https://github.com/Neargye/magic_enum.
// They serve as a good example of the capabilities of MagicEnum.
// Note that pretty much all the tests/examples below can be run as constexpr too.

// Deliberately non-contiguous enumerator values: MagicEnum must still reflect them.
enum class Color { RED = 2, BLUE = 4, GREEN = 8 };
constexpr std::size_t kColorCount = magic_enum::enum_count<Color>();

// Tests values that go beyond MAGIC_ENUM_RANGE_MAX.
enum class WideColor { INFRARED = -1024, RED = 2, VIOLET = 256, ULTRAVIOLET = 1024 };

TEST(MagicEnum, num_elements) { EXPECT_EQ(kColorCount, 3); }

TEST(MagicEnum, enum_to_string) {
  Color color = Color::RED;
  std::string_view color_name = magic_enum::enum_name(color);
  EXPECT_EQ(color_name, "RED");
}

// A value that is not a declared enumerator maps to the empty string, not an error.
TEST(MagicEnum, unknown_enum_to_string) {
  Color color = static_cast<Color>(1);
  std::string_view color_name = magic_enum::enum_name(color);
  EXPECT_EQ(color_name, "");
}

// This tests that we have MAGIC_ENUM_RANGE_MAX set properly.
TEST(MagicEnum, large_enum_to_string) {
  // We can see VIOLET because its value is exactly MAGIC_ENUM_RANGE_MAX.
  {
    WideColor color = WideColor::VIOLET;
    std::string_view color_name = magic_enum::enum_name(color);
    EXPECT_EQ(color_name, "VIOLET");
  }

  // We can't see ULTRAVIOLET because the value is beyond MAGIC_ENUM_RANGE_MAX.
  {
    WideColor color = WideColor::ULTRAVIOLET;
    std::string_view color_name = magic_enum::enum_name(color);
    EXPECT_EQ(color_name, "");
  }

  // We can't see INFRARED because the value is below MAGIC_ENUM_RANGE_MIN.
  {
    WideColor color = WideColor::INFRARED;
    std::string_view color_name = magic_enum::enum_name(color);
    EXPECT_EQ(color_name, "");
  }
}

// Like enum_to_string above, but passing name through template parameter.
// Good for constexpr.
TEST(MagicEnum, static_enum_to_string) {
  constexpr Color color = Color::BLUE;
  std::string_view color_name = magic_enum::enum_name<color>();
  CHECK_EQ(color_name, "BLUE");
}

TEST(MagicEnum, enum_to_integer) {
  Color color = Color::RED;
  EXPECT_EQ(magic_enum::enum_integer(color), 2);
}

TEST(MagicEnum, valid_string_to_enum) {
  std::string_view color_name("GREEN");
  std::optional<Color> color = magic_enum::enum_cast<Color>(color_name);
  ASSERT_TRUE(color.has_value());
  EXPECT_EQ(color.value(), Color::GREEN);
}

// enum_cast reports failure through an empty optional rather than throwing.
TEST(MagicEnum, invalid_string_to_enum) {
  std::string_view color_name("YURPLE");
  std::optional<Color> color = magic_enum::enum_cast<Color>(color_name);
  ASSERT_FALSE(color.has_value());
}

TEST(MagicEnum, valid_integer_to_enum) {
  std::optional<Color> color = magic_enum::enum_cast<Color>(2);
  ASSERT_TRUE(color.has_value());
  EXPECT_EQ(color.value(), Color::RED);
}

TEST(MagicEnum, invalid_integer_to_enum) {
  std::optional<Color> color = magic_enum::enum_cast<Color>(999);
  ASSERT_FALSE(color.has_value());
}

// enum_value indexes enumerators in declaration order (0-based).
TEST(MagicEnum, valid_indexed_access) {
  Color color = magic_enum::enum_value<Color>(1);
  EXPECT_EQ(color, Color::BLUE);
}

// Note that this test must be in a Test Suite than the rest, according to:
// https://github.com/google/googletest/blob/master/googletest/docs/advanced.md#death-test-naming
TEST(MagicEnumDeathTest, invalid_indexed_access) {
#if !defined(NDEBUG)
  EXPECT_DEATH((void)magic_enum::enum_value<Color>(999), "");
#endif
}

TEST(MagicEnum, enum_value_sequence) {
  std::array<Color, kColorCount> colors = magic_enum::enum_values<Color>();
  std::array<Color, kColorCount> expected_values = {Color::RED, Color::BLUE, Color::GREEN};
  EXPECT_EQ(colors, expected_values);
}

TEST(MagicEnum, enum_names_sequence) {
  std::array<std::string_view, kColorCount> color_names = magic_enum::enum_names<Color>();
  std::array<std::string_view, kColorCount> expected_values = {"RED", "BLUE", "GREEN"};
  EXPECT_EQ(color_names, expected_values);
}

TEST(MagicEnum, enum_entries_sequence) {
  std::array<std::pair<Color, std::string_view>, kColorCount> color_entries =
      magic_enum::enum_entries<Color>();
  std::array<std::pair<Color, std::string_view>, kColorCount> expected_entries = {
      std::pair<Color, std::string_view>{Color::RED, "RED"},
      std::pair<Color, std::string_view>{Color::BLUE, "BLUE"},
      std::pair<Color, std::string_view>{Color::GREEN, "GREEN"}};
  EXPECT_EQ(color_entries, expected_entries);
}

// Opt-in operator<< that streams the enumerator's name.
TEST(MagicEnum, ostream_operator) {
  using magic_enum::ostream_operators::operator<<;

  Color color = Color::BLUE;
  std::ostringstream buffer;
  buffer << color << std::endl;
  EXPECT_EQ(buffer.str(), "BLUE\n");
}

// Opt-in bitwise operators for using an enum class as a flag set.
TEST(MagicEnum, bitwise_operators) {
  enum class Flags { A = 1 << 0, B = 1 << 1, C = 1 << 2, D = 1 << 3 };

  using magic_enum::bitwise_operators::operator|;
  using magic_enum::bitwise_operators::operator&;
  using magic_enum::bitwise_operators::operator~;

  Flags flagsAB = Flags::A | Flags::B;
  Flags flagsBC = Flags::B | Flags::C;

  EXPECT_TRUE((flagsAB & Flags::A) == Flags::A);
  EXPECT_FALSE((flagsAB & Flags::C) == Flags::C);

  EXPECT_TRUE(((flagsAB & flagsBC) & Flags::B) == Flags::B);
  EXPECT_FALSE(((flagsAB & flagsBC) & Flags::C) == Flags::C);

  EXPECT_TRUE(((flagsAB | flagsBC) & Flags::C) == Flags::C);

  EXPECT_TRUE(((flagsAB & ~flagsBC) & Flags::A) == Flags::A);
  EXPECT_FALSE(((flagsAB & ~flagsBC) & Flags::B) == Flags::B);
}

// Type traits distinguishing plain `enum` from `enum class`.
TEST(MagicEnum, is_unscoped_enum) {
  enum color { red, green, blue };
  enum class direction { left, right };

  EXPECT_TRUE(magic_enum::is_unscoped_enum<color>::value);
  EXPECT_FALSE(magic_enum::is_unscoped_enum<direction>::value);
  EXPECT_FALSE(magic_enum::is_unscoped_enum<int>::value);

  EXPECT_TRUE(magic_enum::is_unscoped_enum_v<color>);
  EXPECT_FALSE(magic_enum::is_unscoped_enum_v<direction>);
  EXPECT_FALSE(magic_enum::is_unscoped_enum_v<int>);

  EXPECT_FALSE(magic_enum::is_scoped_enum<color>::value);
  EXPECT_TRUE(magic_enum::is_scoped_enum<direction>::value);
  EXPECT_FALSE(magic_enum::is_scoped_enum<int>::value);

  EXPECT_FALSE(magic_enum::is_scoped_enum_v<color>);
  EXPECT_TRUE(magic_enum::is_scoped_enum_v<direction>);
  EXPECT_FALSE(magic_enum::is_scoped_enum_v<int>);
}
HermanLederer/gtaRenderHook
RHEngineLib/Engine/Common/IImageView.h
<reponame>HermanLederer/gtaRenderHook<filename>RHEngineLib/Engine/Common/IImageView.h
#pragma once

namespace rh::engine
{
// Abstract marker interface for an image view owned by a rendering backend.
// The virtual destructor allows deleting concrete backend implementations
// through this base pointer.
class IImageView
{
  public:
    virtual ~IImageView() = default;
};
} // namespace rh::engine
se77enn/LeetCode-Solution
Python/asteroid-collision.py
# Time:  O(n)
# Space: O(n)

try:
    xrange          # Python 2
except NameError:
    xrange = range  # Python 3


class Solution(object):
    def asteroidCollision(self, asteroids):
        """
        :type asteroids: List[int]
        :rtype: List[int]

        Simulate with a stack of survivors. A collision can only happen when
        the incoming asteroid moves left (negative) and the stack top moves
        right (positive). The smaller asteroid explodes; equal sizes destroy
        both.
        """
        stack = []
        for rock in asteroids:
            alive = True
            while alive and stack and rock < 0 < stack[-1]:
                top = stack[-1]
                if top < -rock:
                    # Incoming asteroid is bigger: the top explodes,
                    # keep checking deeper survivors.
                    stack.pop()
                elif top == -rock:
                    # Same size: both explode.
                    stack.pop()
                    alive = False
                else:
                    # Top is bigger: the incoming asteroid explodes.
                    alive = False
            if alive:
                stack.append(rock)
        return stack
georghinkel/ttc2017smartGrids
solutions/eMoflon/rgse.ttc17.metamodels.src/src/gluemodel/CIM/IEC61970/LoadModel/impl/NonConformLoadScheduleImpl.java
/**
 */
package gluemodel.CIM.IEC61970.LoadModel.impl;

import gluemodel.CIM.IEC61970.LoadModel.LoadModelPackage;
import gluemodel.CIM.IEC61970.LoadModel.NonConformLoadGroup;
import gluemodel.CIM.IEC61970.LoadModel.NonConformLoadSchedule;

import org.eclipse.emf.common.notify.Notification;
import org.eclipse.emf.common.notify.NotificationChain;

import org.eclipse.emf.ecore.EClass;
import org.eclipse.emf.ecore.InternalEObject;

import org.eclipse.emf.ecore.impl.ENotificationImpl;

/**
 * <!-- begin-user-doc -->
 * An implementation of the model object '<em><b>Non Conform Load Schedule</b></em>'.
 *
 * EMF-generated implementation: maintains the bidirectional reference to a
 * {@link NonConformLoadGroup} (inverse: its NonConformLoadSchedules list),
 * with proxy resolution and change notifications. Do not hand-edit methods
 * marked {@code @generated}.
 * <!-- end-user-doc -->
 * <p>
 * The following features are implemented:
 * </p>
 * <ul>
 *   <li>{@link gluemodel.CIM.IEC61970.LoadModel.impl.NonConformLoadScheduleImpl#getNonConformLoadGroup <em>Non Conform Load Group</em>}</li>
 * </ul>
 *
 * @generated
 */
public class NonConformLoadScheduleImpl extends SeasonDayTypeScheduleImpl implements NonConformLoadSchedule {
	/**
	 * The cached value of the '{@link #getNonConformLoadGroup() <em>Non Conform Load Group</em>}' reference.
	 * May hold an unresolved EMF proxy until first access.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see #getNonConformLoadGroup()
	 * @generated
	 * @ordered
	 */
	protected NonConformLoadGroup nonConformLoadGroup;

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	protected NonConformLoadScheduleImpl() {
		super();
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	protected EClass eStaticClass() {
		return LoadModelPackage.Literals.NON_CONFORM_LOAD_SCHEDULE;
	}

	/**
	 * Resolves a proxy reference on first access and fires a RESOLVE
	 * notification when the resolved object differs from the proxy.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public NonConformLoadGroup getNonConformLoadGroup() {
		if (nonConformLoadGroup != null && nonConformLoadGroup.eIsProxy()) {
			InternalEObject oldNonConformLoadGroup = (InternalEObject)nonConformLoadGroup;
			nonConformLoadGroup = (NonConformLoadGroup)eResolveProxy(oldNonConformLoadGroup);
			if (nonConformLoadGroup != oldNonConformLoadGroup) {
				if (eNotificationRequired())
					eNotify(new ENotificationImpl(this, Notification.RESOLVE, LoadModelPackage.NON_CONFORM_LOAD_SCHEDULE__NON_CONFORM_LOAD_GROUP, oldNonConformLoadGroup, nonConformLoadGroup));
			}
		}
		return nonConformLoadGroup;
	}

	/**
	 * Raw accessor: returns the cached reference without proxy resolution.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public NonConformLoadGroup basicGetNonConformLoadGroup() {
		return nonConformLoadGroup;
	}

	/**
	 * Sets the reference without touching the inverse side; queues a SET
	 * notification on the returned chain.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public NotificationChain basicSetNonConformLoadGroup(NonConformLoadGroup newNonConformLoadGroup, NotificationChain msgs) {
		NonConformLoadGroup oldNonConformLoadGroup = nonConformLoadGroup;
		nonConformLoadGroup = newNonConformLoadGroup;
		if (eNotificationRequired()) {
			ENotificationImpl notification = new ENotificationImpl(this, Notification.SET, LoadModelPackage.NON_CONFORM_LOAD_SCHEDULE__NON_CONFORM_LOAD_GROUP, oldNonConformLoadGroup, newNonConformLoadGroup);
			if (msgs == null) msgs = notification; else msgs.add(notification);
		}
		return msgs;
	}

	/**
	 * Public setter: detaches the old group's inverse reference, attaches the
	 * new one, then dispatches the accumulated notifications.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public void setNonConformLoadGroup(NonConformLoadGroup newNonConformLoadGroup) {
		if (newNonConformLoadGroup != nonConformLoadGroup) {
			NotificationChain msgs = null;
			if (nonConformLoadGroup != null)
				msgs = ((InternalEObject)nonConformLoadGroup).eInverseRemove(this, LoadModelPackage.NON_CONFORM_LOAD_GROUP__NON_CONFORM_LOAD_SCHEDULES, NonConformLoadGroup.class, msgs);
			if (newNonConformLoadGroup != null)
				msgs = ((InternalEObject)newNonConformLoadGroup).eInverseAdd(this, LoadModelPackage.NON_CONFORM_LOAD_GROUP__NON_CONFORM_LOAD_SCHEDULES, NonConformLoadGroup.class, msgs);
			msgs = basicSetNonConformLoadGroup(newNonConformLoadGroup, msgs);
			if (msgs != null) msgs.dispatch();
		}
		else if (eNotificationRequired())
			eNotify(new ENotificationImpl(this, Notification.SET, LoadModelPackage.NON_CONFORM_LOAD_SCHEDULE__NON_CONFORM_LOAD_GROUP, newNonConformLoadGroup, newNonConformLoadGroup));
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public NotificationChain eInverseAdd(InternalEObject otherEnd, int featureID, NotificationChain msgs) {
		switch (featureID) {
			case LoadModelPackage.NON_CONFORM_LOAD_SCHEDULE__NON_CONFORM_LOAD_GROUP:
				if (nonConformLoadGroup != null)
					msgs = ((InternalEObject)nonConformLoadGroup).eInverseRemove(this, LoadModelPackage.NON_CONFORM_LOAD_GROUP__NON_CONFORM_LOAD_SCHEDULES, NonConformLoadGroup.class, msgs);
				return basicSetNonConformLoadGroup((NonConformLoadGroup)otherEnd, msgs);
		}
		return super.eInverseAdd(otherEnd, featureID, msgs);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public NotificationChain eInverseRemove(InternalEObject otherEnd, int featureID, NotificationChain msgs) {
		switch (featureID) {
			case LoadModelPackage.NON_CONFORM_LOAD_SCHEDULE__NON_CONFORM_LOAD_GROUP:
				return basicSetNonConformLoadGroup(null, msgs);
		}
		return super.eInverseRemove(otherEnd, featureID, msgs);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public Object eGet(int featureID, boolean resolve, boolean coreType) {
		switch (featureID) {
			case LoadModelPackage.NON_CONFORM_LOAD_SCHEDULE__NON_CONFORM_LOAD_GROUP:
				if (resolve) return getNonConformLoadGroup();
				return basicGetNonConformLoadGroup();
		}
		return super.eGet(featureID, resolve, coreType);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void eSet(int featureID, Object newValue) {
		switch (featureID) {
			case LoadModelPackage.NON_CONFORM_LOAD_SCHEDULE__NON_CONFORM_LOAD_GROUP:
				setNonConformLoadGroup((NonConformLoadGroup)newValue);
				return;
		}
		super.eSet(featureID, newValue);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void eUnset(int featureID) {
		switch (featureID) {
			case LoadModelPackage.NON_CONFORM_LOAD_SCHEDULE__NON_CONFORM_LOAD_GROUP:
				setNonConformLoadGroup((NonConformLoadGroup)null);
				return;
		}
		super.eUnset(featureID);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public boolean eIsSet(int featureID) {
		switch (featureID) {
			case LoadModelPackage.NON_CONFORM_LOAD_SCHEDULE__NON_CONFORM_LOAD_GROUP:
				return nonConformLoadGroup != null;
		}
		return super.eIsSet(featureID);
	}

} //NonConformLoadScheduleImpl
cwhelan/cloudbreak
src/main/java/edu/ohsu/sonmezsysbio/cloudbreak/mapper/MrFastSingleEndMapper.java
package edu.ohsu.sonmezsysbio.cloudbreak.mapper;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.log4j.Logger;

import java.io.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.zip.GZIPInputStream;

/**
 * Hadoop mapper that aligns single-end reads with the external mrFAST
 * aligner: reads are spooled to a local file by the superclass, mrFAST is
 * executed in {@link #close()}, and its gzip-compressed SAM output is
 * condensed and emitted as (read-pair id, condensed alignment) records.
 *
 * Created by IntelliJ IDEA.
 * User: cwhelan
 * Date: 7/17/12
 * Time: 5:46 PM
 */
public class MrFastSingleEndMapper extends SingleEndAlignerMapper {

    private static Logger logger = Logger.getLogger(MrFastSingleEndMapper.class);

    // Path to the mrFAST reference index (job property "mrfast.reference").
    private String reference;
    // Name of the mrFAST binary shipped with the job (job property "mrfast.executable").
    private String mrfastExecutable;
    // Optional mrFAST -e error threshold; -1 means "not set, use mrFAST's default".
    private int threshold = -1;

    /**
     * Reads mrFAST configuration from the job properties.
     */
    @Override
    public void configure(JobConf job) {
        super.configure(job);
        logger.info("Current dir: " + new File(".").getAbsolutePath());
        reference = job.get("mrfast.reference");
        mrfastExecutable = job.get("mrfast.executable");
        if (job.get("mrfast.threshold") != null) {
            threshold = Integer.parseInt(job.get("mrfast.threshold"));
        }
    }

    /**
     * Runs mrFAST over the spooled reads once all input has been mapped, then
     * parses its compressed output. Existence checks are logged (not fatal)
     * so the subsequent exec failure is diagnosable from the task logs.
     */
    @Override
    public void close() throws IOException {
        super.close();
        if (! s1File.exists()) {
            logger.error("file does not exist: " + s1File.getPath());
        } else {
            logger.info("read file length: " + s1File.length());
        }

        File indexFile = new File(reference);
        if (! indexFile.exists()) {
            logger.error("index file does not exist: " + indexFile.getPath());
        } else {
            logger.info("index file length: " + indexFile.length());
        }

        String[] commandLine = buildCommandLine(mrfastExecutable, reference, s1File.getPath(), threshold);
        logger.info("Executing command: " + Arrays.toString(commandLine));
        Process p = Runtime.getRuntime().exec(commandLine);
        logger.debug("Exec'd");

        try {
            p.waitFor();
        } catch (InterruptedException e) {
            e.printStackTrace();
            throw new RuntimeException(e);
        }
        logger.info("process finished with exit code: " + p.exitValue());

        // mrFAST was invoked with --outcomp, so its output lands in "output.gz"
        // in the task's working directory.
        BufferedReader stdInput = new BufferedReader(new InputStreamReader(new GZIPInputStream(new FileInputStream(new File("output.gz")))));

        readAlignments(stdInput, p.getErrorStream());
    }

    /**
     * Drains the aligner's stderr into the log, then emits one condensed
     * record per alignment line.
     */
    protected void readAlignments(BufferedReader stdInput, InputStream errorStream) throws IOException {
        String errLine;
        BufferedReader errorReader = new BufferedReader(new InputStreamReader(errorStream));
        while ((errLine = errorReader.readLine()) != null) {
            logger.error("ERROR: " + errLine);
        }

        String outLine;
        while ((outLine = stdInput.readLine()) != null) {
            // NOTE(review): drops the last 2 chars of the read name to form the
            // pair id — assumes ids end with a 2-character mate suffix such as
            // "/1"; confirm against the read-naming convention upstream.
            String readPairId = outLine.substring(0,outLine.indexOf('\t')-2);
            String condensedAlignmentLine = condenseAlignmentLine(outLine);
            getOutput().collect(new Text(readPairId), new Text(condensedAlignmentLine));
        }
    }

    /**
     * MRfast SAM output is very long so only pick out the important fields: alignment location and number
     * of mismatches
     * @param outLine one tab-separated SAM alignment line
     * @return readId, orientation (F/R), chrom, position, NM edit distance
     *         ("NA" if absent), and sequence length, tab-separated
     */
    private String condenseAlignmentLine(String outLine) {
        String[] fields = outLine.split("\t");
        String readId = fields[0];
        // NOTE(review): treats any non-"0" SAM flag as reverse strand — only
        // valid if mrFAST emits just 0/16 for these single-end records.
        String orientation = "0".equals(fields[1]) ? "F" : "R";
        String chrom = fields[2];
        String position = fields[3];
        String sequenceLength = String.valueOf(fields[9].length());
        String nm = "NA";
        // Optional tags start at field 11 in SAM; scanning from 4 is harmless
        // because mandatory fields never start with "NM:i:".
        for (int i = 4; i < fields.length; i++) {
            if (fields[i].startsWith("NM:i:")) {
                nm = fields[i].substring(5);
                break;
            }
        }
        return readId + "\t" + orientation + "\t" + chrom + "\t" + position + "\t" + nm + "\t" + sequenceLength;
    }

    /**
     * Builds the mrFAST argv: local executable, reference search, compressed
     * input/output, plus the optional -e threshold.
     */
    protected static String[] buildCommandLine(String mrfastExecutable, String reference,
                                               String path1, int threshold) {
        List<String> commandArgs = new ArrayList<String>();
        commandArgs.add("./" + mrfastExecutable);
        commandArgs.add("--search");
        commandArgs.add(reference);
        commandArgs.add("--seq");
        commandArgs.add(path1);
        commandArgs.add("--outcomp");
        commandArgs.add("--seqcomp");
        if (threshold != -1) {
            commandArgs.add("-e");
            commandArgs.add(String.valueOf(threshold));
        }
        return (String[]) commandArgs.toArray(new String[1]);
    }

    @Override
    protected String getCommandName() {
        return "mrfast";
    }
}
roblkenn/EECS441-Mobile-App
FrontEnd/2-modules/market/index.js
<reponame>roblkenn/EECS441-Mobile-App
// Public entry point of the market module: re-export the Market component and
// everything declared in the ducks file (redux actions/reducers/selectors).
export { default as Market } from "./Market";
export * from "./ducks";
colinw7/CJavaScript
data/charAt.js
// Test script: exercise String.prototype.charAt on a global string.
msg = "Hello";
msg.charAt(1); // evaluates to "e" (0-based index)
javiertuya/selema
java/src/main/java/giis/selema/manager/CiServiceFactory.java
<gh_stars>1-10 package giis.selema.manager; import giis.selema.portable.JavaCs; import giis.selema.services.ICiService; import giis.selema.services.impl.GithubService; import giis.selema.services.impl.JenkinsService; import giis.selema.services.impl.LocalService; /** * Creation of instances of the appropriate CI service */ public class CiServiceFactory { /** * Creates of the CI service instance that the system is currently running on */ public ICiService getCurrent() { if (isJenkins()) return new JenkinsService(); else if (isGithub()) return new GithubService(); else return new LocalService(); } public boolean isJenkins() { String envVar=JavaCs.getEnvironmentVariable("JENKINS_HOME"); return envVar!=null && !"".equals(envVar); } public boolean isGithub() { String envVar=JavaCs.getEnvironmentVariable("GITHUB_ACTIONS"); return envVar!=null && "true".equals(envVar); } //Gitlab: GITLAB_CI set to true }
mass-project/mass_server
mass_flask_config/config_testing.py
from mass_flask_config.config_base import BaseConfig


class TestingConfig(BaseConfig):
    """Flask configuration used when running the test suite."""

    # Flag read by the app to enable test-mode behavior.
    MASS_TESTING = True
    # Dedicated throwaway MongoDB database so tests never touch real data;
    # tz_aware=True makes the driver return timezone-aware datetimes.
    MONGODB_SETTINGS = {
        'host': 'mongodb://localhost:27017/mass-flask-testing',
        'tz_aware': True
    }
Searcher23/Searcher
src/org/geometerplus/zlibrary/core/filesystem/tar/ZLTarHeader.java
/*
 * Copyright (C) 2007-2014 <NAME> <<EMAIL>>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

package org.geometerplus.zlibrary.core.filesystem.tar;

import java.io.InputStream;
import java.io.IOException;

/**
 * Parser for one 512-byte ustar/GNU tar entry header. Fills {@link #Name},
 * {@link #Size} and {@link #IsRegularFile} from the stream, including the
 * GNU "././@LongLink" extension for names longer than 100 bytes.
 */
class ZLTarHeader {
	// Entry name (possibly replaced by a GNU long-link name).
	String Name;
	// Entry size in bytes, parsed from the octal size field.
	int Size;
	// True for regular files (link flag 0 or '0').
	boolean IsRegularFile;

	// Decodes a NUL-padded fixed-width header field into a String.
	private static String getStringFromByteArray(byte[] buffer) {
		String s = new String(buffer);
		final int indexOfZero = s.indexOf((char)0);
		if (indexOfZero != -1) {
			return s.substring(0, indexOfZero);
		} else {
			return s;
		}
	}

	/**
	 * Reads the next header from {@code stream}.
	 * Tar header layout consumed here (offsets in the 512-byte block):
	 * name[100], then mode[8]+uid[8]+gid[8] (skipped, 24 bytes),
	 * size[12] in octal, then mtime[12]+checksum[8] (skipped, 20 bytes),
	 * linkflag[1], then the remaining 355 bytes of the block (skipped).
	 *
	 * @return true if a header was parsed; false at end of archive or on a
	 *         short/zero-filled block.
	 */
	boolean read(InputStream stream) throws IOException {
		final byte[] fileName = new byte[100];
		if (stream.read(fileName) != 100) {
			return false;
		}
		// A NUL in the first byte marks the zero-filled terminator block.
		if (fileName[0] == 0) {
			return false;
		}
		Name = getStringFromByteArray(fileName);
		if (stream.skip(24) != 24) {
			return false;
		}
		final byte[] fileSizeString = new byte[12];
		if (stream.read(fileSizeString) != 12) {
			return false;
		}
		// Parse the size field as octal, stopping at the first non-octal byte
		// (space or NUL terminator).
		Size = 0;
		for (int i = 0; i < 12; ++i) {
			final byte digit = fileSizeString[i];
			if ((digit < (byte)'0') || (digit > (byte)'7')) {
				break;
			}
			Size *= 8;
			Size += digit - (byte)'0';
		}
		if (stream.skip(20) != 20) {
			return false;
		}
		final byte linkFlag = (byte)stream.read();
		if (linkFlag == -1) {
			return false;
		}
		IsRegularFile = linkFlag == 0 || linkFlag == (byte)'0';
		stream.skip(355);
		// GNU extension: an 'L' (long name) or 'K' (long link) pseudo-entry
		// named "././@LongLink" carries the real name as its data; read it
		// (capped at 10240 bytes) and round up to the next 512-byte boundary.
		if ((linkFlag == (byte)'L' || linkFlag == (byte)'K')
			&& "././@LongLink".equals(Name) && Size < 10240) {
			final byte[] nameBuffer = new byte[Size - 1];
			stream.read(nameBuffer);
			Name = getStringFromByteArray(nameBuffer);
			final int skip = 512 - (Size & 0x1ff);
			stream.skip(skip + 1);
		}
		return true;
	}

	void erase() {
		Name = null;
	}
}
QuocAnh90/Uintah_Aalto
Core/Grid/TOBSplineInterpolator.cc
/*
 * The MIT License
 *
 * Copyright (c) 1997-2019 The University of Utah
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

// Quadratic B-spline particle/grid interpolator ("TOB" presumably = Third
// Order B-spline — TODO confirm against project docs). Each particle couples
// to up to 27 nodes (3 per axis); unused slots are padded so callers always
// see 27 entries.

#include <Core/Grid/TOBSplineInterpolator.h>
#include <Core/Grid/Patch.h>
#include <Core/Grid/Level.h>
#include <Core/Malloc/Allocator.h>
#include <Core/Math/MiscMath.h>

using namespace Uintah;
using namespace std;

TOBSplineInterpolator::TOBSplineInterpolator()
{
  d_size = 27;   // max nodes a particle interacts with (3x3x3)
  d_patch = 0;
}

TOBSplineInterpolator::TOBSplineInterpolator(const Patch* patch)
{
  d_size = 27;
  d_patch = patch;
}

TOBSplineInterpolator::~TOBSplineInterpolator()
{
}

TOBSplineInterpolator* TOBSplineInterpolator::clone(const Patch* patch)
{
  return scinew TOBSplineInterpolator(patch);
}

// Selects the (up to 3) node indices along one axis that support the spline
// at cell position `cellpos`. On entry `count` is 2 (nodes ix, ix+1); one
// more node is added on the nearer side, and `count` is incremented.
void TOBSplineInterpolator::findNodeComponents(const int& ix, int* xn, int& count,
                                               const int& low, const int& hi,
                                               const double& cellpos)
{
  xn[0] = ix;
  xn[1] = ix+1;
  if(cellpos - xn[0] < 0.5){
    // lowest node is not on the lower boundary
    xn[count] = ix-1;
    count++;
  }
  else{
    // highest node is not on the upper boundary
    xn[count] = ix+2;
    count++;
  }
  // NOTE(review): `low`/`hi` are accepted but unused here — boundary-aware
  // node selection (matching evalType2/3 below) appears not to be wired in.
}

// 1-D weights: evaluates the interior spline at each selected node.
void TOBSplineInterpolator::getBSplineWeights(double* Sd, const int* xn,
                                              const int& count,
                                              const int& low, const int& hi,
                                              const double& cellpos)
{
  for(int n=0;n<count;n++){
    Sd[n]=evalType1BSpline(cellpos-xn[n]);
  }
}

double TOBSplineInterpolator::evalType1BSpline(const double& dx)
// internal nodes
{
  // Quadratic B-spline on [-1.5, 1.5]; out-of-range sentinels (+/-10)
  // flag logic errors rather than clamping.
  // make fractions constants?
  if(dx < -1.5)      // shouldn't happen
    return -10.0;
  else if(dx < -.5)
    return (dx + 1.5) * (dx + 1.5) * .5;
  else if(dx < .5)
    return (-dx * dx + .75);
  else if(dx < 1.5)
    return .5 * (1.5 - dx) * (1.5 - dx);

  // if we got here, we are > 1.5.  Shouldn't happen.
  return 10.0;
}

double TOBSplineInterpolator::evalType2BSpline(const double& dx)
// nodes 1 away from boundary
{
  // make fractions constants?
  if(dx < -1.)       // shouldn't happen
    return -10.0;
  else if(dx < -.5)  // region (1)
    return 4./3. * (dx + 1.) * (dx + 1.);
  else if(dx < .5)   // region (2)
    return (-7./6. * dx + 1./6.) * dx + 17./24.;
  else if(dx < 1.5)  // region (3)
    return .5 * (1.5 - dx) * (1.5 - dx);

  // if we got here, we are > 1.5  Shouldn't happen.
  return 10.0;
}

double TOBSplineInterpolator::evalType3BSpline(const double& dx)
// boundary nodes
{
  // make fractions constants?
  if(dx < 0.)        // shouldn't happen
    return -10.0;
  else if(dx < 0.5)  // region (1)
    return -4./3. * dx * dx + 1.;
  else if(dx < 1.5)  // region (2)
    return 2./3. * (3./2. - dx) * (3./2. - dx);

  // if we got here, we are > 2.  Shouldn't happen
  return 10.0;
}

// 1-D weight gradients: derivative of the interior spline at each node.
void TOBSplineInterpolator::getBSplineGrads(double* dSd, const int* xn,
                                            const int& count,
                                            const int& low, const int& hi,
                                            const double& cellpos)
{
  for(int n=0;n<count;n++){
    dSd[n]=evalType1BSplineGrad(cellpos-xn[n]);
  }
}

double TOBSplineInterpolator::evalType1BSplineGrad(const double& dx)
// internal nodes
{
  // Derivative of evalType1BSpline; sentinels (+/-11) flag out-of-support input.
  // make fractions constants?
  if(dx < -1.5)      // shouldn't happen
    return 11.0;
  else if(dx < -.5)
    return dx + 1.5;
  else if(dx < .5)
    return - 2. * dx;
  else if(dx < 1.5)
    return dx - 1.5;

  // if we got here, we are > 1.5.  Shouldn't happen.
  return -11.0;
}

double TOBSplineInterpolator::evalType2BSplineGrad(const double& dx)
// nodes 1 away from boundary
{
  // make fractions constants?
  if(dx < -1.)       // shouldn't happen
    return 22.0;
  else if(dx < -.5)  // region (1)
    return 8./3. * (dx + 1.);
  else if(dx < .5)   // region (2)
    return -7./3. * dx + 1./6.;
  else if(dx < 1.5)  // region (3)
    return dx - 1.5;

  // if we got here, we are > 1.5  Shouldn't happen.
  return -22.0;
}

double TOBSplineInterpolator::evalType3BSplineGrad(const double& dx)
// boundary nodes
{
  // make fractions constants?
  if(dx < 0.)        // shouldn't happen
    return 33.0;
  else if(dx < 0.5)  // region (1)
    return -8./3. * dx;
  else if(dx < 1.5)  // region (2)
    return 4./3. * dx - 2.;

  // if we got here, we are > 2.  Shouldn't happen
  return -33.0;
}

// Computes the supporting nodes `ni` and weights `S` for a particle at `pos`.
// Tensor-product of the three 1-D weight sets; slots beyond the real node
// count are padded with ni[0]/weight 0 so the result is always 27 entries.
int TOBSplineInterpolator::findCellAndWeights(const Point& pos,
                                              vector<IntVector>& ni,
                                              vector<double>& S,
                                              const Matrix3& size)
{
  IntVector low,hi;
  Point cellpos = d_patch->getLevel()->positionToIndex(pos);
  d_patch->getLevel()->findInteriorNodeIndexRange(low,hi);
  int ix = Floor(cellpos.x());
  int iy = Floor(cellpos.y());
  int iz = Floor(cellpos.z());

  int xn[3], yn[3], zn[3];
  int countx = 2;
  int county = 2;
  int countz = 2;
  double Sx[3],Sy[3],Sz[3];

  findNodeComponents(ix,xn,countx,low.x(),hi.x(),cellpos.x());
  findNodeComponents(iy,yn,county,low.y(),hi.y(),cellpos.y());
  findNodeComponents(iz,zn,countz,low.z(),hi.z(),cellpos.z());
//  zn[0]=iz;
//  zn[1]=iz+1;

  getBSplineWeights(Sx, xn, countx, low.x(), hi.x(), cellpos.x());
  getBSplineWeights(Sy, yn, county, low.y(), hi.y(), cellpos.y());
  getBSplineWeights(Sz, zn, countz, low.z(), hi.z(), cellpos.z());
//  Sz[0]=0.5;
//  Sz[1]=0.5;

  int n=0;
  for(int i=0;i<countx;i++){
    for(int j=0;j<county;j++){
      for(int k=0;k<countz;k++){
        ni[n]=IntVector(xn[i],yn[j],zn[k]);
        S[n] =Sx[i]*Sy[j]*Sz[k];
        n++;
      }
    }
  }
  // Pad unused slots: duplicate node ni[0] with zero weight.
  for(int i=n;i<27;i++){
    ni[i]=ni[0];
    S[i]=0.;
  }
  return 27;
}

// Same node selection as findCellAndWeights, but returns shape-function
// gradients d_S (per-axis derivative times the other two axes' weights).
int TOBSplineInterpolator::findCellAndShapeDerivatives(const Point& pos,
                                                       vector<IntVector>& ni,
                                                       vector<Vector>& d_S,
                                                       const Matrix3& size)
{
  IntVector low,hi;
  Point cellpos = d_patch->getLevel()->positionToIndex(pos);
  d_patch->getLevel()->findInteriorNodeIndexRange(low,hi);
  int ix = Floor(cellpos.x());
  int iy = Floor(cellpos.y());
  int iz = Floor(cellpos.z());

  int xn[3], yn[3], zn[3];
  int countx = 2;
  int county = 2;
  int countz = 2;
  double Sx[3],Sy[3],Sz[3];
  double dSx[3],dSy[3],dSz[3];

  findNodeComponents(ix,xn,countx,low.x(),hi.x(),cellpos.x());
  findNodeComponents(iy,yn,county,low.y(),hi.y(),cellpos.y());
  findNodeComponents(iz,zn,countz,low.z(),hi.z(),cellpos.z());
//  zn[0]=iz;
//  zn[1]=iz+1;

  getBSplineWeights(Sx, xn, countx, low.x(), hi.x(), cellpos.x());
  getBSplineWeights(Sy, yn, county, low.y(), hi.y(), cellpos.y());
  getBSplineWeights(Sz, zn, countz, low.z(), hi.z(), cellpos.z());
//  Sz[0]=0.5;
//  Sz[1]=0.5;

  getBSplineGrads(dSx, xn, countx, low.x(), hi.x(), cellpos.x());
  getBSplineGrads(dSy, yn, county, low.y(), hi.y(), cellpos.y());
  getBSplineGrads(dSz, zn, countz, low.z(), hi.z(), cellpos.z());
//  dSz[0]=0.0;
//  dSz[1]=0.0;

  int n=0;
  for(int i=0;i<countx;i++){
    for(int j=0;j<county;j++){
      for(int k=0;k<countz;k++){
        ni[n]=IntVector(xn[i],yn[j],zn[k]);
        double xcomp=dSx[i]*Sy[j]*Sz[k];
        double ycomp=Sx[i]*dSy[j]*Sz[k];
        double zcomp=Sx[i]*Sy[j]*dSz[k];
        d_S[n]=Vector(xcomp,ycomp,zcomp);
        n++;
      }
    }
  }
  for(int i=n;i<27;i++){
    ni[i]=ni[0];
    d_S[i]=Vector(0.,0.,0.);
  }
  return 27;
}

// Combined variant: fills both weights S and gradients d_S in one pass,
// avoiding a second traversal when the caller needs both.
int TOBSplineInterpolator::findCellAndWeightsAndShapeDerivatives(const Point& pos,
                                                                 vector<IntVector>& ni,
                                                                 vector<double>& S,
                                                                 vector<Vector>& d_S,
                                                                 const Matrix3& size)
{
  IntVector low,hi;
  Point cellpos = d_patch->getLevel()->positionToIndex(pos);
  d_patch->getLevel()->findInteriorNodeIndexRange(low,hi);
  int ix = Floor(cellpos.x());
  int iy = Floor(cellpos.y());
  int iz = Floor(cellpos.z());

  int xn[3], yn[3], zn[3];
  int countx = 2;
  int county = 2;
  int countz = 2;
  double Sx[3],Sy[3],Sz[3];
  double dSx[3],dSy[3],dSz[3];

  findNodeComponents(ix,xn,countx,low.x(),hi.x(),cellpos.x());
  findNodeComponents(iy,yn,county,low.y(),hi.y(),cellpos.y());
  findNodeComponents(iz,zn,countz,low.z(),hi.z(),cellpos.z());
//  zn[0]=iz;
//  zn[1]=iz+1;

  getBSplineWeights(Sx, xn, countx, low.x(), hi.x(), cellpos.x());
  getBSplineWeights(Sy, yn, county, low.y(), hi.y(), cellpos.y());
  getBSplineWeights(Sz, zn, countz, low.z(), hi.z(), cellpos.z());
//  Sz[0]=0.5;
//  Sz[1]=0.5;

  getBSplineGrads(dSx, xn, countx, low.x(), hi.x(), cellpos.x());
  getBSplineGrads(dSy, yn, county, low.y(), hi.y(), cellpos.y());
  getBSplineGrads(dSz, zn, countz, low.z(), hi.z(), cellpos.z());
//  dSz[0]=0.0;
//  dSz[1]=0.0;

  int n=0;
  for(int i=0;i<countx;i++){
    for(int j=0;j<county;j++){
      for(int k=0;k<countz;k++){
        ni[n]=IntVector(xn[i],yn[j],zn[k]);
        double xcomp=dSx[i]*Sy[j]*Sz[k];
        double ycomp=Sx[i]*dSy[j]*Sz[k];
        double zcomp=Sx[i]*Sy[j]*dSz[k];
        d_S[n]=Vector(xcomp,ycomp,zcomp);
        S[n] =Sx[i]*Sy[j]*Sz[k];
        n++;
      }
    }
  }
  for(int i=n;i<27;i++){
    ni[i]=ni[0];
    d_S[i]=Vector(0.,0.,0.);
    S[i]=0.0;
  }
  return 27;
}

int TOBSplineInterpolator::size()
{
  return d_size;
}
DNAbro/Java-Game-Project-3
src/controllers/SkillViewController.java
package controllers; /** * Created by Andy on 4/16/2016. */ public class SkillViewController { }
manusa/yakc
quickstarts/quarkus-dashboard/src/main/frontend/src/containers/ContainerList.js
/* * Copyright 2020 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ import React from 'react'; import metrics from '../metrics'; import Icon from '../components/Icon'; import Table from '../components/Table'; const containerHeaders = [ <span><Icon icon='fa-id-card' /> Name</span>, <span><Icon icon='fa-layer-group'/> Image</span>, <span><Icon icon='fa-ethernet' /> Ports</span>, <span><Icon icon='fa-microchip' /> CPU</span>, <span><Icon icon='fa-memory' /> Memory</span> ]; const ContainerList = ({containers, podMetrics, ...properties}) => ( <Table {...properties}> <Table.Head columns={containerHeaders} /> <Table.Body> {containers.map(c => ( <Table.Row key={c.name}> <Table.Cell>{c.name}</Table.Cell> <Table.Cell>{c.image}</Table.Cell> <Table.Cell> {(c.ports ?? []).map((p, idx) => ( <div key={idx}>{p.name} {p.containerPort} {p.protocol}</div> ))} </Table.Cell> <Table.Cell> {podMetrics && podMetrics.containerCpu(c.name).toFixed(3)} </Table.Cell> <Table.Cell> {podMetrics && metrics.selectors.bytesToHumanReadable(podMetrics.containerMemory(c.name))} </Table.Cell> </Table.Row> ))} </Table.Body> </Table> ); export default ContainerList;
ScalablyTyped/SlinkyTyped
v/vso-node-api/src/main/scala/typingsSlinky/vsoNodeApi/testInterfacesMod/TestRunSubstate.scala
<reponame>ScalablyTyped/SlinkyTyped
package typingsSlinky.vsoNodeApi.testInterfacesMod

import org.scalablytyped.runtime.StObject
import scala.scalajs.js
import scala.scalajs.js.`|`
import scala.scalajs.js.annotation.{JSGlobalScope, JSGlobal, JSImport, JSName, JSBracketAccess}

// ScalablyTyped-generated facade for the TypeScript numeric enum
// `TestRunSubstate` from vso-node-api; the inline /* n */ markers record
// each member's numeric value.  Do not hand-edit generated typings.
@js.native
sealed trait TestRunSubstate extends StObject
@JSImport("vso-node-api/interfaces/TestInterfaces", "TestRunSubstate")
@js.native
object TestRunSubstate extends StObject {

  // Reverse lookup: numeric value -> enum member (undefined if unmapped).
  @JSBracketAccess
  def apply(value: Double): js.UndefOr[TestRunSubstate with Double] = js.native

  @js.native
  sealed trait AbortedBySystem extends TestRunSubstate
  /* 4 */ val AbortedBySystem: typingsSlinky.vsoNodeApi.testInterfacesMod.TestRunSubstate.AbortedBySystem with Double = js.native

  @js.native
  sealed trait Analyzed extends TestRunSubstate
  /* 7 */ val Analyzed: typingsSlinky.vsoNodeApi.testInterfacesMod.TestRunSubstate.Analyzed with Double = js.native

  @js.native
  sealed trait CanceledByUser extends TestRunSubstate
  /* 3 */ val CanceledByUser: typingsSlinky.vsoNodeApi.testInterfacesMod.TestRunSubstate.CanceledByUser with Double = js.native

  @js.native
  sealed trait CancellationInProgress extends TestRunSubstate
  /* 8 */ val CancellationInProgress: typingsSlinky.vsoNodeApi.testInterfacesMod.TestRunSubstate.CancellationInProgress with Double = js.native

  @js.native
  sealed trait CreatingEnvironment extends TestRunSubstate
  /* 1 */ val CreatingEnvironment: typingsSlinky.vsoNodeApi.testInterfacesMod.TestRunSubstate.CreatingEnvironment with Double = js.native

  @js.native
  sealed trait None extends TestRunSubstate
  /* 0 */ val None: typingsSlinky.vsoNodeApi.testInterfacesMod.TestRunSubstate.None with Double = js.native

  @js.native
  sealed trait PendingAnalysis extends TestRunSubstate
  /* 6 */ val PendingAnalysis: typingsSlinky.vsoNodeApi.testInterfacesMod.TestRunSubstate.PendingAnalysis with Double = js.native

  @js.native
  sealed trait RunningTests extends TestRunSubstate
  /* 2 */ val RunningTests: typingsSlinky.vsoNodeApi.testInterfacesMod.TestRunSubstate.RunningTests with Double = js.native

  @js.native
  sealed trait TimedOut extends TestRunSubstate
  /* 5 */ val TimedOut: typingsSlinky.vsoNodeApi.testInterfacesMod.TestRunSubstate.TimedOut with Double = js.native
}
AlexGenK/Consumers_cabinet_LTKE
app/models/concerns/percent_validator.rb
class PercentValidator < ActiveModel::Validator def validate(record) if record.id allprc = EnPayment.where("consumer_id = ?", record.consumer_id).sum(:percent) - EnPayment.find(record.id).percent + record.percent else allprc = EnPayment.where("consumer_id = ?", record.consumer_id).sum(:percent) + record.percent end if allprc > 100 record.errors.add(:active, "Операція неможлива. Планові платежі перевищили 100%") end end end
darth-willy/mobibench
MobiBenchAutoDeviceClient/src/wvw/mobibench/devclient/server/DevClientHttpServer.java
/**
 * Copyright 2016 <NAME>
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
 * by applicable law or agreed to in writing, software distributed under the
 * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
 * OF ANY KIND, either express or implied. See the License for the specific
 * language governing permissions and limitations under the License.
 *
 * @author wvw
 */
package wvw.mobibench.devclient.server;

import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;

import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;

import wvw.mobibench.devserv.server.handler.HandlerException;
import wvw.mobibench.devserv.server.handler.HandlerListener;
import wvw.mobibench.devserv.server.handler.RequestHandler;
import wvw.mobibench.devserv.server.msg.ResponseMsg;
import wvw.mobibench.devserv.server.serial.Serializer;
import wvw.utils.IOUtils;
import wvw.utils.log2.Log;

/**
 * Minimal HTTP front-end for the device client. POST bodies are forwarded to
 * the configured {@link RequestHandler}; GET returns a welcome page; every
 * other method is rejected with 405.
 */
public class DevClientHttpServer implements HttpHandler, HandlerListener {

	/** HTTP status codes this server can emit. */
	private enum ResponseTypes {
		OK(200), INTERNAL_SERVER_ERROR(500), METHOD_NOT_ALLOWED(405);

		private int code;

		private ResponseTypes(int code) {
			this.code = code;
		}

		public int getCode() {
			return code;
		}
	}

	private HttpServer server;
	private RequestHandler handler;
	private Serializer serial = new Serializer();

	/**
	 * Binds an HttpServer on {@code port} with the single context
	 * {@code /MobiBenchAutoDeviceClient}. A bind failure is only logged;
	 * callers should be aware {@code server} stays null in that case.
	 */
	public DevClientHttpServer(RequestHandler handler, int port) {
		try {
			server = HttpServer.create(new InetSocketAddress(port), 0);
			server.createContext("/MobiBenchAutoDeviceClient", this);
			server.setExecutor(null); // default executor: requests handled on the dispatch thread

			this.handler = handler;

		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	public void start() {
		server.start();
	}

	public void stop() {
		server.stop(0);
	}

	/**
	 * Dispatches one exchange: POST to the request handler, GET to a static
	 * welcome page, everything else to 405. Handler failures are reported as
	 * a serialized 500 response.
	 */
	public void handle(HttpExchange t) {
		String method = t.getRequestMethod().toLowerCase();
		String uri = t.getRequestURI().toString();

		try {
			if (method.equals("post")) {
				handler.handle(uri, getBody(t), this);

			} else if (method.equals("get")) {
				respondOk("<h1>Welcome to AutoBenchmarkDeviceClient!</h1>", t);

			} else {
				Log.i("unsupported method: " + method);
				respond(ResponseTypes.METHOD_NOT_ALLOWED, t);
			}

		} catch (IOException | HandlerException e) {
			e.printStackTrace();

			try {
				respond(ResponseTypes.INTERNAL_SERVER_ERROR, e, t);

			} catch (IOException e1) {
				e1.printStackTrace();
			}
		}
	}

	private String getBody(HttpExchange t) throws IOException {
		return IOUtils.readFromStream(t.getRequestBody());
	}

	/**
	 * Headers-only response. FIX: uses -1 (no body) instead of 0 — a length
	 * of 0 switches HttpExchange to chunked mode and leaves the exchange
	 * open; the exchange is now also closed explicitly.
	 */
	private void respond(ResponseTypes type, HttpExchange t) throws IOException {
		t.sendResponseHeaders(type.getCode(), -1);
		t.close();
	}

	private void respondOk(String msg, HttpExchange t) throws IOException {
		respond(ResponseTypes.OK, msg, t);
	}

	private void respond(ResponseTypes type, Object msg, HttpExchange t) throws IOException {
		respond(type, serial.serialize(msg), t);
	}

	/**
	 * Body response. FIX: the declared content length is now the encoded
	 * byte count — the old code declared {@code msg.length()} (chars) but
	 * wrote {@code msg.getBytes()}, truncating any non-ASCII payload. UTF-8
	 * is used explicitly instead of the platform default charset.
	 */
	private void respond(ResponseTypes type, String msg, HttpExchange t) throws IOException {
		byte[] body = msg.getBytes(StandardCharsets.UTF_8);
		t.sendResponseHeaders(type.getCode(), body.length);

		OutputStream os = t.getResponseBody();
		os.write(body);
		os.close();
	}

	/** No-op: this client does not push responses back over HTTP. */
	public void response(String url, ResponseMsg message) {
	}
}
marcosrachid/blockchain-criptocurrency
src/main/java/com/custom/blockchain/node/network/server/request/arguments/TransactionsResponseArguments.java
<filename>src/main/java/com/custom/blockchain/node/network/server/request/arguments/TransactionsResponseArguments.java<gh_stars>1-10 package com.custom.blockchain.node.network.server.request.arguments; import java.util.Set; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; import com.custom.blockchain.transaction.SimpleTransaction; public class TransactionsResponseArguments implements GenericArguments { private static final long serialVersionUID = 1L; private Set<SimpleTransaction> transactions; public TransactionsResponseArguments() { super(); } public TransactionsResponseArguments(Set<SimpleTransaction> transactions) { super(); this.transactions = transactions; } public Set<SimpleTransaction> getTransactions() { return transactions; } public void setTransactions(Set<SimpleTransaction> transactions) { this.transactions = transactions; } @Override public int hashCode() { return new HashCodeBuilder().append(transactions).hashCode(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; TransactionsResponseArguments other = (TransactionsResponseArguments) obj; return new EqualsBuilder().append(transactions, other.transactions).isEquals(); } @Override public String toString() { return new ToStringBuilder(this).append("transactions", transactions).build(); } }
lushstar/pagoda
pagoda-service/src/main/java/com/lushstar/pagoda/service/controller/AppServiceController.java
<filename>pagoda-service/src/main/java/com/lushstar/pagoda/service/controller/AppServiceController.java
package com.lushstar.pagoda.service.controller;

import ma.glasnost.orika.MapperFacade;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.util.StringUtils;
import org.springframework.web.bind.annotation.*;
import com.lushstar.pagoda.dal.model.AppEntity;
import com.lushstar.pagoda.service.bo.AppBo;
import com.lushstar.pagoda.service.response.ServiceResponse;
import com.lushstar.pagoda.service.service.AppService;

import java.util.Date;
import java.util.List;

/**
 * <p>description : AppServiceController
 *
 * <p>blog : https://blog.csdn.net/masteryourself
 *
 * @author : masteryourself
 * @version : 1.0.0
 * @date : 2020/2/25 21:44
 */
@RestController
@RequestMapping(value = "service/app")
public class AppServiceController {

    @Autowired
    private AppService appService;

    // Orika mapper: converts between persistence entities and BO DTOs.
    @Autowired
    private MapperFacade mapperFacade;

    /** Returns all registered apps as BOs. */
    @GetMapping(value = "list")
    public ServiceResponse<List<AppBo>> list() {
        List<AppEntity> appEntityList = appService.list();
        return ServiceResponse.success(mapperFacade.mapAsList(appEntityList, AppBo.class));
    }

    /** Persists a new app and echoes the saved state back. */
    @PostMapping(value = "add")
    public ServiceResponse<AppBo> add(@RequestBody AppBo appBo) {
        AppEntity appEntity = appService.save(mapperFacade.map(appBo, AppEntity.class));
        return ServiceResponse.success(mapperFacade.map(appEntity, AppBo.class));
    }

    /** Looks up a single app by primary key. */
    @GetMapping(value = "find/{id}")
    public ServiceResponse<AppBo> findById(@PathVariable Long id) {
        AppEntity appEntity = appService.findById(id);
        return ServiceResponse.success(mapperFacade.map(appEntity, AppBo.class));
    }

    /**
     * Partial update: only non-empty description, updateTime (defaulted to
     * "now" when absent) and the del flag are copied onto the stored entity.
     */
    @PostMapping(value = "update")
    public ServiceResponse<AppBo> update(@RequestBody AppBo appBo) {
        AppEntity appEntity = appService.findById(appBo.getId());
        if (!StringUtils.isEmpty(appBo.getDescription())) {
            appEntity.setDescription(appBo.getDescription());
        }
        // Missing updateTime means "stamp with the current time".
        if (appBo.getUpdateTime() == null) {
            appEntity.setUpdateTime(new Date());
        } else {
            appEntity.setUpdateTime(appBo.getUpdateTime());
        }
        if (appBo.getDel() != null) {
            appEntity.setDel(appBo.getDel());
        }
        return ServiceResponse.success(mapperFacade.map(appService.save(appEntity), AppBo.class));
    }
}
bmatthias/config-builder
src/test/java/com/tngtech/configbuilder/annotation/typetransformer/StringCollectionToCommaSeparatedStringTransformerTest.java
<gh_stars>1-10
package com.tngtech.configbuilder.annotation.typetransformer;

import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.tngtech.configbuilder.annotation.valuetransformer.StringCollectionToCommaSeparatedStringTransformer;
import com.tngtech.configbuilder.util.ConfigBuilderFactory;
import com.tngtech.configbuilder.util.FieldValueTransformer;
import com.tngtech.configbuilder.util.GenericsAndCastingHelper;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;

import java.lang.reflect.ParameterizedType;
import java.util.*;

import static org.hamcrest.core.IsEqual.equalTo;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.when;

/**
 * Unit tests for {@link StringCollectionToCommaSeparatedStringTransformer}:
 * joining string collections with a configurable separator, and the
 * type-matching predicate used by the transformer framework.
 */
@RunWith(MockitoJUnitRunner.class)
public class StringCollectionToCommaSeparatedStringTransformerTest {

    private StringCollectionToCommaSeparatedStringTransformer transformer;

    @Mock
    private FieldValueTransformer fieldValueTransformer;
    @Mock
    private ConfigBuilderFactory configBuilderFactory;
    @Mock
    private GenericsAndCastingHelper genericsAndCastingHelper;

    @Before
    public void setUp() {
        transformer = new StringCollectionToCommaSeparatedStringTransformer();
    }

    /** The third initialize() argument is the separator used when joining. */
    @Test
    public void testTransformer() {
        Collection<String> collection = Lists.newArrayList("Rakim","<NAME>","2Pac");
        transformer.initialize(fieldValueTransformer, configBuilderFactory, ",");
        String actualResult = transformer.transform(collection);
        assertThat(actualResult, equalTo("Rakim,<NAME>,2Pac"));

        transformer.initialize(fieldValueTransformer, configBuilderFactory, ";");
        actualResult = transformer.transform(collection);
        assertThat(actualResult, equalTo("Rakim;<NAME>;2Pac"));
    }

    /** String collections match; an arbitrary Object does not. */
    @Test
    public void testIsMatching() throws Exception {
        initializeFactoryAndHelperMocks();
        transformer.initialize(fieldValueTransformer, configBuilderFactory, ",");
        assertTrue(transformer.isMatching(Sets.newHashSet("1","2"), String.class));
        assertTrue(transformer.isMatching(Lists.newArrayList("1","2"), String.class));
        assertFalse(transformer.isMatching(new Object(), String.class));
    }

    // Stubs the factory/helper pair so isMatching() can resolve the
    // transformer's generic Collection<String> -> String signature without
    // touching real reflection helpers.
    private void initializeFactoryAndHelperMocks(){
        when(configBuilderFactory.getInstance(GenericsAndCastingHelper.class)).thenReturn(genericsAndCastingHelper);

        when(genericsAndCastingHelper.getWrapperClassIfPrimitive(String.class)).thenReturn((Class)String.class);
        when(genericsAndCastingHelper.getWrapperClassIfPrimitive(ArrayList.class)).thenReturn((Class)ArrayList.class);
        when(genericsAndCastingHelper.getWrapperClassIfPrimitive(HashSet.class)).thenReturn((Class)HashSet.class);
        when(genericsAndCastingHelper.getWrapperClassIfPrimitive(Object.class)).thenReturn((Class) Object.class);
        when(genericsAndCastingHelper.castTypeToClass(HashSet.class)).thenReturn((Class)HashSet.class);
        when(genericsAndCastingHelper.castTypeToClass(ArrayList.class)).thenReturn((Class)ArrayList.class);
        when(genericsAndCastingHelper.castTypeToClass(String.class)).thenReturn((Class)String.class);
        when(genericsAndCastingHelper.castTypeToClass(Object.class)).thenReturn((Class)Object.class);
        when(genericsAndCastingHelper.castTypeToClass(((ParameterizedType)(transformer.getClass().getGenericSuperclass())).getActualTypeArguments()[0])).thenReturn((Class)Collection.class);
    }
}
windchopper/common
common-preferences/src/main/java/com/github/windchopper/common/preferences/types/StringType.java
package com.github.windchopper.common.preferences.types; import com.github.windchopper.common.preferences.PreferencesEntryFlatType; import static com.github.windchopper.common.util.stream.FallibleFunction.identity; public class StringType extends PreferencesEntryFlatType<String> { public StringType() { super(identity(), identity()); } }
lnc441401369/lnc.github.io
src/main/java/com/myblog/model/Admin.java
package com.myblog.model; import java.io.Serializable; public class Admin implements Serializable { private Integer id; private String adminname; private String adminpasswd; public Integer getId() { return id; } public void setId(Integer id) { this.id = id; } public String getAdminname() { return adminname; } public void setAdminname(String adminname) { this.adminname = adminname; } public String getAdminpasswd() { return adminpasswd; } public void setAdminpasswd(String adminpasswd) { this.adminpasswd = <PASSWORD>; } }
tsungming/Alameda
operator/pkg/utils/resources/listpods.go
<reponame>tsungming/Alameda<gh_stars>0 package resources import ( "context" "fmt" "strings" logUtil "github.com/containers-ai/alameda/operator/pkg/utils/log" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) var ( scope = logUtil.RegisterScope("listresourceutil", "listresourceutil log", 0) ) type ListPods struct { client client.Client } func NewListPods(client client.Client) *ListPods { return &ListPods{ client: client, } } func (listpods *ListPods) ListPods(namespace, name, kind string) []corev1.Pod { podList := []corev1.Pod{} deploymentFound := &appsv1.Deployment{} if strings.ToLower(kind) == "deployment" { err := listpods.client.Get(context.TODO(), types.NamespacedName{ Namespace: namespace, Name: name, }, deploymentFound) if err != nil { scope.Error(err.Error()) return podList } else { return listpods.getPodsFromDeployment(deploymentFound) } } return podList } func (listpods *ListPods) getPodsFromDeployment(deployment *appsv1.Deployment) []corev1.Pod { podList := []corev1.Pod{} pods := &corev1.PodList{} name := deployment.GetName() ns := deployment.GetNamespace() if deployment.Spec.Selector == nil { scope.Warnf(fmt.Sprintf("List pods of alameda deployment %s/%s failed due to no matched labels found.", ns, name)) return podList } labels := deployment.Spec.Selector.MatchLabels err := listpods.client.List(context.TODO(), client.InNamespace(ns). 
MatchingLabels(labels), pods) if err != nil { scope.Warnf(fmt.Sprintf("List pods of alameda deployment %s/%s failed.", ns, name)) } else { var deploymentName string for _, pod := range pods.Items { for _, ownerReference := range pod.ObjectMeta.GetOwnerReferences() { if ownerReference.Kind == "ReplicaSet" { replicaSetName := ownerReference.Name deploymentName = replicaSetName[0:strings.LastIndex(replicaSetName, "-")] } break } if deploymentName == name { podList = append(podList, pod) } } } scope.Infof(fmt.Sprintf("%d pods founded in alameda deployment %s/%s.", len(podList), ns, name)) return podList }
DerangedMonkeyNinja/openperf
api/client/golang/client/cpu_generator/delete_cpu_generator_responses.go
// Code generated by go-swagger; DO NOT EDIT. package cpu_generator // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "fmt" "github.com/go-openapi/runtime" "github.com/go-openapi/strfmt" ) // DeleteCPUGeneratorReader is a Reader for the DeleteCPUGenerator structure. type DeleteCPUGeneratorReader struct { formats strfmt.Registry } // ReadResponse reads a server response into the received o. func (o *DeleteCPUGeneratorReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { switch response.Code() { case 204: result := NewDeleteCPUGeneratorNoContent() if err := result.readResponse(response, consumer, o.formats); err != nil { return nil, err } return result, nil default: return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) } } // NewDeleteCPUGeneratorNoContent creates a DeleteCPUGeneratorNoContent with default headers values func NewDeleteCPUGeneratorNoContent() *DeleteCPUGeneratorNoContent { return &DeleteCPUGeneratorNoContent{} } /* DeleteCPUGeneratorNoContent describes a response with status code 204, with default header values. No Content */ type DeleteCPUGeneratorNoContent struct { } func (o *DeleteCPUGeneratorNoContent) Error() string { return fmt.Sprintf("[DELETE /cpu-generators/{id}][%d] deleteCpuGeneratorNoContent ", 204) } func (o *DeleteCPUGeneratorNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { return nil }